Diffstat (limited to 'include/asm-s390')
-rw-r--r--  include/asm-s390/atomic.h       | 176
-rw-r--r--  include/asm-s390/bitops.h       |   1
-rw-r--r--  include/asm-s390/cache.h        |   1
-rw-r--r--  include/asm-s390/ccwdev.h       |   3
-rw-r--r--  include/asm-s390/elf.h          |   2
-rw-r--r--  include/asm-s390/futex.h        |  49
-rw-r--r--  include/asm-s390/ioctl.h        |  89
-rw-r--r--  include/asm-s390/kexec.h        |   5
-rw-r--r--  include/asm-s390/mman.h         |   1
-rw-r--r--  include/asm-s390/mutex.h        |   9
-rw-r--r--  include/asm-s390/processor.h    |   8
-rw-r--r--  include/asm-s390/qdio.h         |   8
-rw-r--r--  include/asm-s390/s390_rdev.h    |  15
-rw-r--r--  include/asm-s390/sigcontext.h   |   2
-rw-r--r--  include/asm-s390/system.h       |  15
-rw-r--r--  include/asm-s390/thread_info.h  |   2
-rw-r--r--  include/asm-s390/uaccess.h      |  14
-rw-r--r--  include/asm-s390/unistd.h       |   2
-rw-r--r--  include/asm-s390/vtoc.h         |  24
19 files changed, 162 insertions(+), 264 deletions(-)
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index b3bd4f679f72..be6fefe223d6 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -5,7 +5,7 @@
  * include/asm-s390/atomic.h
  *
  * S390 version
- * Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *            Denis Joseph Barrow,
  *            Arnd Bergmann (arndb@de.ibm.com)
@@ -45,59 +45,59 @@ typedef struct {
 #define atomic_read(v)          ((v)->counter)
 #define atomic_set(v,i)         (((v)->counter) = (i))
 
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-        __CS_LOOP(v, i, "ar");
-}
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
         return __CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_negative(int i, atomic_t * v)
-{
-        return __CS_LOOP(v, i, "ar") < 0;
-}
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-        __CS_LOOP(v, i, "sr");
-}
+#define atomic_add(_i, _v)              atomic_add_return(_i, _v)
+#define atomic_add_negative(_i, _v)     (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)                  atomic_add_return(1, _v)
+#define atomic_inc_return(_v)           atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)         (atomic_add_return(1, _v) == 0)
+
 static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
         return __CS_LOOP(v, i, "sr");
 }
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
-        __CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_return(volatile atomic_t * v)
-{
-        return __CS_LOOP(v, 1, "ar");
-}
+#define atomic_sub(_i, _v)              atomic_sub_return(_i, _v)
+#define atomic_sub_and_test(_i, _v)     (atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v)                  atomic_sub_return(1, _v)
+#define atomic_dec_return(_v)           atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v)         (atomic_sub_return(1, _v) == 0)
 
-static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
-{
-        return __CS_LOOP(v, 1, "ar") == 0;
-}
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
-        __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_return(volatile atomic_t * v)
-{
-        return __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
-{
-        return __CS_LOOP(v, 1, "sr") == 0;
-}
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
 {
         __CS_LOOP(v, ~mask, "nr");
 }
+
 static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 {
         __CS_LOOP(v, mask, "or");
 }
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+        __asm__ __volatile__("  cs   %0,%3,0(%2)\n"
+                             : "+d" (old), "=m" (v->counter)
+                             : "a" (v), "d" (new), "m" (v->counter)
+                             : "cc", "memory" );
+        return old;
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+
+        c = atomic_read(v);
+        while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+                c = old;
+        return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #undef __CS_LOOP
 
 #ifdef __s390x__
@@ -123,97 +123,67 @@ typedef struct {
 #define atomic64_read(v)          ((v)->counter)
 #define atomic64_set(v,i)         (((v)->counter) = (i))
 
-static __inline__ void atomic64_add(long long i, atomic64_t * v)
-{
-        __CSG_LOOP(v, i, "agr");
-}
 static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
 {
         return __CSG_LOOP(v, i, "agr");
 }
-static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v)
-{
-        return __CSG_LOOP(v, i, "agr") < 0;
-}
-static __inline__ void atomic64_sub(long long i, atomic64_t * v)
-{
-        __CSG_LOOP(v, i, "sgr");
-}
-static __inline__ void atomic64_inc(volatile atomic64_t * v)
-{
-        __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
-{
-        return __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
-{
-        return __CSG_LOOP(v, 1, "agr") == 0;
-}
-static __inline__ void atomic64_dec(volatile atomic64_t * v)
-{
-        __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
-{
-        return __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+#define atomic64_add(_i, _v)            atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v)   (atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v)                atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v)         atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v)       (atomic64_add_return(1, _v) == 0)
+
+static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
 {
-        return __CSG_LOOP(v, 1, "sgr") == 0;
+        return __CSG_LOOP(v, i, "sgr");
 }
+#define atomic64_sub(_i, _v)            atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v)   (atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v)                atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v)         atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v)       (atomic64_sub_return(1, _v) == 0)
+
 static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
 {
         __CSG_LOOP(v, ~mask, "ngr");
 }
+
 static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
 {
         __CSG_LOOP(v, mask, "ogr");
 }
 
-#undef __CSG_LOOP
-#endif
-
-/*
-  returns 0  if expected_oldval==value in *v ( swap was successful )
-  returns 1  if unsuccessful.
+static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+                                             long long old, long long new)
+{
+        __asm__ __volatile__("  csg  %0,%3,0(%2)\n"
+                             : "+d" (old), "=m" (v->counter)
+                             : "a" (v), "d" (new), "m" (v->counter)
+                             : "cc", "memory" );
+        return old;
+}
 
-  This is non-portable, use bitops or spinlocks instead!
-*/
-static __inline__ int
-atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+static __inline__ int atomic64_add_unless(atomic64_t *v,
+                                          long long a, long long u)
 {
-        int retval;
+        long long c, old;
 
-        __asm__ __volatile__(
-                "  lr   %0,%3\n"
-                "  cs   %0,%4,0(%2)\n"
-                "  ipm  %0\n"
-                "  srl  %0,28\n"
-                "0:"
-                : "=&d" (retval), "=m" (v->counter)
-                : "a" (v), "d" (expected_oldval) , "d" (new_val),
-                  "m" (v->counter) : "cc", "memory" );
-        return retval;
+        c = atomic64_read(v);
+        while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+                c = old;
+        return c != u;
 }
 
-#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define atomic_add_unless(v, a, u)                              \
-({                                                              \
-        int c, old;                                             \
-        c = atomic_read(v);                                     \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old;                                        \
-        c != (u);                                               \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#undef __CSG_LOOP
+#endif
 
 #define smp_mb__before_atomic_dec()     smp_mb()
 #define smp_mb__after_atomic_dec()      smp_mb()
 #define smp_mb__before_atomic_inc()     smp_mb()
 #define smp_mb__after_atomic_inc()      smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */
 #endif /* __ARCH_S390_ATOMIC__ */
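Note: the removed atomic_compare_and_swap()/atomic_add_unless() macros are replaced above by a real atomic_cmpxchg() and an inline atomic_add_unless(). A rough user-space model of the retry loop these helpers implement, using GCC's __sync_val_compare_and_swap() in place of the s390 cs instruction (the function and variable names below are illustrative only, not part of the patch):

    #include <stdio.h>

    /* Illustrative model of atomic_add_unless(): add "a" to *v unless *v == u. */
    static int add_unless(int *v, int a, int u)
    {
            int c = *v, old;

            while (c != u && (old = __sync_val_compare_and_swap(v, c, c + a)) != c)
                    c = old;        /* another thread changed *v; retry with its value */
            return c != u;          /* non-zero if the add was actually performed */
    }

    int main(void)
    {
            int counter = 1;

            /* behaves like atomic_inc_not_zero(): only non-zero counters get bumped */
            printf("%d %d\n", add_unless(&counter, 1, 0), counter);   /* prints: 1 2 */
            counter = 0;
            printf("%d %d\n", add_unless(&counter, 1, 0), counter);   /* prints: 0 0 */
            return 0;
    }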
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index b07c578b22ea..61232760cc3b 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -839,6 +839,7 @@ static inline int sched_find_first_bit(unsigned long *b)
  * fls: find last bit set.
  */
 #define fls(x) generic_fls(x)
+#define fls64(x) generic_fls64(x)
 
 /*
  * hweightN: returns the hamming weight (i.e. the number
diff --git a/include/asm-s390/cache.h b/include/asm-s390/cache.h
index 29845378b206..e20cdd9074db 100644
--- a/include/asm-s390/cache.h
+++ b/include/asm-s390/cache.h
@@ -13,7 +13,6 @@
 
 #define L1_CACHE_BYTES     256
 #define L1_CACHE_SHIFT     8
-#define L1_CACHE_SHIFT_MAX 8    /* largest L1 which this arch supports */
 
 #define ARCH_KMALLOC_MINALIGN   8
 
diff --git a/include/asm-s390/ccwdev.h b/include/asm-s390/ccwdev.h
index 3eb231af5d51..12456cb2f882 100644
--- a/include/asm-s390/ccwdev.h
+++ b/include/asm-s390/ccwdev.h
@@ -185,8 +185,5 @@ extern struct ccw_device *ccw_device_probe_console(void);
 extern int _ccw_device_get_device_number(struct ccw_device *);
 extern int _ccw_device_get_subchannel_number(struct ccw_device *);
 
-extern struct device *s390_root_dev_register(const char *);
-extern void s390_root_dev_unregister(struct device *);
-
 extern void *ccw_device_get_chp_desc(struct ccw_device *, int);
 #endif /* _S390_CCWDEV_H_ */
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h
index 372d51cccd53..710646e64f7d 100644
--- a/include/asm-s390/elf.h
+++ b/include/asm-s390/elf.h
@@ -163,7 +163,7 @@ static inline int dump_regs(struct pt_regs *ptregs, elf_gregset_t *regs)
 
 static inline int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
-        struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
+        struct pt_regs *ptregs = task_pt_regs(tsk);
         memcpy(&regs->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs));
         memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
         regs->orig_gpr2 = ptregs->orig_gpr2;
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index 9feff4ce1424..6a332a9f099c 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -1,53 +1,6 @@
 #ifndef _ASM_FUTEX_H
 #define _ASM_FUTEX_H
 
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
 
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
-        int op = (encoded_op >> 28) & 7;
-        int cmp = (encoded_op >> 24) & 15;
-        int oparg = (encoded_op << 8) >> 20;
-        int cmparg = (encoded_op << 20) >> 20;
-        int oldval = 0, ret;
-        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-                oparg = 1 << oparg;
-
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
-                return -EFAULT;
-
-        inc_preempt_count();
-
-        switch (op) {
-        case FUTEX_OP_SET:
-        case FUTEX_OP_ADD:
-        case FUTEX_OP_OR:
-        case FUTEX_OP_ANDN:
-        case FUTEX_OP_XOR:
-        default:
-                ret = -ENOSYS;
-        }
-
-        dec_preempt_count();
-
-        if (!ret) {
-                switch (cmp) {
-                case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-                case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-                case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-                case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-                case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-                case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-                default: ret = -ENOSYS;
-                }
-        }
-        return ret;
-}
-
-#endif
 #endif
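The removed stub unpacked the futex encoded_op into an operation, a comparison, and two small signed arguments; the generic header this patch switches to has to perform the same unpacking. A stand-alone sketch of that decoding, copied from the removed code (the sample value is arbitrary):

    #include <stdio.h>

    /* Unpack a futex encoded_op the way the removed stub did: op and cmp in the
     * top two nibbles, then two 12-bit sign-extended arguments below them. */
    static void decode(int encoded_op)
    {
            int op = (encoded_op >> 28) & 7;
            int cmp = (encoded_op >> 24) & 15;
            int oparg = (encoded_op << 8) >> 20;
            int cmparg = (encoded_op << 20) >> 20;

            printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
    }

    int main(void)
    {
            decode(0x01000001);     /* example value only */
            return 0;
    }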
diff --git a/include/asm-s390/ioctl.h b/include/asm-s390/ioctl.h
index df7394345ac4..b279fe06dfe5 100644
--- a/include/asm-s390/ioctl.h
+++ b/include/asm-s390/ioctl.h
@@ -1,88 +1 @@
-/*
- * include/asm-s390/ioctl.h
- *
- *  S390 version
- *
- *  Derived from "include/asm-i386/ioctl.h"
- */
-
-#ifndef _S390_IOCTL_H
-#define _S390_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms.  The i386 ioctl numbering scheme doesn't really enforce
- * a type field.  De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here.  Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS     8
-#define _IOC_TYPEBITS   8
-#define _IOC_SIZEBITS   14
-#define _IOC_DIRBITS    2
-
-#define _IOC_NRMASK     ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK   ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK   ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK    ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT    0
-#define _IOC_TYPESHIFT  (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT  (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT   (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE       0U
-#define _IOC_WRITE      1U
-#define _IOC_READ       2U
-
-#define _IOC(dir,type,nr,size) \
-        (((dir)  << _IOC_DIRSHIFT) | \
-         ((type) << _IOC_TYPESHIFT) | \
-         ((nr)   << _IOC_NRSHIFT) | \
-         ((size) << _IOC_SIZESHIFT))
-
-/* provoke compile error for invalid uses of size argument */
-extern unsigned long __invalid_size_argument_for_IOC;
-#define _IOC_TYPECHECK(t) \
-        ((sizeof(t) == sizeof(t[1]) && \
-          sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
-          sizeof(t) : __invalid_size_argument_for_IOC)
-
-/* used to create numbers */
-#define _IO(type,nr)            _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size)      _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOW(type,nr,size)      _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOWR(type,nr,size)     _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOR_BAD(type,nr,size)  _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW_BAD(type,nr,size)  _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr)            (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr)           (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr)             (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr)           (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN          (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT         (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT       ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK    (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT   (_IOC_SIZESHIFT)
-
-#endif /* _S390_IOCTL_H */
+#include <asm-generic/ioctl.h>
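The removed header documented the usual ioctl number layout (8-bit number, 8-bit type, 14-bit size, 2-bit direction), which asm-generic/ioctl.h keeps. A small worked example of composing and decoding such a number with those macros; the struct and command names below are made up for illustration:

    #include <stdio.h>
    #include <linux/ioctl.h>        /* provides _IOR and the _IOC_* decode macros */

    struct demo_args { int a; int b; };     /* hypothetical ioctl argument */

    #define DEMO_GET _IOR('d', 1, struct demo_args)

    int main(void)
    {
            unsigned int nr = DEMO_GET;

            /* dir=2 (_IOC_READ), type='d', nr=1, size=sizeof(struct demo_args) */
            printf("cmd=0x%08x dir=%u type=%c nr=%u size=%u\n",
                   nr, _IOC_DIR(nr), _IOC_TYPE(nr), _IOC_NR(nr), _IOC_SIZE(nr));
            return 0;
    }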
diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h
index 54cf7d9f251c..ce28ddda0f50 100644
--- a/include/asm-s390/kexec.h
+++ b/include/asm-s390/kexec.h
@@ -35,8 +35,9 @@
 #define KEXEC_ARCH KEXEC_ARCH_S390
 
 #define MAX_NOTE_BYTES 1024
-typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
 
-extern note_buf_t crash_notes[];
+/* Provide a dummy definition to avoid build failures. */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+                                        struct pt_regs *oldregs) { }
 
 #endif /*_S390_KEXEC_H */
diff --git a/include/asm-s390/mman.h b/include/asm-s390/mman.h
index ea86bd12204f..c8d5409b5d56 100644
--- a/include/asm-s390/mman.h
+++ b/include/asm-s390/mman.h
@@ -43,6 +43,7 @@
 #define MADV_SEQUENTIAL 0x2             /* read-ahead aggressively */
 #define MADV_WILLNEED   0x3             /* pre-fault pages */
 #define MADV_DONTNEED   0x4             /* discard these pages */
+#define MADV_REMOVE     0x5             /* remove these pages & resources */
 
 /* compatibility flags */
 #define MAP_ANON        MAP_ANONYMOUS
diff --git a/include/asm-s390/mutex.h b/include/asm-s390/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-s390/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
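As the file name suggests, asm-generic/mutex-dec.h builds the fastpath around an atomic decrement: a count of 1 means unlocked, so a decrement that goes negative signals contention and falls back to the slow path. A toy user-space model of that idea, not the actual generic header:

    #include <stdio.h>

    /* Toy model of a decrement-based mutex fastpath: count 1 = unlocked. */
    struct toy_mutex { int count; };

    static void slowpath_lock(struct toy_mutex *m)
    {
            /* real code would block; here we just report the contention */
            printf("contended on %p, entering slowpath\n", (void *) m);
    }

    static void fastpath_lock(struct toy_mutex *m)
    {
            if (__sync_sub_and_fetch(&m->count, 1) < 0)
                    slowpath_lock(m);
    }

    int main(void)
    {
            struct toy_mutex m = { .count = 1 };

            fastpath_lock(&m);      /* 1 -> 0: uncontended, fastpath only */
            fastpath_lock(&m);      /* 0 -> -1: contended, slowpath taken */
            return 0;
    }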
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 4ec652ebb3b1..c5cbc4bd8414 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -191,10 +191,10 @@ extern void show_registers(struct pt_regs *regs);
 extern void show_trace(struct task_struct *task, unsigned long *sp);
 
 unsigned long get_wchan(struct task_struct *p);
-#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
-        ((unsigned long) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)))
-#define KSTK_EIP(tsk)   (__KSTK_PTREGS(tsk)->psw.addr)
-#define KSTK_ESP(tsk)   (__KSTK_PTREGS(tsk)->gprs[15])
+#define task_pt_regs(tsk) ((struct pt_regs *) \
+        (task_stack_page(tsk) + THREAD_SIZE) - 1)
+#define KSTK_EIP(tsk)   (task_pt_regs(tsk)->psw.addr)
+#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->gprs[15])
 
 /*
  * Give up the time slice of the virtual PU.
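The new task_pt_regs() relies on struct-pointer arithmetic: task_stack_page() plus THREAD_SIZE is one byte past the top of the kernel stack, and subtracting 1 from a struct pt_regs pointer steps back by a whole sizeof(struct pt_regs), which is what the old form spelled out by hand in bytes. A small stand-alone check of that equivalence, with a stand-in pt_regs and made-up sizes:

    #include <assert.h>
    #include <stdio.h>

    struct pt_regs { unsigned long gprs[16]; };     /* stand-in, not the real layout */
    #define THREAD_SIZE 8192                        /* stand-in value */

    int main(void)
    {
            static unsigned char stack[THREAD_SIZE];

            /* old style: top-of-stack address minus sizeof(struct pt_regs), in bytes */
            struct pt_regs *a = (struct pt_regs *)
                    ((unsigned long) stack + THREAD_SIZE - sizeof(struct pt_regs));
            /* new style: cast the top-of-stack address, then step back one element */
            struct pt_regs *b = (struct pt_regs *) (stack + THREAD_SIZE) - 1;

            assert(a == b);
            printf("pt_regs at %p\n", (void *) b);
            return 0;
    }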
diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h
index 0ddf0a8ef8de..7bc15f0231db 100644
--- a/include/asm-s390/qdio.h
+++ b/include/asm-s390/qdio.h
@@ -195,12 +195,14 @@ struct qdr {
 /*
  * queue information block (QIB)
  */
 #define QIB_AC_INBOUND_PCI_SUPPORTED    0x80
 #define QIB_AC_OUTBOUND_PCI_SUPPORTED   0x40
+#define QIB_RFLAGS_ENABLE_QEBSM         0x80
+
 struct qib {
         unsigned int qfmt    :  8;      /* queue format */
         unsigned int pfmt    :  8;      /* impl. dep. parameter format */
-        unsigned int res1    :  8;      /* reserved */
+        unsigned int rflags  :  8;      /* QEBSM */
         unsigned int ac      :  8;      /* adapter characteristics */
         unsigned int res2;              /* reserved */
 #ifdef QDIO_32_BIT
diff --git a/include/asm-s390/s390_rdev.h b/include/asm-s390/s390_rdev.h
new file mode 100644
index 000000000000..6fa20442a48c
--- /dev/null
+++ b/include/asm-s390/s390_rdev.h
@@ -0,0 +1,15 @@
+/*
+ *  include/asm-s390/ccwdev.h
+ *
+ *  Copyright (C) 2002,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *               Carsten Otte  <cotte@de.ibm.com>
+ *
+ *  Interface for s390 root device
+ */
+
+#ifndef _S390_RDEV_H_
+#define _S390_RDEV_H_
+extern struct device *s390_root_dev_register(const char *);
+extern void s390_root_dev_unregister(struct device *);
+#endif /* _S390_RDEV_H_ */
diff --git a/include/asm-s390/sigcontext.h b/include/asm-s390/sigcontext.h
index 803545351dd8..aeb6e0b13329 100644
--- a/include/asm-s390/sigcontext.h
+++ b/include/asm-s390/sigcontext.h
@@ -8,6 +8,8 @@
 #ifndef _ASM_S390_SIGCONTEXT_H
 #define _ASM_S390_SIGCONTEXT_H
 
+#include <linux/compiler.h>
+
 #define __NUM_GPRS 16
 #define __NUM_FPRS 16
 #define __NUM_ACRS 16
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 864cae7e1fd6..b2e65e8bf812 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -104,14 +104,25 @@ static inline void restore_access_regs(unsigned int *acrs)
         prev = __switch_to(prev,next);                                       \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_user_vtime(struct task_struct *);
+extern void account_vtime(struct task_struct *);
+extern void account_tick_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
 #endif
 
 #define finish_arch_switch(prev) do {                                        \
         set_fs(current->thread.mm_segment);                                  \
-        account_system_vtime(prev);                                          \
+        account_vtime(prev);                                                 \
 } while (0)
 
 #define nop() __asm__ __volatile__ ("nop")
diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h
index 6c18a3f24316..f3797a52c4ea 100644
--- a/include/asm-s390/thread_info.h
+++ b/include/asm-s390/thread_info.h
@@ -81,8 +81,6 @@ static inline struct thread_info *current_thread_info(void)
 #define alloc_thread_info(tsk) ((struct thread_info *) \
         __get_free_pages(GFP_KERNEL,THREAD_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif
 
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 10a619da4761..be104f21c70a 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -61,8 +61,10 @@
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
 
-#define __access_ok(addr,size) (1)
-
+static inline int __access_ok(const void *addr, unsigned long size)
+{
+        return 1;
+}
 #define access_ok(type,addr,size) __access_ok(addr,size)
 
 /*
@@ -206,25 +208,25 @@ extern int __put_user_bad(void) __attribute__((noreturn));
         case 1: {                                               \
                 unsigned char __x;                              \
                 __get_user_asm(__x, ptr, __gu_err);             \
-                (x) = (__typeof__(*(ptr))) __x;                 \
+                (x) = *(__typeof__(*(ptr)) *) &__x;             \
                 break;                                          \
         };                                                      \
         case 2: {                                               \
                 unsigned short __x;                             \
                 __get_user_asm(__x, ptr, __gu_err);             \
-                (x) = (__typeof__(*(ptr))) __x;                 \
+                (x) = *(__typeof__(*(ptr)) *) &__x;             \
                 break;                                          \
         };                                                      \
         case 4: {                                               \
                 unsigned int __x;                               \
                 __get_user_asm(__x, ptr, __gu_err);             \
-                (x) = (__typeof__(*(ptr))) __x;                 \
+                (x) = *(__typeof__(*(ptr)) *) &__x;             \
                 break;                                          \
         };                                                      \
         case 8: {                                               \
                 unsigned long long __x;                         \
                 __get_user_asm(__x, ptr, __gu_err);             \
-                (x) = (__typeof__(*(ptr))) __x;                 \
+                (x) = *(__typeof__(*(ptr)) *) &__x;             \
                 break;                                          \
         };                                                      \
         default:                                                \
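The __get_user() change above swaps a value cast for a reinterpretation of the temporary's bytes: `(x) = *(__typeof__(*(ptr)) *) &__x` copies the raw bit pattern into x instead of performing an arithmetic conversion, presumably so the assignment also works for non-integer target types and leaves the bits untouched. A user-space illustration of the difference between the two forms (the types are chosen only for demonstration):

    #include <stdio.h>

    int main(void)
    {
            unsigned int __x = 0x3f800000;  /* bit pattern of the float 1.0f */
            float a, b;

            a = (float) __x;                /* value conversion: 1065353216.0 */
            b = *(float *) &__x;            /* bit reinterpretation: 1.0 */

            printf("value cast: %f, bit copy: %f\n", a, b);
            return 0;
    }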
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index f97d92691f17..2861cdc243ad 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -539,7 +539,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
-# ifdef CONFIG_ARCH_S390_31
+# ifndef CONFIG_64BIT
 #   define __ARCH_WANT_STAT64
 #   define __ARCH_WANT_SYS_TIME
 # endif
diff --git a/include/asm-s390/vtoc.h b/include/asm-s390/vtoc.h
index 41d369f38b0e..d1de5b7ebb0b 100644
--- a/include/asm-s390/vtoc.h
+++ b/include/asm-s390/vtoc.h
@@ -176,4 +176,28 @@ struct vtoc_format7_label
         struct vtoc_cchhb DS7PTRDS; /* pointer to next FMT7 DSCB */
 } __attribute__ ((packed));
 
+struct vtoc_cms_label {
+        u8 label_id[4];         /* Label identifier */
+        u8 vol_id[6];           /* Volid */
+        u16 version_id;         /* Version identifier */
+        u32 block_size;         /* Disk block size */
+        u32 origin_ptr;         /* Disk origin pointer */
+        u32 usable_count;       /* Number of usable cylinders/blocks */
+        u32 formatted_count;    /* Maximum number of formatted cylinders/
+                                 * blocks */
+        u32 block_count;        /* Disk size in CMS blocks */
+        u32 used_count;         /* Number of CMS blocks in use */
+        u32 fst_size;           /* File Status Table (FST) size */
+        u32 fst_count;          /* Number of FSTs per CMS block */
+        u8 format_date[6];      /* Disk FORMAT date */
+        u8 reserved1[2];
+        u32 disk_offset;        /* Disk offset when reserved */
+        u32 map_block;          /* Allocation Map Block with next hole */
+        u32 hblk_disp;          /* Displacement into HBLK data of next hole */
+        u32 user_disp;          /* Displacement into user part of Allocation
+                                 * map */
+        u8 reserved2[4];
+        u8 segment_name[8];     /* Name of shared segment */
+} __attribute__ ((packed));
+
 #endif /* _ASM_S390_VTOC_H */