author	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2011-05-24 03:06:26 -0400
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2011-05-24 03:06:26 -0400
commit	b73077eb03f510a84b102fb97640e595a958403c (patch)
tree	8b639000418e2756bf6baece4e00e07d2534bccc /arch/s390/include/asm
parent	28350e330cfab46b60a1dbf763b678d859f9f3d9 (diff)
parent	9d2e173644bb5c42ff1b280fbdda3f195a7cf1f7 (diff)

Merge branch 'next' into for-linus
Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--	arch/s390/include/asm/atomic.h		28
-rw-r--r--	arch/s390/include/asm/bitops.h		65
-rw-r--r--	arch/s390/include/asm/cache.h		1
-rw-r--r--	arch/s390/include/asm/cacheflush.h	27
-rw-r--r--	arch/s390/include/asm/ccwdev.h		4
-rw-r--r--	arch/s390/include/asm/ccwgroup.h	4
-rw-r--r--	arch/s390/include/asm/cio.h		2
-rw-r--r--	arch/s390/include/asm/cmpxchg.h		225
-rw-r--r--	arch/s390/include/asm/futex.h		12
-rw-r--r--	arch/s390/include/asm/processor.h	5
-rw-r--r--	arch/s390/include/asm/rwsem.h		63
-rw-r--r--	arch/s390/include/asm/system.h		196
-rw-r--r--	arch/s390/include/asm/tlb.h		1
-rw-r--r--	arch/s390/include/asm/types.h		8
-rw-r--r--	arch/s390/include/asm/uaccess.h		4
-rw-r--r--	arch/s390/include/asm/unistd.h		6

16 files changed, 312 insertions, 339 deletions
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 76daea117181..d9db13810d15 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -9,7 +9,7 @@
  *
  * Atomic operations that C can't guarantee us.
  * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP enviroment.
+ * s390 uses 'Compare And Swap' for atomicity in SMP environment.
  *
  */

@@ -36,14 +36,19 @@

 static inline int atomic_read(const atomic_t *v)
 {
-	barrier();
-	return v->counter;
+	int c;
+
+	asm volatile(
+		"	l	%0,%1\n"
+		: "=d" (c) : "Q" (v->counter));
+	return c;
 }

 static inline void atomic_set(atomic_t *v, int i)
 {
-	v->counter = i;
-	barrier();
+	asm volatile(
+		"	st	%1,%0\n"
+		: "=Q" (v->counter) : "d" (i));
 }

 static inline int atomic_add_return(int i, atomic_t *v)
@@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)

 static inline long long atomic64_read(const atomic64_t *v)
 {
-	barrier();
-	return v->counter;
+	long long c;
+
+	asm volatile(
+		"	lg	%0,%1\n"
+		: "=d" (c) : "Q" (v->counter));
+	return c;
 }

 static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	v->counter = i;
-	barrier();
+	asm volatile(
+		"	stg	%1,%0\n"
+		: "=Q" (v->counter) : "d" (i));
 }

 static inline long long atomic64_add_return(long long i, atomic64_t *v)
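
The rewritten atomic_read/atomic_set above replace barrier()-fenced plain
accesses with explicit load/store instructions; the add/sub helpers in the
surrounding (unchanged) code build on the 'Compare And Swap' loop named in
the header comment. A hedged, userspace C sketch of that CS-loop pattern,
using GCC __atomic builtins rather than the kernel's inline assembly (the
function name is illustrative, not the kernel's):

	#include <stdio.h>

	/* Build add-and-return from compare-and-swap alone, mirroring the
	 * CS loop s390's atomic_add_return uses; retry until no other CPU
	 * modified the counter between the load and the swap. */
	static int atomic_add_return_sketch(int i, int *counter)
	{
		int old = __atomic_load_n(counter, __ATOMIC_RELAXED);

		while (!__atomic_compare_exchange_n(counter, &old, old + i,
						    0, __ATOMIC_SEQ_CST,
						    __ATOMIC_SEQ_CST))
			;	/* CAS refreshed 'old'; retry with it */
		return old + i;
	}

	int main(void)
	{
		int v = 40;

		printf("%d\n", atomic_add_return_sketch(2, &v)); /* 42 */
		return 0;
	}
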
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 2e05972c5085..e1c8f3a49884 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -742,18 +742,42 @@ static inline int sched_find_first_bit(unsigned long *b)
  * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
  */

-#define ext2_set_bit(nr, addr)       \
-	__test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_set_bit_atomic(lock, nr, addr)       \
-	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr)     \
-	__test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr)     \
-	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_test_bit(nr, addr)      \
-	test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-
-static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size)
+static inline void __set_bit_le(unsigned long nr, void *addr)
+{
+	__set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline void __clear_bit_le(unsigned long nr, void *addr)
+{
+	__clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int __test_and_set_bit_le(unsigned long nr, void *addr)
+{
+	return __test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int test_and_set_bit_le(unsigned long nr, void *addr)
+{
+	return test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int __test_and_clear_bit_le(unsigned long nr, void *addr)
+{
+	return __test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int test_and_clear_bit_le(unsigned long nr, void *addr)
+{
+	return test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int test_bit_le(unsigned long nr, const void *addr)
+{
+	return test_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
 {
 	unsigned long bytes, bits;

@@ -764,7 +788,7 @@ static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size)
 	return (bits < size) ? bits : size;
 }

-static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size,
+static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
 					  unsigned long offset)
 {
 	unsigned long *addr = vaddr, *p;

@@ -790,11 +814,10 @@ static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size,
 		size -= __BITOPS_WORDSIZE;
 		p++;
 	}
-	return offset + ext2_find_first_zero_bit(p, size);
+	return offset + find_first_zero_bit_le(p, size);
 }

-static inline unsigned long ext2_find_first_bit(void *vaddr,
-						unsigned long size)
+static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
 {
 	unsigned long bytes, bits;

@@ -805,7 +828,7 @@ static inline unsigned long ext2_find_first_bit(void *vaddr,
 	return (bits < size) ? bits : size;
 }

-static inline int ext2_find_next_bit(void *vaddr, unsigned long size,
+static inline int find_next_bit_le(void *vaddr, unsigned long size,
 				     unsigned long offset)
 {
 	unsigned long *addr = vaddr, *p;

@@ -831,10 +854,14 @@ static inline int ext2_find_next_bit(void *vaddr, unsigned long size,
 		size -= __BITOPS_WORDSIZE;
 		p++;
 	}
-	return offset + ext2_find_first_bit(p, size);
+	return offset + find_first_bit_le(p, size);
 }

-#include <asm-generic/bitops/minix.h>
+#define ext2_set_bit_atomic(lock, nr, addr)	\
+	test_and_set_bit_le(nr, addr)
+#define ext2_clear_bit_atomic(lock, nr, addr)	\
+	test_and_clear_bit_le(nr, addr)
+

 #endif /* __KERNEL__ */

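
The nr ^ (__BITOPS_WORDSIZE - 8) translation shared by all of the *_le
helpers above maps little-endian bit numbers onto s390's big-endian words:
the XOR flips which byte of the word the bit lands in while keeping its
position inside that byte, producing the byte order shown in the comment
at the top of the hunk. A standalone demo of the mapping (assuming a
64-bit word, so the constant is 56):

	#include <stdio.h>

	#define WORDSIZE 64	/* stand-in for __BITOPS_WORDSIZE on 64 bit */

	int main(void)
	{
		unsigned long nr;

		/* bits 0-7 land in the last byte of the word, 8-15 in the
		 * next-to-last, and so on */
		for (nr = 0; nr < 16; nr++)
			printf("le bit %2lu -> word bit %2lu\n",
			       nr, nr ^ (WORDSIZE - 8));
		return 0;
	}
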
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 24aafa68b643..2a30d5ac0667 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -13,6 +13,7 @@

 #define L1_CACHE_BYTES     256
 #define L1_CACHE_SHIFT     8
+#define NET_SKB_PAD	   32

 #define __read_mostly __attribute__((__section__(".data..read_mostly")))

diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 405cc97c6249..43a5c78046db 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -1,32 +1,15 @@
 #ifndef _S390_CACHEFLUSH_H
 #define _S390_CACHEFLUSH_H

-/* Keep includes the same across arches.  */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the s390. */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>

 #ifdef CONFIG_DEBUG_PAGEALLOC
 void kernel_map_pages(struct page *page, int numpages, int enable);
 #endif

+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+
 #endif /* _S390_CACHEFLUSH_H */
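
The set_memory_ro/set_memory_rw/set_memory_nx declarations added above
adjust protections in the kernel page tables. As a hedged userspace
analogue of the same idea (mprotect on an anonymous mapping; an
illustration of the concept, not the kernel implementation):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long pagesize = sysconf(_SC_PAGESIZE);
		char *p = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		strcpy(p, "hello");
		mprotect(p, pagesize, PROT_READ);	/* cf. set_memory_ro */
		printf("\"%s\" now lives in a read-only page\n", p);
		mprotect(p, pagesize, PROT_READ | PROT_WRITE);	/* cf. set_memory_rw */
		munmap(p, pagesize);
		return 0;
	}
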
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index ff6f62e0ec3e..623f2fb71774 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -112,7 +112,6 @@ enum uc_todo {

 /**
  * struct ccw driver - device driver for channel attached devices
- * @owner: owning module
  * @ids: ids supported by this driver
  * @probe: function called on probe
  * @remove: function called on remove
@@ -128,10 +127,8 @@ enum uc_todo {
  * @restore: callback for restoring after hibernation
  * @uc_handler: callback for unit check handler
  * @driver: embedded device driver structure
- * @name: device driver name
  */
 struct ccw_driver {
-	struct module *owner;
 	struct ccw_device_id *ids;
 	int (*probe) (struct ccw_device *);
 	void (*remove) (struct ccw_device *);
@@ -147,7 +144,6 @@ struct ccw_driver {
 	int (*restore)(struct ccw_device *);
 	enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
 	struct device_driver driver;
-	char *name;
 };

 extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index c79c1e787b86..f2ea2c56a7e1 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -29,8 +29,6 @@ struct ccwgroup_device {

 /**
  * struct ccwgroup_driver - driver for ccw group devices
- * @owner: driver owner
- * @name: driver name
  * @max_slaves: maximum number of slave devices
  * @driver_id: unique id
  * @probe: function called on probe
@@ -46,8 +44,6 @@ struct ccwgroup_device {
  * @driver: embedded driver structure
  */
 struct ccwgroup_driver {
-	struct module *owner;
-	char *name;
 	int max_slaves;
 	unsigned long driver_id;

diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index e34347d567a6..fc50a3342da3 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -183,7 +183,7 @@ struct esw3 {
  * The irb that is handed to the device driver when an interrupt occurs. For
  * solicited interrupts, the common I/O layer already performs checks whether
  * a field is valid; a field not being valid is always passed as %0.
- * If a unit check occured, @ecw may contain sense data; this is retrieved
+ * If a unit check occurred, @ecw may contain sense data; this is retrieved
  * by the common I/O layer itself if the device doesn't support concurrent
  * sense (so that the device driver never needs to perform basic sene itself).
  * For unsolicited interrupts, the irb is passed as-is (expect for sense data,
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..7488e52efa97
--- /dev/null
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright IBM Corp. 1999, 2011
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ */
+
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <linux/types.h>
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
+{
+	unsigned long addr, old;
+	int shift;
+
+	switch (size) {
+	case 1:
+		addr = (unsigned long) ptr;
+		shift = (3 ^ (addr & 3)) << 3;
+		addr ^= addr & 3;
+		asm volatile(
+			"	l	%0,%4\n"
+			"0:	lr	0,%0\n"
+			"	nr	0,%3\n"
+			"	or	0,%2\n"
+			"	cs	%0,0,%4\n"
+			"	jl	0b\n"
+			: "=&d" (old), "=Q" (*(int *) addr)
+			: "d" (x << shift), "d" (~(255 << shift)),
+			  "Q" (*(int *) addr) : "memory", "cc", "0");
+		return old >> shift;
+	case 2:
+		addr = (unsigned long) ptr;
+		shift = (2 ^ (addr & 2)) << 3;
+		addr ^= addr & 2;
+		asm volatile(
+			"	l	%0,%4\n"
+			"0:	lr	0,%0\n"
+			"	nr	0,%3\n"
+			"	or	0,%2\n"
+			"	cs	%0,0,%4\n"
+			"	jl	0b\n"
+			: "=&d" (old), "=Q" (*(int *) addr)
+			: "d" (x << shift), "d" (~(65535 << shift)),
+			  "Q" (*(int *) addr) : "memory", "cc", "0");
+		return old >> shift;
+	case 4:
+		asm volatile(
+			"	l	%0,%3\n"
+			"0:	cs	%0,%2,%3\n"
+			"	jl	0b\n"
+			: "=&d" (old), "=Q" (*(int *) ptr)
+			: "d" (x), "Q" (*(int *) ptr)
+			: "memory", "cc");
+		return old;
+#ifdef CONFIG_64BIT
+	case 8:
+		asm volatile(
+			"	lg	%0,%3\n"
+			"0:	csg	%0,%2,%3\n"
+			"	jl	0b\n"
+			: "=&d" (old), "=m" (*(long *) ptr)
+			: "d" (x), "Q" (*(long *) ptr)
+			: "memory", "cc");
+		return old;
+#endif /* CONFIG_64BIT */
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+#define xchg(ptr, x)						  \
+({								  \
+	__typeof__(*(ptr)) __ret;				  \
+	__ret = (__typeof__(*(ptr)))				  \
+		__xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
+	__ret;							  \
+})
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long addr, prev, tmp;
+	int shift;
+
+	switch (size) {
+	case 1:
+		addr = (unsigned long) ptr;
+		shift = (3 ^ (addr & 3)) << 3;
+		addr ^= addr & 3;
+		asm volatile(
+			"	l	%0,%2\n"
+			"0:	nr	%0,%5\n"
+			"	lr	%1,%0\n"
+			"	or	%0,%3\n"
+			"	or	%1,%4\n"
+			"	cs	%0,%1,%2\n"
+			"	jnl	1f\n"
+			"	xr	%1,%0\n"
+			"	nr	%1,%5\n"
+			"	jnz	0b\n"
+			"1:"
+			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
+			: "d" (old << shift), "d" (new << shift),
+			  "d" (~(255 << shift)), "Q" (*(int *) ptr)
+			: "memory", "cc");
+		return prev >> shift;
+	case 2:
+		addr = (unsigned long) ptr;
+		shift = (2 ^ (addr & 2)) << 3;
+		addr ^= addr & 2;
+		asm volatile(
+			"	l	%0,%2\n"
+			"0:	nr	%0,%5\n"
+			"	lr	%1,%0\n"
+			"	or	%0,%3\n"
+			"	or	%1,%4\n"
+			"	cs	%0,%1,%2\n"
+			"	jnl	1f\n"
+			"	xr	%1,%0\n"
+			"	nr	%1,%5\n"
+			"	jnz	0b\n"
+			"1:"
+			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
+			: "d" (old << shift), "d" (new << shift),
+			  "d" (~(65535 << shift)), "Q" (*(int *) ptr)
+			: "memory", "cc");
+		return prev >> shift;
+	case 4:
+		asm volatile(
+			"	cs	%0,%3,%1\n"
+			: "=&d" (prev), "=Q" (*(int *) ptr)
+			: "0" (old), "d" (new), "Q" (*(int *) ptr)
+			: "memory", "cc");
+		return prev;
+#ifdef CONFIG_64BIT
+	case 8:
+		asm volatile(
+			"	csg	%0,%3,%1\n"
+			: "=&d" (prev), "=Q" (*(long *) ptr)
+			: "0" (old), "d" (new), "Q" (*(long *) ptr)
+			: "memory", "cc");
+		return prev;
+#endif /* CONFIG_64BIT */
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+				       (unsigned long)(n), sizeof(*(ptr))))
+
+#ifdef CONFIG_64BIT
+#define cmpxchg64(ptr, o, n)					\
+({								\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);			\
+	cmpxchg((ptr), (o), (n));				\
+})
+#else /* CONFIG_64BIT */
+static inline unsigned long long __cmpxchg64(void *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	register_pair rp_old = {.pair = old};
+	register_pair rp_new = {.pair = new};
+
+	asm volatile(
+		"	cds	%0,%2,%1"
+		: "+&d" (rp_old), "=Q" (ptr)
+		: "d" (rp_new), "Q" (ptr)
+		: "cc");
+	return rp_old.pair;
+}
+#define cmpxchg64(ptr, o, n)				\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),		\
+					 (unsigned long long)(o), \
+					 (unsigned long long)(n)))
+#endif /* CONFIG_64BIT */
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+	case 2:
+	case 4:
+#ifdef CONFIG_64BIT
+	case 8:
+#endif
+		return __cmpxchg(ptr, old, new, size);
+	default:
+		return __cmpxchg_local_generic(ptr, old, new, size);
+	}
+
+	return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
+			(unsigned long)(n), sizeof(*(ptr))))
+
+#define cmpxchg64_local(ptr, o, n)	cmpxchg64((ptr), (o), (n))
+
+#endif /* __ASM_CMPXCHG_H */
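
The 1- and 2-byte cases of __xchg and __cmpxchg above exist because CS only
operates on full words: a narrow compare-and-swap is emulated by aligning
the address down, building full-word old/new values with shifts and masks,
and retrying when unrelated bytes of the word change underneath. A hedged
standalone C sketch of that emulation (little-endian shift math for
simplicity; the kernel computes the big-endian shift as
(3 ^ (addr & 3)) << 3):

	#include <stdint.h>
	#include <stdio.h>

	/* Byte-wide CAS built from a word-wide CAS; assumes a little-endian
	 * host for the shift computation. Returns the byte's previous
	 * value, like cmpxchg(). */
	static uint8_t cmpxchg_u8(uint8_t *ptr, uint8_t old, uint8_t new)
	{
		uintptr_t addr = (uintptr_t) ptr;
		uint32_t *word = (uint32_t *) (addr & ~(uintptr_t) 3);
		int shift = (addr & 3) << 3;
		uint32_t mask = (uint32_t) 0xff << shift;
		uint32_t w, neww;

		do {
			w = __atomic_load_n(word, __ATOMIC_RELAXED);
			if ((uint8_t) (w >> shift) != old)
				return (uint8_t) (w >> shift); /* mismatch */
			neww = (w & ~mask) | ((uint32_t) new << shift);
		} while (!__atomic_compare_exchange_n(word, &w, neww, 0,
						      __ATOMIC_SEQ_CST,
						      __ATOMIC_SEQ_CST));
		return old;
	}

	int main(void)
	{
		uint32_t buf = 0x11223344;
		uint8_t *b = (uint8_t *) &buf;	/* CAS one byte of the word */

		printf("old byte %#x\n", cmpxchg_u8(b, b[0], 0x55));
		printf("word now %#x\n", buf);
		return 0;
	}
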
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 5c5d02de49e9..81cf36b691f1 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -7,7 +7,7 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>

-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;

-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;

 	pagefault_disable();
@@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	return ret;
 }

-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
-						int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;

-	return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
+	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }

 #endif /* __KERNEL__ */
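
futex_atomic_op_inuser above unpacks the operation, comparison, and both
arguments from a single 32-bit encoded_op; the hunk shows the op and cmp
fields, while oparg/cmparg occupy the remaining two 12-bit fields in the
standard futex encoding (an assumption here, since this hunk does not show
those lines). A small decoder demo:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical encoded_op: op=1, cmp=3, oparg=5, cmparg=7 */
		int encoded_op = (1 << 28) | (3 << 24) | (5 << 12) | 7;
		int op = (encoded_op >> 28) & 7;
		int cmp = (encoded_op >> 24) & 15;
		/* the kernel uses sign-extending shifts; the unsigned cast
		 * here avoids shifting into the sign bit in portable C */
		int oparg = (int) ((unsigned) encoded_op << 8) >> 20;
		int cmparg = (int) ((unsigned) encoded_op << 20) >> 20;

		printf("op=%d cmp=%d oparg=%d cmparg=%d\n",
		       op, cmp, oparg, cmparg);
		return 0;
	}
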
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bf3de04170a7..2c79b6416271 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -148,11 +148,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
  */
 extern unsigned long thread_saved_pc(struct task_struct *t);

-/*
- * Print register of task into buffer. Used in fs/proc/array.c.
- */
-extern void task_show_regs(struct seq_file *m, struct task_struct *task);
-
 extern void show_code(struct pt_regs *regs);

 unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 423fdda2322d..d0eb4653cebd 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -43,29 +43,6 @@

 #ifdef __KERNEL__

-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed long		count;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map	dep_map;
-#endif
-};
-
 #ifndef __s390x__
 #define RWSEM_UNLOCKED_VALUE	0x00000000
 #define RWSEM_ACTIVE_BIAS	0x00000001
@@ -81,41 +58,6 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

 /*
- * initialisation
- */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
-   LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-			 struct lock_class_key *key);
-
-#define init_rwsem(sem)				\
-do {						\
-	static struct lock_class_key __key;	\
-						\
-	__init_rwsem((sem), #sem, &__key);	\
-} while (0)
-
-
-/*
  * lock for reading
  */
 static inline void __down_read(struct rw_semaphore *sem)
@@ -377,10 +319,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 	return new;
 }

-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _S390_RWSEM_H */
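
The bias constants kept above encode the whole rwsem state in one signed
word: each reader adds RWSEM_ACTIVE_BIAS, a writer adds RWSEM_WAITING_BIAS
plus RWSEM_ACTIVE_BIAS, and sign/zero tests on the count classify the lock
state. A hedged demo with the 32-bit variant (RWSEM_WAITING_BIAS is
assumed to be -0x00010000, the usual value from a part of the header this
hunk does not show):

	#include <stdio.h>

	#define RWSEM_UNLOCKED_VALUE	0x00000000
	#define RWSEM_ACTIVE_BIAS	0x00000001
	#define RWSEM_WAITING_BIAS	(-0x00010000)	/* assumed */
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

	int main(void)
	{
		long count = RWSEM_UNLOCKED_VALUE;

		count += RWSEM_ACTIVE_BIAS;		/* reader in */
		printf("one reader:  %ld (> 0 means read-locked)\n", count);
		count -= RWSEM_ACTIVE_BIAS;		/* reader out */
		count += RWSEM_ACTIVE_WRITE_BIAS;	/* writer in */
		printf("writer held: %ld (< 0 means write-locked)\n", count);
		return 0;
	}
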
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 8f8d759f6a7b..d382629a0172 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -14,6 +14,7 @@
 #include <asm/setup.h>
 #include <asm/processor.h>
 #include <asm/lowcore.h>
+#include <asm/cmpxchg.h>

 #ifdef __KERNEL__

@@ -120,161 +121,6 @@ extern int memcpy_real(void *, void *, size_t);

 #define nop() asm volatile("nop")

-#define xchg(ptr,x)						  \
-({								  \
-	__typeof__(*(ptr)) __ret;				  \
-	__ret = (__typeof__(*(ptr)))				  \
-		__xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
-	__ret;							  \
-})
-
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
-{
-	unsigned long addr, old;
-	int shift;
-
-	switch (size) {
-	case 1:
-		addr = (unsigned long) ptr;
-		shift = (3 ^ (addr & 3)) << 3;
-		addr ^= addr & 3;
-		asm volatile(
-			"	l	%0,%4\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%4\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) addr)
-			: "d" (x << shift), "d" (~(255 << shift)),
-			  "Q" (*(int *) addr) : "memory", "cc", "0");
-		return old >> shift;
-	case 2:
-		addr = (unsigned long) ptr;
-		shift = (2 ^ (addr & 2)) << 3;
-		addr ^= addr & 2;
-		asm volatile(
-			"	l	%0,%4\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%4\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) addr)
-			: "d" (x << shift), "d" (~(65535 << shift)),
-			  "Q" (*(int *) addr) : "memory", "cc", "0");
-		return old >> shift;
-	case 4:
-		asm volatile(
-			"	l	%0,%3\n"
-			"0:	cs	%0,%2,%3\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) ptr)
-			: "d" (x), "Q" (*(int *) ptr)
-			: "memory", "cc");
-		return old;
-#ifdef __s390x__
-	case 8:
-		asm volatile(
-			"	lg	%0,%3\n"
-			"0:	csg	%0,%2,%3\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=m" (*(long *) ptr)
-			: "d" (x), "Q" (*(long *) ptr)
-			: "memory", "cc");
-		return old;
-#endif /* __s390x__ */
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-#define cmpxchg(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
-				       (unsigned long)(n), sizeof(*(ptr))))
-
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
-	unsigned long addr, prev, tmp;
-	int shift;
-
-	switch (size) {
-	case 1:
-		addr = (unsigned long) ptr;
-		shift = (3 ^ (addr & 3)) << 3;
-		addr ^= addr & 3;
-		asm volatile(
-			"	l	%0,%2\n"
-			"0:	nr	%0,%5\n"
-			"	lr	%1,%0\n"
-			"	or	%0,%3\n"
-			"	or	%1,%4\n"
-			"	cs	%0,%1,%2\n"
-			"	jnl	1f\n"
-			"	xr	%1,%0\n"
-			"	nr	%1,%5\n"
-			"	jnz	0b\n"
-			"1:"
-			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
-			: "d" (old << shift), "d" (new << shift),
-			  "d" (~(255 << shift)), "Q" (*(int *) ptr)
-			: "memory", "cc");
-		return prev >> shift;
-	case 2:
-		addr = (unsigned long) ptr;
-		shift = (2 ^ (addr & 2)) << 3;
-		addr ^= addr & 2;
-		asm volatile(
-			"	l	%0,%2\n"
-			"0:	nr	%0,%5\n"
-			"	lr	%1,%0\n"
-			"	or	%0,%3\n"
-			"	or	%1,%4\n"
-			"	cs	%0,%1,%2\n"
-			"	jnl	1f\n"
-			"	xr	%1,%0\n"
-			"	nr	%1,%5\n"
-			"	jnz	0b\n"
-			"1:"
-			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
-			: "d" (old << shift), "d" (new << shift),
-			  "d" (~(65535 << shift)), "Q" (*(int *) ptr)
-			: "memory", "cc");
-		return prev >> shift;
-	case 4:
-		asm volatile(
-			"	cs	%0,%3,%1\n"
-			: "=&d" (prev), "=Q" (*(int *) ptr)
-			: "0" (old), "d" (new), "Q" (*(int *) ptr)
-			: "memory", "cc");
-		return prev;
-#ifdef __s390x__
-	case 8:
-		asm volatile(
-			"	csg	%0,%3,%1\n"
-			: "=&d" (prev), "=Q" (*(long *) ptr)
-			: "0" (old), "d" (new), "Q" (*(long *) ptr)
-			: "memory", "cc");
-		return prev;
-#endif /* __s390x__ */
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
@@ -353,46 +199,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 	__ctl_load(__dummy, cr, cr);	\
 })

-#include <linux/irqflags.h>
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-				      unsigned long old,
-				      unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-	case 2:
-	case 4:
-#ifdef __s390x__
-	case 8:
-#endif
-		return __cmpxchg(ptr, old, new, size);
-	default:
-		return __cmpxchg_local_generic(ptr, old, new, size);
-	}
-
-	return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
-			(unsigned long)(n), sizeof(*(ptr))))
-#ifdef __s390x__
-#define cmpxchg64_local(ptr, o, n)					\
-  ({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
-  })
-#else
-#define cmpxchg64_local(ptr, o, n)	__cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
 /*
  * Use to set psw mask except for the first byte which
  * won't be changed by this function.
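
The removed byte-wide __xchg above computed its shift as
(3 ^ (addr & 3)) << 3 (the code now lives in asm/cmpxchg.h): on big-endian
s390, byte (addr & 3) of the aligned word holds the target, and the XOR
converts that byte index into the left-shift distance within the 32-bit
word. A small demo of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned long addr;

		/* byte 0 is the most significant on big-endian: shift 24 */
		for (addr = 0; addr < 4; addr++)
			printf("byte offset %lu -> shift %lu bits\n",
			       addr & 3, (3 ^ (addr & 3)) << 3);
		return 0;
	}
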
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index f1f644f2240a..9074a54c4d10 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,6 +22,7 @@
  */

 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 04d6b95a89c6..eeb52ccf499f 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -30,14 +30,6 @@ typedef __signed__ long saddr_t;

 #ifndef __ASSEMBLY__

-typedef u64 dma64_addr_t;
-#ifdef __s390x__
-/* DMA addresses come in 32-bit and 64-bit flavours. */
-typedef u64 dma_addr_t;
-#else
-typedef u32 dma_addr_t;
-#endif
-
 #ifndef __s390x__
 typedef union {
 	unsigned long long pair;
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index d6b1ed0ec52b..2d9ea11f919a 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -83,8 +83,8 @@ struct uaccess_ops {
 	size_t (*clear_user)(size_t, void __user *);
 	size_t (*strnlen_user)(size_t, const char __user *);
 	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
-	int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
-	int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
+	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
+	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
 };

 extern struct uaccess_ops uaccess;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 1049ef27c15e..e82152572377 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -272,7 +272,11 @@
 #define __NR_fanotify_init	332
 #define __NR_fanotify_mark	333
 #define __NR_prlimit64		334
-#define NR_syscalls 335
+#define __NR_name_to_handle_at	335
+#define __NR_open_by_handle_at	336
+#define __NR_clock_adjtime	337
+#define __NR_syncfs		338
+#define NR_syscalls 339

 /*
  * There are some system calls that are not present on 64 bit, some
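
Among the syscall numbers added above, syncfs is 338 on s390. A hedged
usage sketch via the raw syscall interface (SYS_syncfs resolves to the
per-architecture number once libc headers know it; newer glibc also
provides a syncfs() wrapper):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open(".", O_RDONLY);	/* any fd on the filesystem */

		if (fd < 0)
			return 1;
		if (syscall(SYS_syncfs, fd) == 0)
			printf("flushed the filesystem containing the cwd\n");
		close(fd);
		return 0;
	}
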