Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--  arch/s390/include/asm/Kbuild              |    3
-rw-r--r--  arch/s390/include/asm/appldata.h          |   24
-rw-r--r--  arch/s390/include/asm/atomic.h            |   95
-rw-r--r--  arch/s390/include/asm/barrier.h           |   13
-rw-r--r--  arch/s390/include/asm/bitops.h            |   28
-rw-r--r--  arch/s390/include/asm/cacheflush.h        |    4
-rw-r--r--  arch/s390/include/asm/cmpxchg.h           |  247
-rw-r--r--  arch/s390/include/asm/cpu_mf.h            |   14
-rw-r--r--  arch/s390/include/asm/cputime.h           |   92
-rw-r--r--  arch/s390/include/asm/ctl_reg.h           |   14
-rw-r--r--  arch/s390/include/asm/debug.h             |   29
-rw-r--r--  arch/s390/include/asm/dis.h               |   13
-rw-r--r--  arch/s390/include/asm/dma-mapping.h       |   33
-rw-r--r--  arch/s390/include/asm/elf.h               |   23
-rw-r--r--  arch/s390/include/asm/ftrace.h            |   70
-rw-r--r--  arch/s390/include/asm/idals.h             |   16
-rw-r--r--  arch/s390/include/asm/idle.h              |   27
-rw-r--r--  arch/s390/include/asm/io.h                |   19
-rw-r--r--  arch/s390/include/asm/ipl.h               |   12
-rw-r--r--  arch/s390/include/asm/irq.h               |   15
-rw-r--r--  arch/s390/include/asm/irqflags.h          |    2
-rw-r--r--  arch/s390/include/asm/jump_label.h        |   22
-rw-r--r--  arch/s390/include/asm/kprobes.h           |    5
-rw-r--r--  arch/s390/include/asm/kvm_host.h          |  234
-rw-r--r--  arch/s390/include/asm/livepatch.h         |   43
-rw-r--r--  arch/s390/include/asm/lowcore.h           |  172
-rw-r--r--  arch/s390/include/asm/mman.h              |    2
-rw-r--r--  arch/s390/include/asm/mmu_context.h       |   17
-rw-r--r--  arch/s390/include/asm/nmi.h               |    2
-rw-r--r--  arch/s390/include/asm/page.h              |   13
-rw-r--r--  arch/s390/include/asm/pci.h               |   15
-rw-r--r--  arch/s390/include/asm/pci_io.h            |    7
-rw-r--r--  arch/s390/include/asm/percpu.h            |   20
-rw-r--r--  arch/s390/include/asm/perf_event.h        |    3
-rw-r--r--  arch/s390/include/asm/pgalloc.h           |   35
-rw-r--r--  arch/s390/include/asm/pgtable.h           |  461
-rw-r--r--  arch/s390/include/asm/processor.h         |   87
-rw-r--r--  arch/s390/include/asm/ptrace.h            |   10
-rw-r--r--  arch/s390/include/asm/qdio.h              |   14
-rw-r--r--  arch/s390/include/asm/reset.h             |    3
-rw-r--r--  arch/s390/include/asm/runtime_instr.h     |   10
-rw-r--r--  arch/s390/include/asm/rwsem.h             |   81
-rw-r--r--  arch/s390/include/asm/scatterlist.h       |    3
-rw-r--r--  arch/s390/include/asm/sclp.h              |   11
-rw-r--r--  arch/s390/include/asm/setup.h             |   40
-rw-r--r--  arch/s390/include/asm/sfp-util.h          |   10
-rw-r--r--  arch/s390/include/asm/sigp.h              |    8
-rw-r--r--  arch/s390/include/asm/smp.h               |    6
-rw-r--r--  arch/s390/include/asm/sparsemem.h         |    9
-rw-r--r--  arch/s390/include/asm/spinlock.h          |  144
-rw-r--r--  arch/s390/include/asm/spinlock_types.h    |    1
-rw-r--r--  arch/s390/include/asm/string.h            |    1
-rw-r--r--  arch/s390/include/asm/switch_to.h         |   70
-rw-r--r--  arch/s390/include/asm/syscall.h           |    4
-rw-r--r--  arch/s390/include/asm/sysinfo.h           |   30
-rw-r--r--  arch/s390/include/asm/thread_info.h       |   20
-rw-r--r--  arch/s390/include/asm/timex.h             |   10
-rw-r--r--  arch/s390/include/asm/tlb.h               |    7
-rw-r--r--  arch/s390/include/asm/tlbflush.h          |    7
-rw-r--r--  arch/s390/include/asm/topology.h          |   24
-rw-r--r--  arch/s390/include/asm/types.h             |   17
-rw-r--r--  arch/s390/include/asm/uaccess.h           |    1
-rw-r--r--  arch/s390/include/asm/unistd.h            |    8
-rw-r--r--  arch/s390/include/asm/uprobes.h           |   42
-rw-r--r--  arch/s390/include/asm/vdso.h              |   20
-rw-r--r--  arch/s390/include/asm/vtimer.h            |    2
66 files changed, 1080 insertions(+), 1464 deletions(-)
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 57892a8a9055..c631f98fd524 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,7 +1,8 @@
1 1
2 2
3generic-y += clkdev.h 3generic-y += clkdev.h
4generic-y += hash.h 4generic-y += irq_work.h
5generic-y += mcs_spinlock.h 5generic-y += mcs_spinlock.h
6generic-y += preempt.h 6generic-y += preempt.h
7generic-y += scatterlist.h
7generic-y += trace_clock.h 8generic-y += trace_clock.h
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index 32a705987156..16887c5fd989 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -9,28 +9,6 @@
9 9
10#include <asm/io.h> 10#include <asm/io.h>
11 11
12#ifndef CONFIG_64BIT
13
14#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
15#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
16#define APPLDATA_GEN_EVENT_REC 0x02
17#define APPLDATA_START_CONFIG_REC 0x03
18
19/*
20 * Parameter list for DIAGNOSE X'DC'
21 */
22struct appldata_parameter_list {
23 u16 diag; /* The DIAGNOSE code X'00DC' */
24 u8 function; /* The function code for the DIAGNOSE */
25 u8 parlist_length; /* Length of the parameter list */
26 u32 product_id_addr; /* Address of the 16-byte product ID */
27 u16 reserved;
28 u16 buffer_length; /* Length of the application data buffer */
29 u32 buffer_addr; /* Address of the application data buffer */
30} __attribute__ ((packed));
31
32#else /* CONFIG_64BIT */
33
34#define APPLDATA_START_INTERVAL_REC 0x80 12#define APPLDATA_START_INTERVAL_REC 0x80
35#define APPLDATA_STOP_REC 0x81 13#define APPLDATA_STOP_REC 0x81
36#define APPLDATA_GEN_EVENT_REC 0x82 14#define APPLDATA_GEN_EVENT_REC 0x82
@@ -51,8 +29,6 @@ struct appldata_parameter_list {
51 u64 buffer_addr; 29 u64 buffer_addr;
52} __attribute__ ((packed)); 30} __attribute__ ((packed));
53 31
54#endif /* CONFIG_64BIT */
55
56struct appldata_product_id { 32struct appldata_product_id {
57 char prod_nr[7]; /* product number */ 33 char prod_nr[7]; /* product number */
58 u16 prod_fn; /* product function */ 34 u16 prod_fn; /* product function */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fa934fe080c1..adbe3802e377 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -160,8 +160,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
160 160
161#define ATOMIC64_INIT(i) { (i) } 161#define ATOMIC64_INIT(i) { (i) }
162 162
163#ifdef CONFIG_64BIT
164
165#define __ATOMIC64_NO_BARRIER "\n" 163#define __ATOMIC64_NO_BARRIER "\n"
166 164
167#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 165#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -274,99 +272,6 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
274 272
275#undef __ATOMIC64_LOOP 273#undef __ATOMIC64_LOOP
276 274
277#else /* CONFIG_64BIT */
278
279typedef struct {
280 long long counter;
281} atomic64_t;
282
283static inline long long atomic64_read(const atomic64_t *v)
284{
285 register_pair rp;
286
287 asm volatile(
288 " lm %0,%N0,%1"
289 : "=&d" (rp) : "Q" (v->counter) );
290 return rp.pair;
291}
292
293static inline void atomic64_set(atomic64_t *v, long long i)
294{
295 register_pair rp = {.pair = i};
296
297 asm volatile(
298 " stm %1,%N1,%0"
299 : "=Q" (v->counter) : "d" (rp) );
300}
301
302static inline long long atomic64_xchg(atomic64_t *v, long long new)
303{
304 register_pair rp_new = {.pair = new};
305 register_pair rp_old;
306
307 asm volatile(
308 " lm %0,%N0,%1\n"
309 "0: cds %0,%2,%1\n"
310 " jl 0b\n"
311 : "=&d" (rp_old), "+Q" (v->counter)
312 : "d" (rp_new)
313 : "cc");
314 return rp_old.pair;
315}
316
317static inline long long atomic64_cmpxchg(atomic64_t *v,
318 long long old, long long new)
319{
320 register_pair rp_old = {.pair = old};
321 register_pair rp_new = {.pair = new};
322
323 asm volatile(
324 " cds %0,%2,%1"
325 : "+&d" (rp_old), "+Q" (v->counter)
326 : "d" (rp_new)
327 : "cc");
328 return rp_old.pair;
329}
330
331
332static inline long long atomic64_add_return(long long i, atomic64_t *v)
333{
334 long long old, new;
335
336 do {
337 old = atomic64_read(v);
338 new = old + i;
339 } while (atomic64_cmpxchg(v, old, new) != old);
340 return new;
341}
342
343static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
344{
345 long long old, new;
346
347 do {
348 old = atomic64_read(v);
349 new = old | mask;
350 } while (atomic64_cmpxchg(v, old, new) != old);
351}
352
353static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
354{
355 long long old, new;
356
357 do {
358 old = atomic64_read(v);
359 new = old & mask;
360 } while (atomic64_cmpxchg(v, old, new) != old);
361}
362
363static inline void atomic64_add(long long i, atomic64_t *v)
364{
365 atomic64_add_return(i, v);
366}
367
368#endif /* CONFIG_64BIT */
369
370static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) 275static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
371{ 276{
372 long long c, old; 277 long long c, old;
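The deleted 31-bit fallback is a textbook pattern: with no native 64-bit atomics, every atomic64 operation was derived from one doubleword compare-and-swap (CDS) loop. A portable sketch of that derivation, using GCC's __sync builtin in place of the CDS asm (helper names are mine, not the kernel's):

#include <stdio.h>

/* Derive add_return from a single 8-byte compare-and-swap, as the
 * removed 31-bit code did with the CDS instruction. */
static long long a64_cmpxchg(long long *p, long long old, long long new)
{
	return __sync_val_compare_and_swap(p, old, new);
}

static long long a64_add_return(long long i, long long *p)
{
	long long old, new;

	do {
		old = *p;		/* may be stale by the time we CAS */
		new = old + i;
	} while (a64_cmpxchg(p, old, new) != old);	/* retry on race */
	return new;
}

int main(void)
{
	long long v = 40;

	printf("%lld\n", a64_add_return(2, &v));	/* prints 42 */
	return 0;
}

With that fallback gone, only the native 64-bit paths remain.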
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 19ff956b752b..8d724718ec21 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -15,18 +15,23 @@
15 15
16#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 16#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
17/* Fast-BCR without checkpoint synchronization */ 17/* Fast-BCR without checkpoint synchronization */
18#define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0) 18#define __ASM_BARRIER "bcr 14,0\n"
19#else 19#else
20#define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0) 20#define __ASM_BARRIER "bcr 15,0\n"
21#endif 21#endif
22 22
23#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
24
23#define rmb() mb() 25#define rmb() mb()
24#define wmb() mb() 26#define wmb() mb()
25#define read_barrier_depends() do { } while(0) 27#define dma_rmb() rmb()
28#define dma_wmb() wmb()
26#define smp_mb() mb() 29#define smp_mb() mb()
27#define smp_rmb() rmb() 30#define smp_rmb() rmb()
28#define smp_wmb() wmb() 31#define smp_wmb() wmb()
29#define smp_read_barrier_depends() read_barrier_depends() 32
33#define read_barrier_depends() do { } while (0)
34#define smp_read_barrier_depends() do { } while (0)
30 35
31#define smp_mb__before_atomic() smp_mb() 36#define smp_mb__before_atomic() smp_mb()
32#define smp_mb__after_atomic() smp_mb() 37#define smp_mb__after_atomic() smp_mb()
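The consolidated mb() is the only real fence; smp_wmb()/smp_rmb() are aliases so callers can document intent when publishing data to another CPU. A hedged userspace analogue of that pairing, with GCC's __sync_synchronize() standing in for the bcr-based barrier (this illustrates the semantics, not the s390 implementation):

#include <pthread.h>
#include <stdio.h>

static int data;
static int ready;

static void *producer(void *arg)
{
	data = 42;			/* payload first */
	__sync_synchronize();		/* smp_wmb(): payload before flag */
	__sync_fetch_and_add(&ready, 1);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	while (!__sync_fetch_and_add(&ready, 0))
		;			/* spin until the flag is seen */
	__sync_synchronize();		/* smp_rmb(): flag before payload */
	printf("%d\n", data);		/* guaranteed to print 42 */
	pthread_join(t, NULL);
	return 0;
}

dma_rmb()/dma_wmb() map to the same instruction here; they exist as separate names so drivers can make ordering against DMA-coherent memory explicit.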
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 520542477678..9b68e98a724f 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -51,32 +51,6 @@
51 51
52#define __BITOPS_NO_BARRIER "\n" 52#define __BITOPS_NO_BARRIER "\n"
53 53
54#ifndef CONFIG_64BIT
55
56#define __BITOPS_OR "or"
57#define __BITOPS_AND "nr"
58#define __BITOPS_XOR "xr"
59#define __BITOPS_BARRIER "\n"
60
61#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
62({ \
63 unsigned long __old, __new; \
64 \
65 typecheck(unsigned long *, (__addr)); \
66 asm volatile( \
67 " l %0,%2\n" \
68 "0: lr %1,%0\n" \
69 __op_string " %1,%3\n" \
70 " cs %0,%1,%2\n" \
71 " jl 0b" \
72 : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
73 : "d" (__val) \
74 : "cc", "memory"); \
75 __old; \
76})
77
78#else /* CONFIG_64BIT */
79
80#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 54#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
81 55
82#define __BITOPS_OR "laog" 56#define __BITOPS_OR "laog"
@@ -125,8 +99,6 @@
125 99
126#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 100#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
127 101
128#endif /* CONFIG_64BIT */
129
130#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) 102#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
131 103
132static inline unsigned long * 104static inline unsigned long *
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 3e20383d0921..58fae7d098cf 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -4,10 +4,6 @@
4/* Caches aren't brain-dead on the s390. */ 4/* Caches aren't brain-dead on the s390. */
5#include <asm-generic/cacheflush.h> 5#include <asm-generic/cacheflush.h>
6 6
7#ifdef CONFIG_DEBUG_PAGEALLOC
8void kernel_map_pages(struct page *page, int numpages, int enable);
9#endif
10
11int set_memory_ro(unsigned long addr, int numpages); 7int set_memory_ro(unsigned long addr, int numpages);
12int set_memory_rw(unsigned long addr, int numpages); 8int set_memory_rw(unsigned long addr, int numpages);
13int set_memory_nx(unsigned long addr, int numpages); 9int set_memory_nx(unsigned long addr, int numpages);
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 4236408070e5..4eadec466b8c 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,200 +11,28 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/bug.h> 12#include <linux/bug.h>
13 13
14extern void __xchg_called_with_bad_pointer(void); 14#define cmpxchg(ptr, o, n) \
15 15({ \
16static inline unsigned long __xchg(unsigned long x, void *ptr, int size) 16 __typeof__(*(ptr)) __o = (o); \
17{ 17 __typeof__(*(ptr)) __n = (n); \
18 unsigned long addr, old; 18 (__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\
19 int shift;
20
21 switch (size) {
22 case 1:
23 addr = (unsigned long) ptr;
24 shift = (3 ^ (addr & 3)) << 3;
25 addr ^= addr & 3;
26 asm volatile(
27 " l %0,%4\n"
28 "0: lr 0,%0\n"
29 " nr 0,%3\n"
30 " or 0,%2\n"
31 " cs %0,0,%4\n"
32 " jl 0b\n"
33 : "=&d" (old), "=Q" (*(int *) addr)
34 : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
35 "Q" (*(int *) addr) : "memory", "cc", "0");
36 return old >> shift;
37 case 2:
38 addr = (unsigned long) ptr;
39 shift = (2 ^ (addr & 2)) << 3;
40 addr ^= addr & 2;
41 asm volatile(
42 " l %0,%4\n"
43 "0: lr 0,%0\n"
44 " nr 0,%3\n"
45 " or 0,%2\n"
46 " cs %0,0,%4\n"
47 " jl 0b\n"
48 : "=&d" (old), "=Q" (*(int *) addr)
49 : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
50 "Q" (*(int *) addr) : "memory", "cc", "0");
51 return old >> shift;
52 case 4:
53 asm volatile(
54 " l %0,%3\n"
55 "0: cs %0,%2,%3\n"
56 " jl 0b\n"
57 : "=&d" (old), "=Q" (*(int *) ptr)
58 : "d" (x), "Q" (*(int *) ptr)
59 : "memory", "cc");
60 return old;
61#ifdef CONFIG_64BIT
62 case 8:
63 asm volatile(
64 " lg %0,%3\n"
65 "0: csg %0,%2,%3\n"
66 " jl 0b\n"
67 : "=&d" (old), "=m" (*(long *) ptr)
68 : "d" (x), "Q" (*(long *) ptr)
69 : "memory", "cc");
70 return old;
71#endif /* CONFIG_64BIT */
72 }
73 __xchg_called_with_bad_pointer();
74 return x;
75}
76
77#define xchg(ptr, x) \
78({ \
79 __typeof__(*(ptr)) __ret; \
80 __ret = (__typeof__(*(ptr))) \
81 __xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
82 __ret; \
83}) 19})
84 20
85/* 21#define cmpxchg64 cmpxchg
86 * Atomic compare and exchange. Compare OLD with MEM, if identical, 22#define cmpxchg_local cmpxchg
87 * store NEW in MEM. Return the initial value in MEM. Success is 23#define cmpxchg64_local cmpxchg
88 * indicated by comparing RETURN with OLD.
89 */
90
91#define __HAVE_ARCH_CMPXCHG
92
93extern void __cmpxchg_called_with_bad_pointer(void);
94
95static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
96 unsigned long new, int size)
97{
98 unsigned long addr, prev, tmp;
99 int shift;
100
101 switch (size) {
102 case 1:
103 addr = (unsigned long) ptr;
104 shift = (3 ^ (addr & 3)) << 3;
105 addr ^= addr & 3;
106 asm volatile(
107 " l %0,%2\n"
108 "0: nr %0,%5\n"
109 " lr %1,%0\n"
110 " or %0,%3\n"
111 " or %1,%4\n"
112 " cs %0,%1,%2\n"
113 " jnl 1f\n"
114 " xr %1,%0\n"
115 " nr %1,%5\n"
116 " jnz 0b\n"
117 "1:"
118 : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
119 : "d" ((old & 0xff) << shift),
120 "d" ((new & 0xff) << shift),
121 "d" (~(0xff << shift))
122 : "memory", "cc");
123 return prev >> shift;
124 case 2:
125 addr = (unsigned long) ptr;
126 shift = (2 ^ (addr & 2)) << 3;
127 addr ^= addr & 2;
128 asm volatile(
129 " l %0,%2\n"
130 "0: nr %0,%5\n"
131 " lr %1,%0\n"
132 " or %0,%3\n"
133 " or %1,%4\n"
134 " cs %0,%1,%2\n"
135 " jnl 1f\n"
136 " xr %1,%0\n"
137 " nr %1,%5\n"
138 " jnz 0b\n"
139 "1:"
140 : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
141 : "d" ((old & 0xffff) << shift),
142 "d" ((new & 0xffff) << shift),
143 "d" (~(0xffff << shift))
144 : "memory", "cc");
145 return prev >> shift;
146 case 4:
147 asm volatile(
148 " cs %0,%3,%1\n"
149 : "=&d" (prev), "=Q" (*(int *) ptr)
150 : "0" (old), "d" (new), "Q" (*(int *) ptr)
151 : "memory", "cc");
152 return prev;
153#ifdef CONFIG_64BIT
154 case 8:
155 asm volatile(
156 " csg %0,%3,%1\n"
157 : "=&d" (prev), "=Q" (*(long *) ptr)
158 : "0" (old), "d" (new), "Q" (*(long *) ptr)
159 : "memory", "cc");
160 return prev;
161#endif /* CONFIG_64BIT */
162 }
163 __cmpxchg_called_with_bad_pointer();
164 return old;
165}
166 24
167#define cmpxchg(ptr, o, n) \ 25#define xchg(ptr, x) \
168({ \
169 __typeof__(*(ptr)) __ret; \
170 __ret = (__typeof__(*(ptr))) \
171 __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
172 sizeof(*(ptr))); \
173 __ret; \
174})
175
176#ifdef CONFIG_64BIT
177#define cmpxchg64(ptr, o, n) \
178({ \ 26({ \
179 cmpxchg((ptr), (o), (n)); \ 27 __typeof__(ptr) __ptr = (ptr); \
28 __typeof__(*(ptr)) __old; \
29 do { \
30 __old = *__ptr; \
31 } while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \
32 __old; \
180}) 33})
181#else /* CONFIG_64BIT */
182static inline unsigned long long __cmpxchg64(void *ptr,
183 unsigned long long old,
184 unsigned long long new)
185{
186 register_pair rp_old = {.pair = old};
187 register_pair rp_new = {.pair = new};
188 unsigned long long *ullptr = ptr;
189
190 asm volatile(
191 " cds %0,%2,%1"
192 : "+d" (rp_old), "+Q" (*ullptr)
193 : "d" (rp_new)
194 : "memory", "cc");
195 return rp_old.pair;
196}
197 34
198#define cmpxchg64(ptr, o, n) \ 35#define __HAVE_ARCH_CMPXCHG
199({ \
200 __typeof__(*(ptr)) __ret; \
201 __ret = (__typeof__(*(ptr))) \
202 __cmpxchg64((ptr), \
203 (unsigned long long)(o), \
204 (unsigned long long)(n)); \
205 __ret; \
206})
207#endif /* CONFIG_64BIT */
208 36
209#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \ 37#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
210({ \ 38({ \
@@ -252,53 +80,12 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
252({ \ 80({ \
253 __typeof__(p1) __p1 = (p1); \ 81 __typeof__(p1) __p1 = (p1); \
254 __typeof__(p2) __p2 = (p2); \ 82 __typeof__(p2) __p2 = (p2); \
255 int __ret; \
256 BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \ 83 BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
257 BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \ 84 BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
258 VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\ 85 VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
259 if (sizeof(long) == 4) \ 86 __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
260 __ret = __cmpxchg_double_4(__p1, __p2, o1, o2, n1, n2); \
261 else \
262 __ret = __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
263 __ret; \
264}) 87})
265 88
266#define system_has_cmpxchg_double() 1 89#define system_has_cmpxchg_double() 1
267 90
268#include <asm-generic/cmpxchg-local.h>
269
270static inline unsigned long __cmpxchg_local(void *ptr,
271 unsigned long old,
272 unsigned long new, int size)
273{
274 switch (size) {
275 case 1:
276 case 2:
277 case 4:
278#ifdef CONFIG_64BIT
279 case 8:
280#endif
281 return __cmpxchg(ptr, old, new, size);
282 default:
283 return __cmpxchg_local_generic(ptr, old, new, size);
284 }
285
286 return old;
287}
288
289/*
290 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
291 * them available.
292 */
293#define cmpxchg_local(ptr, o, n) \
294({ \
295 __typeof__(*(ptr)) __ret; \
296 __ret = (__typeof__(*(ptr))) \
297 __cmpxchg_local((ptr), (unsigned long)(o), \
298 (unsigned long)(n), sizeof(*(ptr))); \
299 __ret; \
300})
301
302#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n))
303
304#endif /* __ASM_CMPXCHG_H */ 91#endif /* __ASM_CMPXCHG_H */
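The rewrite deletes every hand-rolled CS/CSG loop: cmpxchg() now expands to __sync_val_compare_and_swap(), letting the compiler emit the right compare-and-swap for each operand size (including the 1- and 2-byte shift/mask emulation the old code spelled out by hand), and xchg() becomes a retry loop around __sync_bool_compare_and_swap(). A minimal userspace sketch of the new xchg() shape (function name is made up):

#include <stdio.h>

static unsigned long my_xchg(unsigned long *ptr, unsigned long new)
{
	unsigned long old;

	do {
		old = *ptr;		/* snapshot the current value */
	} while (!__sync_bool_compare_and_swap(ptr, old, new));
	return old;			/* the value that got replaced */
}

int main(void)
{
	unsigned long v = 1;
	unsigned long prev = my_xchg(&v, 42);

	printf("prev=%lu now=%lu\n", prev, v);	/* prev=1 now=42 */
	return 0;
}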
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index cb700d54bd83..5243a8679a1d 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -189,6 +189,20 @@ static inline int ecctr(u64 ctr, u64 *val)
189 return cc; 189 return cc;
190} 190}
191 191
192/* Store CPU counter multiple for the MT utilization counter set */
193static inline int stcctm5(u64 num, u64 *val)
194{
195 typedef struct { u64 _[num]; } addrtype;
196 int cc;
197
198 asm volatile (
199 " .insn rsy,0xeb0000000017,%2,5,%1\n"
200 " ipm %0\n"
201 " srl %0,28\n"
202 : "=d" (cc), "=Q" (*(addrtype *) val) : "d" (num) : "cc");
203 return cc;
204}
205
192/* Query sampling information */ 206/* Query sampling information */
193static inline int qsi(struct hws_qsi_info_block *info) 207static inline int qsi(struct hws_qsi_info_block *info)
194{ 208{
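The typedef inside stcctm5() is the kernel's idiom for variable-length asm memory operands (__ctl_load/__ctl_store further down use a fixed-size variant of the same trick): a struct whose array length is the runtime count makes the memory operand cover all num * 8 bytes, so the compiler knows the full extent the instruction writes. The idiom in isolation, with an empty asm body standing in for the real instruction:

#include <stdio.h>

/* Sketch: tell the compiler an instruction writes num consecutive
 * unsigned longs starting at val, without a fixed-size type. */
static inline void mark_written(unsigned long num, unsigned long *val)
{
	typedef struct { unsigned long _[num]; } addrtype;

	asm volatile("" : "=m" (*(addrtype *) val));
}

int main(void)
{
	unsigned long buf[4];

	mark_written(4, buf);	/* buf[0..3] now count as clobbered */
	return 0;
}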
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f65bd3634519..221b454c734a 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -8,27 +8,21 @@
8#define _S390_CPUTIME_H 8#define _S390_CPUTIME_H
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/percpu.h>
12#include <linux/spinlock.h>
13#include <asm/div64.h> 11#include <asm/div64.h>
14 12
13#define CPUTIME_PER_USEC 4096ULL
14#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
15 15
16/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ 16/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
17 17
18typedef unsigned long long __nocast cputime_t; 18typedef unsigned long long __nocast cputime_t;
19typedef unsigned long long __nocast cputime64_t; 19typedef unsigned long long __nocast cputime64_t;
20 20
21#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
22
21static inline unsigned long __div(unsigned long long n, unsigned long base) 23static inline unsigned long __div(unsigned long long n, unsigned long base)
22{ 24{
23#ifndef CONFIG_64BIT
24 register_pair rp;
25
26 rp.pair = n >> 1;
27 asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
28 return rp.subreg.odd;
29#else /* CONFIG_64BIT */
30 return n / base; 25 return n / base;
31#endif /* CONFIG_64BIT */
32} 26}
33 27
34#define cputime_one_jiffy jiffies_to_cputime(1) 28#define cputime_one_jiffy jiffies_to_cputime(1)
@@ -38,24 +32,24 @@ static inline unsigned long __div(unsigned long long n, unsigned long base)
38 */ 32 */
39static inline unsigned long cputime_to_jiffies(const cputime_t cputime) 33static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
40{ 34{
41 return __div((__force unsigned long long) cputime, 4096000000ULL / HZ); 35 return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ);
42} 36}
43 37
44static inline cputime_t jiffies_to_cputime(const unsigned int jif) 38static inline cputime_t jiffies_to_cputime(const unsigned int jif)
45{ 39{
46 return (__force cputime_t)(jif * (4096000000ULL / HZ)); 40 return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ));
47} 41}
48 42
49static inline u64 cputime64_to_jiffies64(cputime64_t cputime) 43static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
50{ 44{
51 unsigned long long jif = (__force unsigned long long) cputime; 45 unsigned long long jif = (__force unsigned long long) cputime;
52 do_div(jif, 4096000000ULL / HZ); 46 do_div(jif, CPUTIME_PER_SEC / HZ);
53 return jif; 47 return jif;
54} 48}
55 49
56static inline cputime64_t jiffies64_to_cputime64(const u64 jif) 50static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
57{ 51{
58 return (__force cputime64_t)(jif * (4096000000ULL / HZ)); 52 return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ));
59} 53}
60 54
61/* 55/*
@@ -68,7 +62,7 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
68 62
69static inline cputime_t usecs_to_cputime(const unsigned int m) 63static inline cputime_t usecs_to_cputime(const unsigned int m)
70{ 64{
71 return (__force cputime_t)(m * 4096ULL); 65 return (__force cputime_t)(m * CPUTIME_PER_USEC);
72} 66}
73 67
74#define usecs_to_cputime64(m) usecs_to_cputime(m) 68#define usecs_to_cputime64(m) usecs_to_cputime(m)
@@ -78,12 +72,12 @@ static inline cputime_t usecs_to_cputime(const unsigned int m)
78 */ 72 */
79static inline unsigned int cputime_to_secs(const cputime_t cputime) 73static inline unsigned int cputime_to_secs(const cputime_t cputime)
80{ 74{
81 return __div((__force unsigned long long) cputime, 2048000000) >> 1; 75 return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1;
82} 76}
83 77
84static inline cputime_t secs_to_cputime(const unsigned int s) 78static inline cputime_t secs_to_cputime(const unsigned int s)
85{ 79{
86 return (__force cputime_t)(s * 4096000000ULL); 80 return (__force cputime_t)(s * CPUTIME_PER_SEC);
87} 81}
88 82
89/* 83/*
@@ -91,25 +85,16 @@ static inline cputime_t secs_to_cputime(const unsigned int s)
91 */ 85 */
92static inline cputime_t timespec_to_cputime(const struct timespec *value) 86static inline cputime_t timespec_to_cputime(const struct timespec *value)
93{ 87{
94 unsigned long long ret = value->tv_sec * 4096000000ULL; 88 unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
95 return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000); 89 return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC));
96} 90}
97 91
98static inline void cputime_to_timespec(const cputime_t cputime, 92static inline void cputime_to_timespec(const cputime_t cputime,
99 struct timespec *value) 93 struct timespec *value)
100{ 94{
101 unsigned long long __cputime = (__force unsigned long long) cputime; 95 unsigned long long __cputime = (__force unsigned long long) cputime;
102#ifndef CONFIG_64BIT 96 value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
103 register_pair rp; 97 value->tv_sec = __cputime / CPUTIME_PER_SEC;
104
105 rp.pair = __cputime >> 1;
106 asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
107 value->tv_nsec = rp.subreg.even * 1000 / 4096;
108 value->tv_sec = rp.subreg.odd;
109#else
110 value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
111 value->tv_sec = __cputime / 4096000000ULL;
112#endif
113} 98}
114 99
115/* 100/*
@@ -119,25 +104,16 @@ static inline void cputime_to_timespec(const cputime_t cputime,
119 */ 104 */
120static inline cputime_t timeval_to_cputime(const struct timeval *value) 105static inline cputime_t timeval_to_cputime(const struct timeval *value)
121{ 106{
122 unsigned long long ret = value->tv_sec * 4096000000ULL; 107 unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
123 return (__force cputime_t)(ret + value->tv_usec * 4096ULL); 108 return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC);
124} 109}
125 110
126static inline void cputime_to_timeval(const cputime_t cputime, 111static inline void cputime_to_timeval(const cputime_t cputime,
127 struct timeval *value) 112 struct timeval *value)
128{ 113{
129 unsigned long long __cputime = (__force unsigned long long) cputime; 114 unsigned long long __cputime = (__force unsigned long long) cputime;
130#ifndef CONFIG_64BIT 115 value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
131 register_pair rp; 116 value->tv_sec = __cputime / CPUTIME_PER_SEC;
132
133 rp.pair = __cputime >> 1;
134 asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
135 value->tv_usec = rp.subreg.even / 4096;
136 value->tv_sec = rp.subreg.odd;
137#else
138 value->tv_usec = (__cputime % 4096000000ULL) / 4096;
139 value->tv_sec = __cputime / 4096000000ULL;
140#endif
141} 117}
142 118
143/* 119/*
@@ -146,13 +122,13 @@ static inline void cputime_to_timeval(const cputime_t cputime,
146static inline clock_t cputime_to_clock_t(cputime_t cputime) 122static inline clock_t cputime_to_clock_t(cputime_t cputime)
147{ 123{
148 unsigned long long clock = (__force unsigned long long) cputime; 124 unsigned long long clock = (__force unsigned long long) cputime;
149 do_div(clock, 4096000000ULL / USER_HZ); 125 do_div(clock, CPUTIME_PER_SEC / USER_HZ);
150 return clock; 126 return clock;
151} 127}
152 128
153static inline cputime_t clock_t_to_cputime(unsigned long x) 129static inline cputime_t clock_t_to_cputime(unsigned long x)
154{ 130{
155 return (__force cputime_t)(x * (4096000000ULL / USER_HZ)); 131 return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ));
156} 132}
157 133
158/* 134/*
@@ -161,32 +137,12 @@ static inline cputime_t clock_t_to_cputime(unsigned long x)
161static inline clock_t cputime64_to_clock_t(cputime64_t cputime) 137static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
162{ 138{
163 unsigned long long clock = (__force unsigned long long) cputime; 139 unsigned long long clock = (__force unsigned long long) cputime;
164 do_div(clock, 4096000000ULL / USER_HZ); 140 do_div(clock, CPUTIME_PER_SEC / USER_HZ);
165 return clock; 141 return clock;
166} 142}
167 143
168struct s390_idle_data { 144cputime64_t arch_cpu_idle_time(int cpu);
169 int nohz_delay;
170 unsigned int sequence;
171 unsigned long long idle_count;
172 unsigned long long idle_time;
173 unsigned long long clock_idle_enter;
174 unsigned long long clock_idle_exit;
175 unsigned long long timer_idle_enter;
176 unsigned long long timer_idle_exit;
177};
178
179DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
180
181cputime64_t s390_get_idle_time(int cpu);
182
183#define arch_idle_time(cpu) s390_get_idle_time(cpu)
184
185static inline int s390_nohz_delay(int cpu)
186{
187 return __get_cpu_var(s390_idle).nohz_delay != 0;
188}
189 145
190#define arch_needs_cpu(cpu) s390_nohz_delay(cpu) 146#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
191 147
192#endif /* _S390_CPUTIME_H */ 148#endif /* _S390_CPUTIME_H */
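The named constants make the unit arithmetic auditable: the CPU timer ticks 4096 times per microsecond (2**-12 µs resolution), so CPUTIME_PER_SEC = 4096 * 10^6 = 4096000000, the magic number the old code repeated everywhere. A quick userspace check of the timeval split, mirroring cputime_to_timeval():

#include <stdio.h>

#define USEC_PER_SEC		1000000ULL
#define CPUTIME_PER_USEC	4096ULL
#define CPUTIME_PER_SEC		(CPUTIME_PER_USEC * USEC_PER_SEC)

int main(void)
{
	/* 3.5 s expressed in cputime ticks, then split back apart. */
	unsigned long long cputime = 3 * CPUTIME_PER_SEC
				   + 500000 * CPUTIME_PER_USEC;

	printf("%llu ticks/sec\n", CPUTIME_PER_SEC);	/* 4096000000 */
	printf("sec=%llu usec=%llu\n",
	       cputime / CPUTIME_PER_SEC,
	       (cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC);
	return 0;
}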
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 31ab9f346d7e..cfad7fca01d6 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -9,20 +9,12 @@
9 9
10#include <linux/bug.h> 10#include <linux/bug.h>
11 11
12#ifdef CONFIG_64BIT
13# define __CTL_LOAD "lctlg"
14# define __CTL_STORE "stctg"
15#else
16# define __CTL_LOAD "lctl"
17# define __CTL_STORE "stctl"
18#endif
19
20#define __ctl_load(array, low, high) { \ 12#define __ctl_load(array, low, high) { \
21 typedef struct { char _[sizeof(array)]; } addrtype; \ 13 typedef struct { char _[sizeof(array)]; } addrtype; \
22 \ 14 \
23 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ 15 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
24 asm volatile( \ 16 asm volatile( \
25 __CTL_LOAD " %1,%2,%0\n" \ 17 " lctlg %1,%2,%0\n" \
26 : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ 18 : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
27} 19}
28 20
@@ -31,7 +23,7 @@
31 \ 23 \
32 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ 24 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
33 asm volatile( \ 25 asm volatile( \
34 __CTL_STORE " %1,%2,%0\n" \ 26 " stctg %1,%2,%0\n" \
35 : "=Q" (*(addrtype *)(&array)) \ 27 : "=Q" (*(addrtype *)(&array)) \
36 : "i" (low), "i" (high)); \ 28 : "i" (low), "i" (high)); \
37} 29}
@@ -60,9 +52,7 @@ void smp_ctl_clear_bit(int cr, int bit);
60union ctlreg0 { 52union ctlreg0 {
61 unsigned long val; 53 unsigned long val;
62 struct { 54 struct {
63#ifdef CONFIG_64BIT
64 unsigned long : 32; 55 unsigned long : 32;
65#endif
66 unsigned long : 3; 56 unsigned long : 3;
67 unsigned long lap : 1; /* Low-address-protection control */ 57 unsigned long lap : 1; /* Low-address-protection control */
68 unsigned long : 4; 58 unsigned long : 4;
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 530c15eb01e9..0206c8052328 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -151,9 +151,21 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
151 * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! 151 * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
152 */ 152 */
153extern debug_entry_t * 153extern debug_entry_t *
154debug_sprintf_event(debug_info_t* id,int level,char *string,...) 154__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
155 __attribute__ ((format(printf, 3, 4))); 155 __attribute__ ((format(printf, 3, 4)));
156 156
157#define debug_sprintf_event(_id, _level, _fmt, ...) \
158({ \
159 debug_entry_t *__ret; \
160 debug_info_t *__id = _id; \
161 int __level = _level; \
162 if ((!__id) || (__level > __id->level)) \
163 __ret = NULL; \
164 else \
165 __ret = __debug_sprintf_event(__id, __level, \
166 _fmt, ## __VA_ARGS__); \
167 __ret; \
168})
157 169
158static inline debug_entry_t* 170static inline debug_entry_t*
159debug_exception(debug_info_t* id, int level, void* data, int length) 171debug_exception(debug_info_t* id, int level, void* data, int length)
@@ -194,9 +206,22 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
194 * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! 206 * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
195 */ 207 */
196extern debug_entry_t * 208extern debug_entry_t *
197debug_sprintf_exception(debug_info_t* id,int level,char *string,...) 209__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
198 __attribute__ ((format(printf, 3, 4))); 210 __attribute__ ((format(printf, 3, 4)));
199 211
212#define debug_sprintf_exception(_id, _level, _fmt, ...) \
213({ \
214 debug_entry_t *__ret; \
215 debug_info_t *__id = _id; \
216 int __level = _level; \
217 if ((!__id) || (__level > __id->level)) \
218 __ret = NULL; \
219 else \
220 __ret = __debug_sprintf_exception(__id, __level, \
221 _fmt, ## __VA_ARGS__);\
222 __ret; \
223})
224
200int debug_register_view(debug_info_t* id, struct debug_view* view); 225int debug_register_view(debug_info_t* id, struct debug_view* view);
201int debug_unregister_view(debug_info_t* id, struct debug_view* view); 226int debug_unregister_view(debug_info_t* id, struct debug_view* view);
202 227
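Turning debug_sprintf_event() into a macro is not cosmetic: the level check now runs before the varargs call, so the format arguments are never evaluated when the debug area's level filters the event out (the old extern function had to be entered first, forcing evaluation). A self-contained sketch of that short-circuit, with stand-in types (dbg and __event are made up, not the s390dbf API):

#include <stdio.h>
#include <stdarg.h>
#include <stddef.h>

struct dbg { int level; };

static void *__event(struct dbg *id, int level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	return id;
}

#define event(_id, _level, _fmt, ...)					\
({									\
	struct dbg *__id = (_id);					\
	int __level = (_level);						\
	void *__ret = NULL;						\
	if (__id && __level <= __id->level)				\
		__ret = __event(__id, __level, _fmt, ## __VA_ARGS__);	\
	__ret;								\
})

static int expensive(void)
{
	puts("argument evaluated");
	return 7;
}

int main(void)
{
	struct dbg d = { .level = 1 };

	event(&d, 2, "val=%d\n", expensive());	/* filtered: no evaluation */
	event(&d, 1, "val=%d\n", expensive());	/* passes: evaluates, prints */
	return 0;
}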
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index 04a83f5773cd..60323c21938b 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -13,12 +13,13 @@
13#define OPERAND_FPR 0x2 /* Operand printed as %fx */ 13#define OPERAND_FPR 0x2 /* Operand printed as %fx */
14#define OPERAND_AR 0x4 /* Operand printed as %ax */ 14#define OPERAND_AR 0x4 /* Operand printed as %ax */
15#define OPERAND_CR 0x8 /* Operand printed as %cx */ 15#define OPERAND_CR 0x8 /* Operand printed as %cx */
16#define OPERAND_DISP 0x10 /* Operand printed as displacement */ 16#define OPERAND_VR 0x10 /* Operand printed as %vx */
17#define OPERAND_BASE 0x20 /* Operand printed as base register */ 17#define OPERAND_DISP 0x20 /* Operand printed as displacement */
18#define OPERAND_INDEX 0x40 /* Operand printed as index register */ 18#define OPERAND_BASE 0x40 /* Operand printed as base register */
19#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */ 19#define OPERAND_INDEX 0x80 /* Operand printed as index register */
20#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */ 20#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
21#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */ 21#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
22#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
22 23
23 24
24struct s390_operand { 25struct s390_operand {
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 3fbc67d9e197..9d395961e713 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -42,7 +42,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
42static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 42static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
43{ 43{
44 if (!dev->dma_mask) 44 if (!dev->dma_mask)
45 return 0; 45 return false;
46 return addr + size - 1 <= *dev->dma_mask; 46 return addr + size - 1 <= *dev->dma_mask;
47} 47}
48 48
@@ -56,24 +56,35 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
56 return dma_addr == DMA_ERROR_CODE; 56 return dma_addr == DMA_ERROR_CODE;
57} 57}
58 58
59static inline void *dma_alloc_coherent(struct device *dev, size_t size, 59#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
60 dma_addr_t *dma_handle, gfp_t flag) 60
61static inline void *dma_alloc_attrs(struct device *dev, size_t size,
62 dma_addr_t *dma_handle, gfp_t flags,
63 struct dma_attrs *attrs)
61{ 64{
62 struct dma_map_ops *ops = get_dma_ops(dev); 65 struct dma_map_ops *ops = get_dma_ops(dev);
63 void *ret; 66 void *cpu_addr;
67
68 BUG_ON(!ops);
64 69
65 ret = ops->alloc(dev, size, dma_handle, flag, NULL); 70 cpu_addr = ops->alloc(dev, size, dma_handle, flags, attrs);
66 debug_dma_alloc_coherent(dev, size, *dma_handle, ret); 71 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
67 return ret; 72
73 return cpu_addr;
68} 74}
69 75
70static inline void dma_free_coherent(struct device *dev, size_t size, 76#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
71 void *cpu_addr, dma_addr_t dma_handle) 77
78static inline void dma_free_attrs(struct device *dev, size_t size,
79 void *cpu_addr, dma_addr_t dma_handle,
80 struct dma_attrs *attrs)
72{ 81{
73 struct dma_map_ops *dma_ops = get_dma_ops(dev); 82 struct dma_map_ops *ops = get_dma_ops(dev);
83
84 BUG_ON(!ops);
74 85
75 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); 86 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
76 dma_ops->free(dev, size, cpu_addr, dma_handle, NULL); 87 ops->free(dev, size, cpu_addr, dma_handle, attrs);
77} 88}
78 89
79#endif /* _ASM_S390_DMA_MAPPING_H */ 90#endif /* _ASM_S390_DMA_MAPPING_H */
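dma_alloc_coherent()/dma_free_coherent() become thin macros over the new _attrs variants, so there is one implementation path and callers that need a struct dma_attrs can pass one. The forwarding pattern in miniature (userspace stand-ins, not the DMA API):

#include <stdio.h>
#include <stdlib.h>

struct attrs { int flags; };

static void *alloc_attrs(size_t size, struct attrs *attrs)
{
	printf("attrs=%p\n", (void *) attrs);	/* NULL on the legacy path */
	return malloc(size);
}

#define alloc_legacy(size)	alloc_attrs((size), NULL)

int main(void)
{
	free(alloc_legacy(64));
	return 0;
}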
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 78f4f8711d58..3ad48f22de78 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -102,15 +102,12 @@
102#define HWCAP_S390_ETF3EH 256 102#define HWCAP_S390_ETF3EH 256
103#define HWCAP_S390_HIGH_GPRS 512 103#define HWCAP_S390_HIGH_GPRS 512
104#define HWCAP_S390_TE 1024 104#define HWCAP_S390_TE 1024
105#define HWCAP_S390_VXRS 2048
105 106
106/* 107/*
107 * These are used to set parameters in the core dumps. 108 * These are used to set parameters in the core dumps.
108 */ 109 */
109#ifndef CONFIG_64BIT
110#define ELF_CLASS ELFCLASS32
111#else /* CONFIG_64BIT */
112#define ELF_CLASS ELFCLASS64 110#define ELF_CLASS ELFCLASS64
113#endif /* CONFIG_64BIT */
114#define ELF_DATA ELFDATA2MSB 111#define ELF_DATA ELFDATA2MSB
115#define ELF_ARCH EM_S390 112#define ELF_ARCH EM_S390
116 113
@@ -160,10 +157,11 @@ extern unsigned int vdso_enabled;
160/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 157/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
161 use of this is to invoke "./ld.so someprog" to test out a new version of 158 use of this is to invoke "./ld.so someprog" to test out a new version of
162 the loader. We need to make sure that it is out of the way of the program 159 the loader. We need to make sure that it is out of the way of the program
163 that it will "exec", and that there is sufficient room for the brk. */ 160 that it will "exec", and that there is sufficient room for the brk. 64-bit
164 161 tasks are aligned to 4GB. */
165extern unsigned long randomize_et_dyn(unsigned long base); 162#define ELF_ET_DYN_BASE (is_32bit_task() ? \
166#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2)) 163 (STACK_TOP / 3 * 2) : \
164 (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
167 165
168/* This yields a mask that user programs can use to figure out what 166/* This yields a mask that user programs can use to figure out what
169 instruction set this CPU supports. */ 167 instruction set this CPU supports. */
@@ -208,7 +206,9 @@ do { \
208} while (0) 206} while (0)
209#endif /* CONFIG_COMPAT */ 207#endif /* CONFIG_COMPAT */
210 208
211#define STACK_RND_MASK 0x7ffUL 209extern unsigned long mmap_rnd_mask;
210
211#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
212 212
213#define ARCH_DLINFO \ 213#define ARCH_DLINFO \
214do { \ 214do { \
@@ -222,9 +222,6 @@ struct linux_binprm;
222#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 222#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
223int arch_setup_additional_pages(struct linux_binprm *, int); 223int arch_setup_additional_pages(struct linux_binprm *, int);
224 224
225extern unsigned long arch_randomize_brk(struct mm_struct *mm); 225void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
226#define arch_randomize_brk arch_randomize_brk
227
228void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
229 226
230#endif 227#endif
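The new ELF_ET_DYN_BASE keeps the old STACK_TOP / 3 * 2 heuristic but, for 64-bit tasks, rounds it down to a 4 GiB boundary with & ~((1UL << 32) - 1). Worked example with a made-up STACK_TOP (the real value depends on the task's address-space limit):

#include <stdio.h>

int main(void)
{
	unsigned long stack_top = 0x20000000000UL;	/* 2 TiB, illustrative */
	unsigned long raw = stack_top / 3 * 2;
	unsigned long base = raw & ~((1UL << 32) - 1);

	printf("raw=%#lx base=%#lx\n", raw, base);
	/* raw=0x15555555554 base=0x15500000000 */
	return 0;
}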
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index bf246dae1367..836c56290499 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -1,26 +1,84 @@
1#ifndef _ASM_S390_FTRACE_H 1#ifndef _ASM_S390_FTRACE_H
2#define _ASM_S390_FTRACE_H 2#define _ASM_S390_FTRACE_H
3 3
4#define ARCH_SUPPORTS_FTRACE_OPS 1
5
6#ifdef CC_USING_HOTPATCH
7#define MCOUNT_INSN_SIZE 6
8#else
9#define MCOUNT_INSN_SIZE 24
10#define MCOUNT_RETURN_FIXUP 18
11#endif
12
4#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
5 14
6extern void _mcount(void); 15#define ftrace_return_address(n) __builtin_return_address(n)
16
17void _mcount(void);
18void ftrace_caller(void);
19
20extern char ftrace_graph_caller_end;
21extern unsigned long ftrace_plt;
7 22
8struct dyn_arch_ftrace { }; 23struct dyn_arch_ftrace { };
9 24
10#define MCOUNT_ADDR ((long)_mcount) 25#define MCOUNT_ADDR ((unsigned long)_mcount)
26#define FTRACE_ADDR ((unsigned long)ftrace_caller)
11 27
28#define KPROBE_ON_FTRACE_NOP 0
29#define KPROBE_ON_FTRACE_CALL 1
12 30
13static inline unsigned long ftrace_call_adjust(unsigned long addr) 31static inline unsigned long ftrace_call_adjust(unsigned long addr)
14{ 32{
15 return addr; 33 return addr;
16} 34}
17 35
18#endif /* __ASSEMBLY__ */ 36struct ftrace_insn {
37 u16 opc;
38 s32 disp;
39} __packed;
40
41static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
42{
43#ifdef CONFIG_FUNCTION_TRACER
44#ifdef CC_USING_HOTPATCH
45 /* brcl 0,0 */
46 insn->opc = 0xc004;
47 insn->disp = 0;
48#else
49 /* jg .+24 */
50 insn->opc = 0xc0f4;
51 insn->disp = MCOUNT_INSN_SIZE / 2;
52#endif
53#endif
54}
19 55
20#ifdef CONFIG_64BIT 56static inline int is_ftrace_nop(struct ftrace_insn *insn)
21#define MCOUNT_INSN_SIZE 12 57{
58#ifdef CONFIG_FUNCTION_TRACER
59#ifdef CC_USING_HOTPATCH
60 if (insn->disp == 0)
61 return 1;
22#else 62#else
23#define MCOUNT_INSN_SIZE 22 63 if (insn->disp == MCOUNT_INSN_SIZE / 2)
64 return 1;
24#endif 65#endif
66#endif
67 return 0;
68}
25 69
70static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
71 unsigned long ip)
72{
73#ifdef CONFIG_FUNCTION_TRACER
74 unsigned long target;
75
76 /* brasl r0,ftrace_caller */
77 target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
78 insn->opc = 0xc005;
79 insn->disp = (target - ip) / 2;
80#endif
81}
82
83#endif /* __ASSEMBLY__ */
26#endif /* _ASM_S390_FTRACE_H */ 84#endif /* _ASM_S390_FTRACE_H */
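All the displacement arithmetic here divides by two because s390 relative-branch instructions (jg, brcl, brasl) encode their offsets in halfwords. The call-site math from ftrace_generate_call_insn(), as plain arithmetic with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long ip = 0x1000;	/* patched call site */
	unsigned long target = 0x1400;	/* ftrace_caller, or ftrace_plt
					 * for module code */
	int disp = (int)(((long) target - (long) ip) / 2);

	printf("brasl disp = %d halfwords\n", disp);	/* 512 */
	return 0;
}

The same convention explains the non-hotpatch nop: a jg over the 24-byte mcount block is encoded as disp = MCOUNT_INSN_SIZE / 2 = 12 halfwords.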
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index ea5a6e45fd93..a7b2d7504049 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -19,11 +19,7 @@
19#include <asm/cio.h> 19#include <asm/cio.h>
20#include <asm/uaccess.h> 20#include <asm/uaccess.h>
21 21
22#ifdef CONFIG_64BIT
23#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */ 22#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
24#else
25#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
26#endif
27#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG) 23#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
28 24
29/* 25/*
@@ -32,11 +28,7 @@
32static inline int 28static inline int
33idal_is_needed(void *vaddr, unsigned int length) 29idal_is_needed(void *vaddr, unsigned int length)
34{ 30{
35#ifdef CONFIG_64BIT
36 return ((__pa(vaddr) + length - 1) >> 31) != 0; 31 return ((__pa(vaddr) + length - 1) >> 31) != 0;
37#else
38 return 0;
39#endif
40} 32}
41 33
42 34
@@ -77,7 +69,6 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
77static inline int 69static inline int
78set_normalized_cda(struct ccw1 * ccw, void *vaddr) 70set_normalized_cda(struct ccw1 * ccw, void *vaddr)
79{ 71{
80#ifdef CONFIG_64BIT
81 unsigned int nridaws; 72 unsigned int nridaws;
82 unsigned long *idal; 73 unsigned long *idal;
83 74
@@ -93,7 +84,6 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
93 ccw->flags |= CCW_FLAG_IDA; 84 ccw->flags |= CCW_FLAG_IDA;
94 vaddr = idal; 85 vaddr = idal;
95 } 86 }
96#endif
97 ccw->cda = (__u32)(unsigned long) vaddr; 87 ccw->cda = (__u32)(unsigned long) vaddr;
98 return 0; 88 return 0;
99} 89}
@@ -104,12 +94,10 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
104static inline void 94static inline void
105clear_normalized_cda(struct ccw1 * ccw) 95clear_normalized_cda(struct ccw1 * ccw)
106{ 96{
107#ifdef CONFIG_64BIT
108 if (ccw->flags & CCW_FLAG_IDA) { 97 if (ccw->flags & CCW_FLAG_IDA) {
109 kfree((void *)(unsigned long) ccw->cda); 98 kfree((void *)(unsigned long) ccw->cda);
110 ccw->flags &= ~CCW_FLAG_IDA; 99 ccw->flags &= ~CCW_FLAG_IDA;
111 } 100 }
112#endif
113 ccw->cda = 0; 101 ccw->cda = 0;
114} 102}
115 103
@@ -181,12 +169,8 @@ idal_buffer_free(struct idal_buffer *ib)
181static inline int 169static inline int
182__idal_buffer_is_needed(struct idal_buffer *ib) 170__idal_buffer_is_needed(struct idal_buffer *ib)
183{ 171{
184#ifdef CONFIG_64BIT
185 return ib->size > (4096ul << ib->page_order) || 172 return ib->size > (4096ul << ib->page_order) ||
186 idal_is_needed(ib->data[0], ib->size); 173 idal_is_needed(ib->data[0], ib->size);
187#else
188 return ib->size > (4096ul << ib->page_order);
189#endif
190} 174}
191 175
192/* 176/*
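With the 31-bit build gone, idal_is_needed() is unconditionally the interesting check: CCW channel programs carry 31-bit data addresses, so an IDAL (indirect data address list) is required whenever any byte of the buffer lies at or above 2 GiB. The predicate as standalone arithmetic (addresses are made up):

#include <stdio.h>

static int idal_needed(unsigned long pa, unsigned int length)
{
	return ((pa + length - 1) >> 31) != 0;	/* last byte above 2 GiB? */
}

int main(void)
{
	printf("%d\n", idal_needed(0x7ffffff0UL, 8));	/* 0: fits below 2G */
	printf("%d\n", idal_needed(0x7ffffff0UL, 32));	/* 1: crosses 2G */
	return 0;
}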
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
new file mode 100644
index 000000000000..113cd963dbbe
--- /dev/null
+++ b/arch/s390/include/asm/idle.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright IBM Corp. 2014
3 *
4 * Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
5 */
6
7#ifndef _S390_IDLE_H
8#define _S390_IDLE_H
9
10#include <linux/types.h>
11#include <linux/device.h>
12#include <linux/seqlock.h>
13
14struct s390_idle_data {
15 seqcount_t seqcount;
16 unsigned long long idle_count;
17 unsigned long long idle_time;
18 unsigned long long clock_idle_enter;
19 unsigned long long clock_idle_exit;
20 unsigned long long timer_idle_enter;
21 unsigned long long timer_idle_exit;
22};
23
24extern struct device_attribute dev_attr_idle_count;
25extern struct device_attribute dev_attr_idle_time_us;
26
27#endif /* _S390_IDLE_H */
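Relative to the struct deleted from cputime.h, the hand-rolled unsigned sequence counter is replaced by a proper seqcount_t, giving readers a retry loop instead of a lock. A simplified userspace rendering of that read side (just the idea, not the kernel's seqlock.h):

#include <stdio.h>

struct idle_data {
	unsigned int seq;		/* odd while an update is in flight */
	unsigned long long idle_time;
};

static unsigned long long read_idle_time(struct idle_data *d)
{
	unsigned int seq;
	unsigned long long t;

	do {
		seq = __atomic_load_n(&d->seq, __ATOMIC_ACQUIRE);
		t = d->idle_time;
	} while ((seq & 1) ||
		 __atomic_load_n(&d->seq, __ATOMIC_ACQUIRE) != seq);
	return t;			/* consistent snapshot */
}

int main(void)
{
	struct idle_data d = { 0, 12345 };

	printf("%llu\n", read_idle_time(&d));
	return 0;
}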
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index cd6b9ee7b69c..30fd5c84680e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -13,9 +13,10 @@
13#include <asm/page.h> 13#include <asm/page.h>
14#include <asm/pci_io.h> 14#include <asm/pci_io.h>
15 15
16void *xlate_dev_mem_ptr(unsigned long phys);
17#define xlate_dev_mem_ptr xlate_dev_mem_ptr 16#define xlate_dev_mem_ptr xlate_dev_mem_ptr
18void unxlate_dev_mem_ptr(unsigned long phys, void *addr); 17void *xlate_dev_mem_ptr(phys_addr_t phys);
18#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
19void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
19 20
20/* 21/*
21 * Convert a virtual cached pointer to an uncached pointer 22 * Convert a virtual cached pointer to an uncached pointer
@@ -38,6 +39,15 @@ static inline void iounmap(volatile void __iomem *addr)
38{ 39{
39} 40}
40 41
42static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
43{
44 return NULL;
45}
46
47static inline void ioport_unmap(void __iomem *p)
48{
49}
50
41/* 51/*
42 * s390 needs a private implementation of pci_iomap since ioremap with its 52 * s390 needs a private implementation of pci_iomap since ioremap with its
43 * offset parameter isn't sufficient. That's because BAR spaces are not 53 * offset parameter isn't sufficient. That's because BAR spaces are not
@@ -60,11 +70,6 @@ static inline void iounmap(volatile void __iomem *addr)
60#define __raw_writel zpci_write_u32 70#define __raw_writel zpci_write_u32
61#define __raw_writeq zpci_write_u64 71#define __raw_writeq zpci_write_u64
62 72
63#define readb_relaxed readb
64#define readw_relaxed readw
65#define readl_relaxed readl
66#define readq_relaxed readq
67
68#endif /* CONFIG_PCI */ 73#endif /* CONFIG_PCI */
69 74
70#include <asm-generic/io.h> 75#include <asm-generic/io.h>
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 2fcccc0c997c..ece606c2ee86 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -17,12 +17,12 @@
17#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \ 17#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
18 sizeof(struct ipl_block_fcp)) 18 sizeof(struct ipl_block_fcp))
19 19
20#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 8) 20#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
21 21
22#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \ 22#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
23 sizeof(struct ipl_block_ccw)) 23 sizeof(struct ipl_block_ccw))
24 24
25#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 8) 25#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
26 26
27#define IPL_MAX_SUPPORTED_VERSION (0) 27#define IPL_MAX_SUPPORTED_VERSION (0)
28 28
@@ -38,10 +38,11 @@ struct ipl_list_hdr {
38 u8 pbt; 38 u8 pbt;
39 u8 flags; 39 u8 flags;
40 u16 reserved2; 40 u16 reserved2;
41 u8 loadparm[8];
41} __attribute__((packed)); 42} __attribute__((packed));
42 43
43struct ipl_block_fcp { 44struct ipl_block_fcp {
44 u8 reserved1[313-1]; 45 u8 reserved1[305-1];
45 u8 opt; 46 u8 opt;
46 u8 reserved2[3]; 47 u8 reserved2[3];
47 u16 reserved3; 48 u16 reserved3;
@@ -62,7 +63,6 @@ struct ipl_block_fcp {
62 offsetof(struct ipl_block_fcp, scp_data))) 63 offsetof(struct ipl_block_fcp, scp_data)))
63 64
64struct ipl_block_ccw { 65struct ipl_block_ccw {
65 u8 load_parm[8];
66 u8 reserved1[84]; 66 u8 reserved1[84];
67 u8 reserved2[2]; 67 u8 reserved2[2];
68 u16 devno; 68 u16 devno;
@@ -89,12 +89,12 @@ extern u32 ipl_flags;
89extern u32 dump_prefix_page; 89extern u32 dump_prefix_page;
90 90
91struct dump_save_areas { 91struct dump_save_areas {
92 struct save_area **areas; 92 struct save_area_ext **areas;
93 int count; 93 int count;
94}; 94};
95 95
96extern struct dump_save_areas dump_save_areas; 96extern struct dump_save_areas dump_save_areas;
97struct save_area *dump_save_area_create(int cpu); 97struct save_area_ext *dump_save_area_create(int cpu);
98 98
99extern void do_reipl(void); 99extern void do_reipl(void);
100extern void do_halt(void); 100extern void do_halt(void);
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index c4dd400a2791..ff95d15a2384 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,11 +1,11 @@
1#ifndef _ASM_IRQ_H 1#ifndef _ASM_IRQ_H
2#define _ASM_IRQ_H 2#define _ASM_IRQ_H
3 3
4#define EXT_INTERRUPT 1 4#define EXT_INTERRUPT 0
5#define IO_INTERRUPT 2 5#define IO_INTERRUPT 1
6#define THIN_INTERRUPT 3 6#define THIN_INTERRUPT 2
7 7
8#define NR_IRQS_BASE 4 8#define NR_IRQS_BASE 3
9 9
10#ifdef CONFIG_PCI_NR_MSI 10#ifdef CONFIG_PCI_NR_MSI
11# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI) 11# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
@@ -13,9 +13,6 @@
13# define NR_IRQS NR_IRQS_BASE 13# define NR_IRQS NR_IRQS_BASE
14#endif 14#endif
15 15
16/* This number is used when no interrupt has been assigned */
17#define NO_IRQ 0
18
19/* External interruption codes */ 16/* External interruption codes */
20#define EXT_IRQ_INTERRUPT_KEY 0x0040 17#define EXT_IRQ_INTERRUPT_KEY 0x0040
21#define EXT_IRQ_CLK_COMP 0x1004 18#define EXT_IRQ_CLK_COMP 0x1004
@@ -51,6 +48,7 @@ enum interruption_class {
51 IRQEXT_CMS, 48 IRQEXT_CMS,
52 IRQEXT_CMC, 49 IRQEXT_CMC,
53 IRQEXT_CMR, 50 IRQEXT_CMR,
51 IRQEXT_FTP,
54 IRQIO_CIO, 52 IRQIO_CIO,
55 IRQIO_QAI, 53 IRQIO_QAI,
56 IRQIO_DAS, 54 IRQIO_DAS,
@@ -59,7 +57,6 @@ enum interruption_class {
59 IRQIO_TAP, 57 IRQIO_TAP,
60 IRQIO_VMR, 58 IRQIO_VMR,
61 IRQIO_LCS, 59 IRQIO_LCS,
62 IRQIO_CLW,
63 IRQIO_CTC, 60 IRQIO_CTC,
64 IRQIO_APB, 61 IRQIO_APB,
65 IRQIO_ADM, 62 IRQIO_ADM,
@@ -81,7 +78,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
81 78
82static __always_inline void inc_irq_stat(enum interruption_class irq) 79static __always_inline void inc_irq_stat(enum interruption_class irq)
83{ 80{
84 __get_cpu_var(irq_stat).irqs[irq]++; 81 __this_cpu_inc(irq_stat.irqs[irq]);
85} 82}
86 83
87struct ext_code { 84struct ext_code {
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 37b9091ab8c0..16aa0c779e07 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
36 36
37static inline notrace unsigned long arch_local_save_flags(void) 37static inline notrace unsigned long arch_local_save_flags(void)
38{ 38{
39 return __arch_local_irq_stosm(0x00); 39 return __arch_local_irq_stnsm(0xff);
40} 40}
41 41
42static inline notrace unsigned long arch_local_irq_save(void) 42static inline notrace unsigned long arch_local_irq_save(void)
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 346b1c85ffb4..69972b7957ee 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -1,24 +1,23 @@
1#ifndef _ASM_S390_JUMP_LABEL_H 1#ifndef _ASM_S390_JUMP_LABEL_H
2#define _ASM_S390_JUMP_LABEL_H 2#define _ASM_S390_JUMP_LABEL_H
3 3
4#ifndef __ASSEMBLY__
5
4#include <linux/types.h> 6#include <linux/types.h>
5 7
6#define JUMP_LABEL_NOP_SIZE 6 8#define JUMP_LABEL_NOP_SIZE 6
9#define JUMP_LABEL_NOP_OFFSET 2
7 10
8#ifdef CONFIG_64BIT 11/*
9#define ASM_PTR ".quad" 12 * We use a brcl 0,2 instruction for jump labels at compile time so it
10#define ASM_ALIGN ".balign 8" 13 * can be easily distinguished from a hotpatch generated instruction.
11#else 14 */
12#define ASM_PTR ".long"
13#define ASM_ALIGN ".balign 4"
14#endif
15
16static __always_inline bool arch_static_branch(struct static_key *key) 15static __always_inline bool arch_static_branch(struct static_key *key)
17{ 16{
18 asm_volatile_goto("0: brcl 0,0\n" 17 asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
19 ".pushsection __jump_table, \"aw\"\n" 18 ".pushsection __jump_table, \"aw\"\n"
20 ASM_ALIGN "\n" 19 ".balign 8\n"
21 ASM_PTR " 0b, %l[label], %0\n" 20 ".quad 0b, %l[label], %0\n"
22 ".popsection\n" 21 ".popsection\n"
23 : : "X" (key) : : label); 22 : : "X" (key) : : label);
24 return false; 23 return false;
@@ -34,4 +33,5 @@ struct jump_entry {
34 jump_label_t key; 33 jump_label_t key;
35}; 34};
36 35
36#endif /* __ASSEMBLY__ */
37#endif 37#endif
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 4176dfe0fba1..b47ad3b642cc 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -60,6 +60,7 @@ typedef u16 kprobe_opcode_t;
60struct arch_specific_insn { 60struct arch_specific_insn {
61 /* copy of original instruction */ 61 /* copy of original instruction */
62 kprobe_opcode_t *insn; 62 kprobe_opcode_t *insn;
63 unsigned int is_ftrace_insn : 1;
63}; 64};
64 65
65struct prev_kprobe { 66struct prev_kprobe {
@@ -84,6 +85,10 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
84int kprobe_exceptions_notify(struct notifier_block *self, 85int kprobe_exceptions_notify(struct notifier_block *self,
85 unsigned long val, void *data); 86 unsigned long val, void *data);
86 87
88int probe_is_prohibited_opcode(u16 *insn);
89int probe_get_fixup_type(u16 *insn);
90int probe_is_insn_relative_long(u16 *insn);
91
87#define flush_insn_slot(p) do { } while (0) 92#define flush_insn_slot(p) do { } while (0)
88 93
89#endif /* _ASM_S390_KPROBES_H */ 94#endif /* _ASM_S390_KPROBES_H */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 4181d7baabba..d01fc588b5c3 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -13,8 +13,11 @@
13 13
14#ifndef ASM_KVM_HOST_H 14#ifndef ASM_KVM_HOST_H
15#define ASM_KVM_HOST_H 15#define ASM_KVM_HOST_H
16
17#include <linux/types.h>
16#include <linux/hrtimer.h> 18#include <linux/hrtimer.h>
17#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/kvm_types.h>
18#include <linux/kvm_host.h> 21#include <linux/kvm_host.h>
19#include <linux/kvm.h> 22#include <linux/kvm.h>
20#include <asm/debug.h> 23#include <asm/debug.h>
@@ -32,11 +35,13 @@
32#define KVM_NR_IRQCHIPS 1 35#define KVM_NR_IRQCHIPS 1
33#define KVM_IRQCHIP_NUM_PINS 4096 36#define KVM_IRQCHIP_NUM_PINS 4096
34 37
35#define SIGP_CTRL_C 0x00800000 38#define SIGP_CTRL_C 0x80
39#define SIGP_CTRL_SCN_MASK 0x3f
36 40
37struct sca_entry { 41struct sca_entry {
38 atomic_t ctrl; 42 __u8 reserved0;
39 __u32 reserved; 43 __u8 sigp_ctrl;
44 __u16 reserved[3];
40 __u64 sda; 45 __u64 sda;
41 __u64 reserved2[2]; 46 __u64 reserved2[2];
42} __attribute__((packed)); 47} __attribute__((packed));
@@ -84,7 +89,8 @@ struct kvm_s390_sie_block {
84 atomic_t cpuflags; /* 0x0000 */ 89 atomic_t cpuflags; /* 0x0000 */
85 __u32 : 1; /* 0x0004 */ 90 __u32 : 1; /* 0x0004 */
86 __u32 prefix : 18; 91 __u32 prefix : 18;
87 __u32 : 13; 92 __u32 : 1;
93 __u32 ibc : 12;
88 __u8 reserved08[4]; /* 0x0008 */ 94 __u8 reserved08[4]; /* 0x0008 */
89#define PROG_IN_SIE (1<<0) 95#define PROG_IN_SIE (1<<0)
90 __u32 prog0c; /* 0x000c */ 96 __u32 prog0c; /* 0x000c */
@@ -120,7 +126,7 @@ struct kvm_s390_sie_block {
120#define ICPT_PARTEXEC 0x38 126#define ICPT_PARTEXEC 0x38
121#define ICPT_IOINST 0x40 127#define ICPT_IOINST 0x40
122 __u8 icptcode; /* 0x0050 */ 128 __u8 icptcode; /* 0x0050 */
123 __u8 reserved51; /* 0x0051 */ 129 __u8 icptstatus; /* 0x0051 */
124 __u16 ihcpu; /* 0x0052 */ 130 __u16 ihcpu; /* 0x0052 */
125 __u8 reserved54[2]; /* 0x0054 */ 131 __u8 reserved54[2]; /* 0x0054 */
126 __u16 ipa; /* 0x0056 */ 132 __u16 ipa; /* 0x0056 */
@@ -129,7 +135,9 @@ struct kvm_s390_sie_block {
129 __u8 reserved60; /* 0x0060 */ 135 __u8 reserved60; /* 0x0060 */
130 __u8 ecb; /* 0x0061 */ 136 __u8 ecb; /* 0x0061 */
131 __u8 ecb2; /* 0x0062 */ 137 __u8 ecb2; /* 0x0062 */
132 __u8 reserved63[1]; /* 0x0063 */ 138#define ECB3_AES 0x04
139#define ECB3_DEA 0x08
140 __u8 ecb3; /* 0x0063 */
133 __u32 scaol; /* 0x0064 */ 141 __u32 scaol; /* 0x0064 */
134 __u8 reserved68[4]; /* 0x0068 */ 142 __u8 reserved68[4]; /* 0x0068 */
135 __u32 todpr; /* 0x006c */ 143 __u32 todpr; /* 0x006c */
@@ -154,14 +162,19 @@ struct kvm_s390_sie_block {
154 __u8 armid; /* 0x00e3 */ 162 __u8 armid; /* 0x00e3 */
155 __u8 reservede4[4]; /* 0x00e4 */ 163 __u8 reservede4[4]; /* 0x00e4 */
156 __u64 tecmc; /* 0x00e8 */ 164 __u64 tecmc; /* 0x00e8 */
157 __u8 reservedf0[16]; /* 0x00f0 */ 165 __u8 reservedf0[12]; /* 0x00f0 */
166#define CRYCB_FORMAT1 0x00000001
167#define CRYCB_FORMAT2 0x00000003
168 __u32 crycbd; /* 0x00fc */
158 __u64 gcr[16]; /* 0x0100 */ 169 __u64 gcr[16]; /* 0x0100 */
159 __u64 gbea; /* 0x0180 */ 170 __u64 gbea; /* 0x0180 */
160 __u8 reserved188[24]; /* 0x0188 */ 171 __u8 reserved188[24]; /* 0x0188 */
161 __u32 fac; /* 0x01a0 */ 172 __u32 fac; /* 0x01a0 */
162 __u8 reserved1a4[20]; /* 0x01a4 */ 173 __u8 reserved1a4[20]; /* 0x01a4 */
163 __u64 cbrlo; /* 0x01b8 */ 174 __u64 cbrlo; /* 0x01b8 */
164 __u8 reserved1c0[30]; /* 0x01c0 */ 175 __u8 reserved1c0[8]; /* 0x01c0 */
176 __u32 ecd; /* 0x01c8 */
177 __u8 reserved1cc[18]; /* 0x01cc */
165 __u64 pp; /* 0x01de */ 178 __u64 pp; /* 0x01de */
166 __u8 reserved1e6[2]; /* 0x01e6 */ 179 __u8 reserved1e6[2]; /* 0x01e6 */
167 __u64 itdba; /* 0x01e8 */ 180 __u64 itdba; /* 0x01e8 */
@@ -172,11 +185,17 @@ struct kvm_s390_itdb {
172 __u8 data[256]; 185 __u8 data[256];
173} __packed; 186} __packed;
174 187
188struct kvm_s390_vregs {
189 __vector128 vrs[32];
190 __u8 reserved200[512]; /* for future vector expansion */
191} __packed;
192
175struct sie_page { 193struct sie_page {
176 struct kvm_s390_sie_block sie_block; 194 struct kvm_s390_sie_block sie_block;
177 __u8 reserved200[1024]; /* 0x0200 */ 195 __u8 reserved200[1024]; /* 0x0200 */
178 struct kvm_s390_itdb itdb; /* 0x0600 */ 196 struct kvm_s390_itdb itdb; /* 0x0600 */
179 __u8 reserved700[2304]; /* 0x0700 */ 197 __u8 reserved700[1280]; /* 0x0700 */
198 struct kvm_s390_vregs vregs; /* 0x0c00 */
180} __packed; 199} __packed;
181 200
182struct kvm_vcpu_stat { 201struct kvm_vcpu_stat {
@@ -187,6 +206,8 @@ struct kvm_vcpu_stat {
187 u32 exit_stop_request; 206 u32 exit_stop_request;
188 u32 exit_validity; 207 u32 exit_validity;
189 u32 exit_instruction; 208 u32 exit_instruction;
209 u32 halt_successful_poll;
210 u32 halt_wakeup;
190 u32 instruction_lctl; 211 u32 instruction_lctl;
191 u32 instruction_lctlg; 212 u32 instruction_lctlg;
192 u32 instruction_stctl; 213 u32 instruction_stctl;
@@ -220,10 +241,18 @@ struct kvm_vcpu_stat {
220 u32 instruction_sigp_sense_running; 241 u32 instruction_sigp_sense_running;
221 u32 instruction_sigp_external_call; 242 u32 instruction_sigp_external_call;
222 u32 instruction_sigp_emergency; 243 u32 instruction_sigp_emergency;
244 u32 instruction_sigp_cond_emergency;
245 u32 instruction_sigp_start;
223 u32 instruction_sigp_stop; 246 u32 instruction_sigp_stop;
247 u32 instruction_sigp_stop_store_status;
248 u32 instruction_sigp_store_status;
249 u32 instruction_sigp_store_adtl_status;
224 u32 instruction_sigp_arch; 250 u32 instruction_sigp_arch;
225 u32 instruction_sigp_prefix; 251 u32 instruction_sigp_prefix;
226 u32 instruction_sigp_restart; 252 u32 instruction_sigp_restart;
253 u32 instruction_sigp_init_cpu_reset;
254 u32 instruction_sigp_cpu_reset;
255 u32 instruction_sigp_unknown;
227 u32 diagnose_10; 256 u32 diagnose_10;
228 u32 diagnose_44; 257 u32 diagnose_44;
229 u32 diagnose_9c; 258 u32 diagnose_9c;
@@ -250,6 +279,7 @@ struct kvm_vcpu_stat {
250#define PGM_SPECIAL_OPERATION 0x13 279#define PGM_SPECIAL_OPERATION 0x13
251#define PGM_OPERAND 0x15 280#define PGM_OPERAND 0x15
252#define PGM_TRACE_TABEL 0x16 281#define PGM_TRACE_TABEL 0x16
282#define PGM_VECTOR_PROCESSING 0x1b
253#define PGM_SPACE_SWITCH 0x1c 283#define PGM_SPACE_SWITCH 0x1c
254#define PGM_HFP_SQUARE_ROOT 0x1d 284#define PGM_HFP_SQUARE_ROOT 0x1d
255#define PGM_PC_TRANSLATION_SPEC 0x1f 285#define PGM_PC_TRANSLATION_SPEC 0x1f
@@ -282,6 +312,84 @@ struct kvm_vcpu_stat {
282#define PGM_PER 0x80 312#define PGM_PER 0x80
283#define PGM_CRYPTO_OPERATION 0x119 313#define PGM_CRYPTO_OPERATION 0x119
284 314
315/* irq types in order of priority */
316enum irq_types {
317 IRQ_PEND_MCHK_EX = 0,
318 IRQ_PEND_SVC,
319 IRQ_PEND_PROG,
320 IRQ_PEND_MCHK_REP,
321 IRQ_PEND_EXT_IRQ_KEY,
322 IRQ_PEND_EXT_MALFUNC,
323 IRQ_PEND_EXT_EMERGENCY,
324 IRQ_PEND_EXT_EXTERNAL,
325 IRQ_PEND_EXT_CLOCK_COMP,
326 IRQ_PEND_EXT_CPU_TIMER,
327 IRQ_PEND_EXT_TIMING,
328 IRQ_PEND_EXT_SERVICE,
329 IRQ_PEND_EXT_HOST,
330 IRQ_PEND_PFAULT_INIT,
331 IRQ_PEND_PFAULT_DONE,
332 IRQ_PEND_VIRTIO,
333 IRQ_PEND_IO_ISC_0,
334 IRQ_PEND_IO_ISC_1,
335 IRQ_PEND_IO_ISC_2,
336 IRQ_PEND_IO_ISC_3,
337 IRQ_PEND_IO_ISC_4,
338 IRQ_PEND_IO_ISC_5,
339 IRQ_PEND_IO_ISC_6,
340 IRQ_PEND_IO_ISC_7,
341 IRQ_PEND_SIGP_STOP,
342 IRQ_PEND_RESTART,
343 IRQ_PEND_SET_PREFIX,
344 IRQ_PEND_COUNT
345};
346
347/* We have 2M for virtio device descriptor pages. The smallest
348 * descriptor is 24 bytes (one queue), so (2048*1024) / 24 = 87381
349 */
350#define KVM_S390_MAX_VIRTIO_IRQS 87381
351
352/*
353 * Repressible (non-floating) machine check interrupts
354 * subclass bits in MCIC
355 */
356#define MCHK_EXTD_BIT 58
357#define MCHK_DEGR_BIT 56
358#define MCHK_WARN_BIT 55
359#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
360 (1UL << MCHK_EXTD_BIT) | \
361 (1UL << MCHK_WARN_BIT))
362
363/* Exigent machine check interrupts subclass bits in MCIC */
364#define MCHK_SD_BIT 63
365#define MCHK_PD_BIT 62
366#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))
367
368#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY) | \
369 (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
370 (1UL << IRQ_PEND_EXT_CPU_TIMER) | \
371 (1UL << IRQ_PEND_EXT_MALFUNC) | \
372 (1UL << IRQ_PEND_EXT_EMERGENCY) | \
373 (1UL << IRQ_PEND_EXT_EXTERNAL) | \
374 (1UL << IRQ_PEND_EXT_TIMING) | \
375 (1UL << IRQ_PEND_EXT_HOST) | \
376 (1UL << IRQ_PEND_EXT_SERVICE) | \
377 (1UL << IRQ_PEND_VIRTIO) | \
378 (1UL << IRQ_PEND_PFAULT_INIT) | \
379 (1UL << IRQ_PEND_PFAULT_DONE))
380
381#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
382 (1UL << IRQ_PEND_IO_ISC_1) | \
383 (1UL << IRQ_PEND_IO_ISC_2) | \
384 (1UL << IRQ_PEND_IO_ISC_3) | \
385 (1UL << IRQ_PEND_IO_ISC_4) | \
386 (1UL << IRQ_PEND_IO_ISC_5) | \
387 (1UL << IRQ_PEND_IO_ISC_6) | \
388 (1UL << IRQ_PEND_IO_ISC_7))
389
390#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
391 (1UL << IRQ_PEND_MCHK_EX))
392
285struct kvm_s390_interrupt_info { 393struct kvm_s390_interrupt_info {
286 struct list_head list; 394 struct list_head list;
287 u64 type; 395 u64 type;
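
pending_irqs is a bitmap indexed by enum irq_types, so the masks above test a whole class of interrupts with a single AND. A sketch; the real delivery code additionally gates on PSW masks and control registers:

/* Sketch: classify a vcpu's pending_irqs bitmap using the masks above. */
static unsigned long sketch_deliverable_pending(unsigned long pending)
{
	unsigned long ext  = pending & IRQ_PEND_EXT_MASK;   /* external class */
	unsigned long io   = pending & IRQ_PEND_IO_MASK;    /* I/O, per ISC   */
	unsigned long mchk = pending & IRQ_PEND_MCHK_MASK;  /* machine checks */

	return ext | io | mchk;		/* PSW/CR gating omitted */
}
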
@@ -292,32 +400,58 @@ struct kvm_s390_interrupt_info {
292 struct kvm_s390_emerg_info emerg; 400 struct kvm_s390_emerg_info emerg;
293 struct kvm_s390_extcall_info extcall; 401 struct kvm_s390_extcall_info extcall;
294 struct kvm_s390_prefix_info prefix; 402 struct kvm_s390_prefix_info prefix;
403 struct kvm_s390_stop_info stop;
295 struct kvm_s390_mchk_info mchk; 404 struct kvm_s390_mchk_info mchk;
296 }; 405 };
297}; 406};
298 407
299/* for local_interrupt.action_flags */ 408struct kvm_s390_irq_payload {
300#define ACTION_STORE_ON_STOP (1<<0) 409 struct kvm_s390_io_info io;
301#define ACTION_STOP_ON_STOP (1<<1) 410 struct kvm_s390_ext_info ext;
411 struct kvm_s390_pgm_info pgm;
412 struct kvm_s390_emerg_info emerg;
413 struct kvm_s390_extcall_info extcall;
414 struct kvm_s390_prefix_info prefix;
415 struct kvm_s390_stop_info stop;
416 struct kvm_s390_mchk_info mchk;
417};
302 418
303struct kvm_s390_local_interrupt { 419struct kvm_s390_local_interrupt {
304 spinlock_t lock; 420 spinlock_t lock;
305 struct list_head list;
306 atomic_t active;
307 struct kvm_s390_float_interrupt *float_int; 421 struct kvm_s390_float_interrupt *float_int;
308 int timer_due; /* event indicator for waitqueue below */
309 wait_queue_head_t *wq; 422 wait_queue_head_t *wq;
310 atomic_t *cpuflags; 423 atomic_t *cpuflags;
311 unsigned int action_bits; 424 DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
425 struct kvm_s390_irq_payload irq;
426 unsigned long pending_irqs;
312}; 427};
313 428
429#define FIRQ_LIST_IO_ISC_0 0
430#define FIRQ_LIST_IO_ISC_1 1
431#define FIRQ_LIST_IO_ISC_2 2
432#define FIRQ_LIST_IO_ISC_3 3
433#define FIRQ_LIST_IO_ISC_4 4
434#define FIRQ_LIST_IO_ISC_5 5
435#define FIRQ_LIST_IO_ISC_6 6
436#define FIRQ_LIST_IO_ISC_7 7
437#define FIRQ_LIST_PFAULT 8
438#define FIRQ_LIST_VIRTIO 9
439#define FIRQ_LIST_COUNT 10
440#define FIRQ_CNTR_IO 0
441#define FIRQ_CNTR_SERVICE 1
442#define FIRQ_CNTR_VIRTIO 2
443#define FIRQ_CNTR_PFAULT 3
444#define FIRQ_MAX_COUNT 4
445
314struct kvm_s390_float_interrupt { 446struct kvm_s390_float_interrupt {
447 unsigned long pending_irqs;
315 spinlock_t lock; 448 spinlock_t lock;
316 struct list_head list; 449 struct list_head lists[FIRQ_LIST_COUNT];
317 atomic_t active; 450 int counters[FIRQ_MAX_COUNT];
451 struct kvm_s390_mchk_info mchk;
452 struct kvm_s390_ext_info srv_signal;
318 int next_rr_cpu; 453 int next_rr_cpu;
319 unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)]; 454 unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
320 unsigned int irq_count;
321}; 455};
322 456
323struct kvm_hw_wp_info_arch { 457struct kvm_hw_wp_info_arch {
@@ -365,9 +499,9 @@ struct kvm_vcpu_arch {
365 s390_fp_regs host_fpregs; 499 s390_fp_regs host_fpregs;
366 unsigned int host_acrs[NUM_ACRS]; 500 unsigned int host_acrs[NUM_ACRS];
367 s390_fp_regs guest_fpregs; 501 s390_fp_regs guest_fpregs;
502 struct kvm_s390_vregs *host_vregs;
368 struct kvm_s390_local_interrupt local_int; 503 struct kvm_s390_local_interrupt local_int;
369 struct hrtimer ckc_timer; 504 struct hrtimer ckc_timer;
370 struct tasklet_struct tasklet;
371 struct kvm_s390_pgm_info pgm; 505 struct kvm_s390_pgm_info pgm;
372 union { 506 union {
373 struct cpuid cpu_id; 507 struct cpuid cpu_id;
@@ -375,7 +509,6 @@ struct kvm_vcpu_arch {
375 }; 509 };
376 struct gmap *gmap; 510 struct gmap *gmap;
377 struct kvm_guestdbg_info_arch guestdbg; 511 struct kvm_guestdbg_info_arch guestdbg;
378#define KVM_S390_PFAULT_TOKEN_INVALID (-1UL)
379 unsigned long pfault_token; 512 unsigned long pfault_token;
380 unsigned long pfault_select; 513 unsigned long pfault_select;
381 unsigned long pfault_compare; 514 unsigned long pfault_compare;
@@ -409,6 +542,41 @@ struct s390_io_adapter {
409#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8) 542#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
410#define MAX_S390_ADAPTER_MAPS 256 543#define MAX_S390_ADAPTER_MAPS 256
411 544
545/* maximum size of facilities and facility mask is 2k bytes */
546#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
547#define S390_ARCH_FAC_LIST_SIZE_U64 \
548 (S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
549#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
550#define S390_ARCH_FAC_MASK_SIZE_U64 \
551 (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
552
553struct kvm_s390_fac {
554 /* facility list requested by guest */
555 __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
556 /* facility mask supported by kvm & hosting machine */
557 __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
558};
559
560struct kvm_s390_cpu_model {
561 struct kvm_s390_fac *fac;
562 struct cpuid cpu_id;
563 unsigned short ibc;
564};
565
566struct kvm_s390_crypto {
567 struct kvm_s390_crypto_cb *crycb;
568 __u32 crycbd;
569 __u8 aes_kw;
570 __u8 dea_kw;
571};
572
573struct kvm_s390_crypto_cb {
574 __u8 reserved00[72]; /* 0x0000 */
575 __u8 dea_wrapping_key_mask[24]; /* 0x0048 */
576 __u8 aes_wrapping_key_mask[32]; /* 0x0060 */
577 __u8 reserved80[128]; /* 0x0080 */
578};
579
412struct kvm_arch{ 580struct kvm_arch{
413 struct sca_block *sca; 581 struct sca_block *sca;
414 debug_info_t *dbf; 582 debug_info_t *dbf;
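
The facility list and mask are 2k bytes each, i.e. 256 u64 words, and the 32-bit crycbd field seen earlier packs the crypto control block origin together with a format bit, which implies the crycb must sit below 2G. A sketch of the packing under those assumptions:

/* Sketch: build the crycbd value from a crycb origin plus format bits. */
static u32 sketch_build_crycbd(struct kvm_s390_crypto_cb *crycb)
{
	BUILD_BUG_ON(S390_ARCH_FAC_LIST_SIZE_U64 != 256);	/* 2k / 8 */
	return (u32)(unsigned long) crycb | CRYCB_FORMAT1;
}
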
@@ -418,9 +586,17 @@ struct kvm_arch{
418 int css_support; 586 int css_support;
419 int use_irqchip; 587 int use_irqchip;
420 int use_cmma; 588 int use_cmma;
589 int user_cpu_state_ctrl;
590 int user_sigp;
591 int user_stsi;
421 struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; 592 struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
422 wait_queue_head_t ipte_wq; 593 wait_queue_head_t ipte_wq;
594 int ipte_lock_count;
595 struct mutex ipte_mutex;
423 spinlock_t start_stop_lock; 596 spinlock_t start_stop_lock;
597 struct kvm_s390_cpu_model model;
598 struct kvm_s390_crypto crypto;
599 u64 epoch;
424}; 600};
425 601
426#define KVM_HVA_ERR_BAD (-1UL) 602#define KVM_HVA_ERR_BAD (-1UL)
@@ -432,8 +608,6 @@ static inline bool kvm_is_error_hva(unsigned long addr)
432} 608}
433 609
434#define ASYNC_PF_PER_VCPU 64 610#define ASYNC_PF_PER_VCPU 64
435struct kvm_vcpu;
436struct kvm_async_pf;
437struct kvm_arch_async_pf { 611struct kvm_arch_async_pf {
438 unsigned long pfault_token; 612 unsigned long pfault_token;
439}; 613};
@@ -451,4 +625,18 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
451 625
452extern int sie64a(struct kvm_s390_sie_block *, u64 *); 626extern int sie64a(struct kvm_s390_sie_block *, u64 *);
453extern char sie_exit; 627extern char sie_exit;
628
629static inline void kvm_arch_hardware_disable(void) {}
630static inline void kvm_arch_check_processor_compat(void *rtn) {}
631static inline void kvm_arch_exit(void) {}
632static inline void kvm_arch_sync_events(struct kvm *kvm) {}
633static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
634static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
635static inline void kvm_arch_free_memslot(struct kvm *kvm,
636 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
637static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
638static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
639static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
640 struct kvm_memory_slot *slot) {}
641
454#endif 642#endif
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
new file mode 100644
index 000000000000..7aa799134a11
--- /dev/null
+++ b/arch/s390/include/asm/livepatch.h
@@ -0,0 +1,43 @@
1/*
2 * livepatch.h - s390-specific Kernel Live Patching Core
3 *
4 * Copyright (c) 2013-2015 SUSE
5 * Authors: Jiri Kosina
6 * Vojtech Pavlik
7 * Jiri Slaby
8 */
9
10/*
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 */
16
17#ifndef ASM_LIVEPATCH_H
18#define ASM_LIVEPATCH_H
19
20#include <linux/module.h>
21
22#ifdef CONFIG_LIVEPATCH
23static inline int klp_check_compiler_support(void)
24{
25 return 0;
26}
27
28static inline int klp_write_module_reloc(struct module *mod, unsigned long
29 type, unsigned long loc, unsigned long value)
30{
31 /* not supported yet */
32 return -ENOSYS;
33}
34
35static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
36{
37 regs->psw.addr = ip;
38}
39#else
40#error Live patching support is disabled; check CONFIG_LIVEPATCH
41#endif
42
43#endif
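
klp_arch_set_pc only needs to rewrite psw.addr because the s390 PSW carries the address of the next instruction to execute. A sketch of the calling context, with the lookup of the replacement function elided (handler shape follows the ftrace_func_t of this kernel generation):

/* Sketch: a livepatch ftrace handler diverting execution to new_fn. */
static void notrace sketch_klp_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *ops,
				       struct pt_regs *regs)
{
	unsigned long new_fn = 0;	/* looked up from the patch, elided */

	klp_arch_set_pc(regs, new_fn);	/* resume in the patched function */
}
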
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 4349197ab9df..663f23e37460 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -11,158 +11,7 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/ptrace.h> 12#include <asm/ptrace.h>
13#include <asm/cpu.h> 13#include <asm/cpu.h>
14 14#include <asm/types.h>
15#ifdef CONFIG_32BIT
16
17#define LC_ORDER 0
18#define LC_PAGES 1
19
20struct save_area {
21 u32 ext_save;
22 u64 timer;
23 u64 clk_cmp;
24 u8 pad1[24];
25 u8 psw[8];
26 u32 pref_reg;
27 u8 pad2[20];
28 u32 acc_regs[16];
29 u64 fp_regs[4];
30 u32 gp_regs[16];
31 u32 ctrl_regs[16];
32} __packed;
33
34struct _lowcore {
35 psw_t restart_psw; /* 0x0000 */
36 psw_t restart_old_psw; /* 0x0008 */
37 __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */
38 __u32 ipl_parmblock_ptr; /* 0x0014 */
39 psw_t external_old_psw; /* 0x0018 */
40 psw_t svc_old_psw; /* 0x0020 */
41 psw_t program_old_psw; /* 0x0028 */
42 psw_t mcck_old_psw; /* 0x0030 */
43 psw_t io_old_psw; /* 0x0038 */
44 __u8 pad_0x0040[0x0058-0x0040]; /* 0x0040 */
45 psw_t external_new_psw; /* 0x0058 */
46 psw_t svc_new_psw; /* 0x0060 */
47 psw_t program_new_psw; /* 0x0068 */
48 psw_t mcck_new_psw; /* 0x0070 */
49 psw_t io_new_psw; /* 0x0078 */
50 __u32 ext_params; /* 0x0080 */
51 __u16 ext_cpu_addr; /* 0x0084 */
52 __u16 ext_int_code; /* 0x0086 */
53 __u16 svc_ilc; /* 0x0088 */
54 __u16 svc_code; /* 0x008a */
55 __u16 pgm_ilc; /* 0x008c */
56 __u16 pgm_code; /* 0x008e */
57 __u32 trans_exc_code; /* 0x0090 */
58 __u16 mon_class_num; /* 0x0094 */
59 __u8 per_code; /* 0x0096 */
60 __u8 per_atmid; /* 0x0097 */
61 __u32 per_address; /* 0x0098 */
62 __u32 monitor_code; /* 0x009c */
63 __u8 exc_access_id; /* 0x00a0 */
64 __u8 per_access_id; /* 0x00a1 */
65 __u8 op_access_id; /* 0x00a2 */
66 __u8 ar_mode_id; /* 0x00a3 */
67 __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */
68 __u16 subchannel_id; /* 0x00b8 */
69 __u16 subchannel_nr; /* 0x00ba */
70 __u32 io_int_parm; /* 0x00bc */
71 __u32 io_int_word; /* 0x00c0 */
72 __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
73 __u32 stfl_fac_list; /* 0x00c8 */
74 __u8 pad_0x00cc[0x00d4-0x00cc]; /* 0x00cc */
75 __u32 extended_save_area_addr; /* 0x00d4 */
76 __u32 cpu_timer_save_area[2]; /* 0x00d8 */
77 __u32 clock_comp_save_area[2]; /* 0x00e0 */
78 __u32 mcck_interruption_code[2]; /* 0x00e8 */
79 __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
80 __u32 external_damage_code; /* 0x00f4 */
81 __u32 failing_storage_address; /* 0x00f8 */
82 __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */
83 psw_t psw_save_area; /* 0x0100 */
84 __u32 prefixreg_save_area; /* 0x0108 */
85 __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */
86
87 /* CPU register save area: defined by architecture */
88 __u32 access_regs_save_area[16]; /* 0x0120 */
89 __u32 floating_pt_save_area[8]; /* 0x0160 */
90 __u32 gpregs_save_area[16]; /* 0x0180 */
91 __u32 cregs_save_area[16]; /* 0x01c0 */
92
93 /* Save areas. */
94 __u32 save_area_sync[8]; /* 0x0200 */
95 __u32 save_area_async[8]; /* 0x0220 */
96 __u32 save_area_restart[1]; /* 0x0240 */
97
98 /* CPU flags. */
99 __u32 cpu_flags; /* 0x0244 */
100
101 /* Return psws. */
102 psw_t return_psw; /* 0x0248 */
103 psw_t return_mcck_psw; /* 0x0250 */
104
105 /* CPU time accounting values */
106 __u64 sync_enter_timer; /* 0x0258 */
107 __u64 async_enter_timer; /* 0x0260 */
108 __u64 mcck_enter_timer; /* 0x0268 */
109 __u64 exit_timer; /* 0x0270 */
110 __u64 user_timer; /* 0x0278 */
111 __u64 system_timer; /* 0x0280 */
112 __u64 steal_timer; /* 0x0288 */
113 __u64 last_update_timer; /* 0x0290 */
114 __u64 last_update_clock; /* 0x0298 */
115 __u64 int_clock; /* 0x02a0 */
116 __u64 mcck_clock; /* 0x02a8 */
117 __u64 clock_comparator; /* 0x02b0 */
118
119 /* Current process. */
120 __u32 current_task; /* 0x02b8 */
121 __u32 thread_info; /* 0x02bc */
122 __u32 kernel_stack; /* 0x02c0 */
123
124 /* Interrupt, panic and restart stack. */
125 __u32 async_stack; /* 0x02c4 */
126 __u32 panic_stack; /* 0x02c8 */
127 __u32 restart_stack; /* 0x02cc */
128
129 /* Restart function and parameter. */
130 __u32 restart_fn; /* 0x02d0 */
131 __u32 restart_data; /* 0x02d4 */
132 __u32 restart_source; /* 0x02d8 */
133
134 /* Address space pointer. */
135 __u32 kernel_asce; /* 0x02dc */
136 __u32 user_asce; /* 0x02e0 */
137 __u32 current_pid; /* 0x02e4 */
138
139 /* SMP info area */
140 __u32 cpu_nr; /* 0x02e8 */
141 __u32 softirq_pending; /* 0x02ec */
142 __u32 percpu_offset; /* 0x02f0 */
143 __u32 machine_flags; /* 0x02f4 */
144 __u32 ftrace_func; /* 0x02f8 */
145 __u32 spinlock_lockval; /* 0x02fc */
146
147 __u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */
148
149 /*
150 * 0xe00 contains the address of the IPL Parameter Information
151 * block. Dump tools need IPIB for IPL after dump.
152 * Note: do not change the position of any fields in 0x0e00-0x0f00
153 */
154 __u32 ipib; /* 0x0e00 */
155 __u32 ipib_checksum; /* 0x0e04 */
156 __u32 vmcore_info; /* 0x0e08 */
157 __u8 pad_0x0e0c[0x0e18-0x0e0c]; /* 0x0e0c */
158 __u32 os_info; /* 0x0e18 */
159 __u8 pad_0x0e1c[0x0f00-0x0e1c]; /* 0x0e1c */
160
161 /* Extended facility list */
162 __u64 stfle_fac_list[32]; /* 0x0f00 */
163} __packed;
164
165#else /* CONFIG_32BIT */
166 15
167#define LC_ORDER 1 16#define LC_ORDER 1
168#define LC_PAGES 2 17#define LC_PAGES 2
@@ -183,6 +32,11 @@ struct save_area {
183 u64 ctrl_regs[16]; 32 u64 ctrl_regs[16];
184} __packed; 33} __packed;
185 34
35struct save_area_ext {
36 struct save_area sa;
37 __vector128 vx_regs[32];
38};
39
186struct _lowcore { 40struct _lowcore {
187 __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */ 41 __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
188 __u32 ipl_parmblock_ptr; /* 0x0014 */ 42 __u32 ipl_parmblock_ptr; /* 0x0014 */
@@ -286,7 +140,7 @@ struct _lowcore {
286 __u64 percpu_offset; /* 0x0378 */ 140 __u64 percpu_offset; /* 0x0378 */
287 __u64 vdso_per_cpu_data; /* 0x0380 */ 141 __u64 vdso_per_cpu_data; /* 0x0380 */
288 __u64 machine_flags; /* 0x0388 */ 142 __u64 machine_flags; /* 0x0388 */
289 __u64 ftrace_func; /* 0x0390 */ 143 __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
290 __u64 gmap; /* 0x0398 */ 144 __u64 gmap; /* 0x0398 */
291 __u32 spinlock_lockval; /* 0x03a0 */ 145 __u32 spinlock_lockval; /* 0x03a0 */
292 __u8 pad_0x03a0[0x0400-0x03a4]; /* 0x03a4 */ 146 __u8 pad_0x03a0[0x0400-0x03a4]; /* 0x03a4 */
@@ -310,7 +164,10 @@ struct _lowcore {
310 164
311 /* Extended facility list */ 165 /* Extended facility list */
312 __u64 stfle_fac_list[32]; /* 0x0f00 */ 166 __u64 stfle_fac_list[32]; /* 0x0f00 */
313 __u8 pad_0x1000[0x11b8-0x1000]; /* 0x1000 */ 167 __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
168
169 /* Pointer to vector register save area */
170 __u64 vector_save_area_addr; /* 0x11b0 */
314 171
315 /* 64 bit extparam used for pfault/diag 250: defined by architecture */ 172 /* 64 bit extparam used for pfault/diag 250: defined by architecture */
316 __u64 ext_params2; /* 0x11B8 */ 173 __u64 ext_params2; /* 0x11B8 */
@@ -334,13 +191,12 @@ struct _lowcore {
334 191
335 /* Transaction abort diagnostic block */ 192 /* Transaction abort diagnostic block */
336 __u8 pgm_tdb[256]; /* 0x1800 */ 193 __u8 pgm_tdb[256]; /* 0x1800 */
194 __u8 pad_0x1900[0x1c00-0x1900]; /* 0x1900 */
337 195
338 /* align to the top of the prefix area */ 196 /* Software defined save area for vector registers */
339 __u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */ 197 __u8 vector_save_area[1024]; /* 0x1c00 */
340} __packed; 198} __packed;
341 199
342#endif /* CONFIG_32BIT */
343
344#define S390_lowcore (*((struct _lowcore *) 0)) 200#define S390_lowcore (*((struct _lowcore *) 0))
345 201
346extern struct _lowcore *lowcore_ptr[]; 202extern struct _lowcore *lowcore_ptr[];
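
The offset comments double as the authoritative layout, so the new vector fields can be pinned at compile time; a sketch in the style of the layout assertions the kernel uses elsewhere:

#include <linux/bug.h>
#include <linux/stddef.h>

static inline void sketch_lowcore_layout_checks(void)
{
	BUILD_BUG_ON(offsetof(struct _lowcore, vector_save_area_addr) != 0x11b0);
	BUILD_BUG_ON(offsetof(struct _lowcore, vector_save_area) != 0x1c00);
	BUILD_BUG_ON(sizeof(((struct _lowcore *) 0)->vector_save_area) != 1024);
}
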
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 9977e08df5bd..b55a59e1d134 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -8,7 +8,7 @@
8 8
9#include <uapi/asm/mman.h> 9#include <uapi/asm/mman.h>
10 10
11#if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) 11#ifndef __ASSEMBLY__
12int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); 12int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags) 13#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
14#endif 14#endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 3815bfea1b2d..d25d9ff10ba8 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -19,9 +19,7 @@ static inline int init_new_context(struct task_struct *tsk,
19 atomic_set(&mm->context.attach_count, 0); 19 atomic_set(&mm->context.attach_count, 0);
20 mm->context.flush_mm = 0; 20 mm->context.flush_mm = 0;
21 mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; 21 mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
22#ifdef CONFIG_64BIT
23 mm->context.asce_bits |= _ASCE_TYPE_REGION3; 22 mm->context.asce_bits |= _ASCE_TYPE_REGION3;
24#endif
25 mm->context.has_pgste = 0; 23 mm->context.has_pgste = 0;
26 mm->context.use_skey = 0; 24 mm->context.use_skey = 0;
27 mm->context.asce_limit = STACK_TOP_MAX; 25 mm->context.asce_limit = STACK_TOP_MAX;
@@ -62,6 +60,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
62{ 60{
63 int cpu = smp_processor_id(); 61 int cpu = smp_processor_id();
64 62
63 S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
65 if (prev == next) 64 if (prev == next)
66 return; 65 return;
67 if (MACHINE_HAS_TLB_LC) 66 if (MACHINE_HAS_TLB_LC)
@@ -73,7 +72,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
73 atomic_dec(&prev->context.attach_count); 72 atomic_dec(&prev->context.attach_count);
74 if (MACHINE_HAS_TLB_LC) 73 if (MACHINE_HAS_TLB_LC)
75 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); 74 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
76 S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
77} 75}
78 76
79#define finish_arch_post_lock_switch finish_arch_post_lock_switch 77#define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -110,14 +108,23 @@ static inline void activate_mm(struct mm_struct *prev,
110static inline void arch_dup_mmap(struct mm_struct *oldmm, 108static inline void arch_dup_mmap(struct mm_struct *oldmm,
111 struct mm_struct *mm) 109 struct mm_struct *mm)
112{ 110{
113#ifdef CONFIG_64BIT
114 if (oldmm->context.asce_limit < mm->context.asce_limit) 111 if (oldmm->context.asce_limit < mm->context.asce_limit)
115 crst_table_downgrade(mm, oldmm->context.asce_limit); 112 crst_table_downgrade(mm, oldmm->context.asce_limit);
116#endif
117} 113}
118 114
119static inline void arch_exit_mmap(struct mm_struct *mm) 115static inline void arch_exit_mmap(struct mm_struct *mm)
120{ 116{
121} 117}
122 118
119static inline void arch_unmap(struct mm_struct *mm,
120 struct vm_area_struct *vma,
121 unsigned long start, unsigned long end)
122{
123}
124
125static inline void arch_bprm_mm_init(struct mm_struct *mm,
126 struct vm_area_struct *vma)
127{
128}
129
123#endif /* __S390_MMU_CONTEXT_H */ 130#endif /* __S390_MMU_CONTEXT_H */
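
The user ASCE that switch_mm now publishes before the prev == next early return is simply the page-table origin combined with the type and length bits chosen in init_new_context; shown as plain arithmetic (the actual load into the control registers happens in set_user_asce):

/* Sketch: the value stored into S390_lowcore.user_asce above. */
static inline unsigned long sketch_user_asce(struct mm_struct *mm)
{
	return mm->context.asce_bits | __pa(mm->pgd);
}
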
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 35f8ec185616..3027a5a72b74 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -38,7 +38,7 @@ struct mci {
38 __u32 pm : 1; /* 22 psw program mask and cc validity */ 38 __u32 pm : 1; /* 22 psw program mask and cc validity */
39 __u32 ia : 1; /* 23 psw instruction address validity */ 39 __u32 ia : 1; /* 23 psw instruction address validity */
40 __u32 fa : 1; /* 24 failing storage address validity */ 40 __u32 fa : 1; /* 24 failing storage address validity */
41 __u32 : 1; /* 25 */ 41 __u32 vr : 1; /* 25 vector register validity */
42 __u32 ec : 1; /* 26 external damage code validity */ 42 __u32 ec : 1; /* 26 external damage code validity */
43 __u32 fp : 1; /* 27 floating point register validity */ 43 __u32 fp : 1; /* 27 floating point register validity */
44 __u32 gr : 1; /* 28 general register validity */ 44 __u32 gr : 1; /* 28 general register validity */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 114258eeaacd..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
37#endif 37#endif
38} 38}
39 39
40static inline void clear_page(void *page) 40#define clear_page(page) memset((page), 0, PAGE_SIZE)
41{
42 register unsigned long reg1 asm ("1") = 0;
43 register void *reg2 asm ("2") = page;
44 register unsigned long reg3 asm ("3") = 4096;
45 asm volatile(
46 " mvcl 2,0"
47 : "+d" (reg2), "+d" (reg3) : "d" (reg1)
48 : "memory", "cc");
49}
50 41
51/* 42/*
52 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to 43 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
@@ -162,6 +153,4 @@ static inline int devmem_is_allowed(unsigned long pfn)
162#include <asm-generic/memory_model.h> 153#include <asm-generic/memory_model.h>
163#include <asm-generic/getorder.h> 154#include <asm-generic/getorder.h>
164 155
165#define __HAVE_ARCH_GATE_AREA 1
166
167#endif /* _S390_PAGE_H */ 156#endif /* _S390_PAGE_H */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c030900320e0..a648338c434a 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -7,6 +7,7 @@
7#define PCI_BAR_COUNT 6 7#define PCI_BAR_COUNT 6
8 8
9#include <linux/pci.h> 9#include <linux/pci.h>
10#include <linux/mutex.h>
10#include <asm-generic/pci.h> 11#include <asm-generic/pci.h>
11#include <asm-generic/pci-dma-compat.h> 12#include <asm-generic/pci-dma-compat.h>
12#include <asm/pci_clp.h> 13#include <asm/pci_clp.h>
@@ -44,16 +45,8 @@ struct zpci_fmb {
44 u64 rpcit_ops; 45 u64 rpcit_ops;
45 u64 dma_rbytes; 46 u64 dma_rbytes;
46 u64 dma_wbytes; 47 u64 dma_wbytes;
47 /* software counters */
48 atomic64_t allocated_pages;
49 atomic64_t mapped_pages;
50 atomic64_t unmapped_pages;
51} __packed __aligned(16); 48} __packed __aligned(16);
52 49
53#define ZPCI_MSI_VEC_BITS 11
54#define ZPCI_MSI_VEC_MAX (1 << ZPCI_MSI_VEC_BITS)
55#define ZPCI_MSI_VEC_MASK (ZPCI_MSI_VEC_MAX - 1)
56
57enum zpci_state { 50enum zpci_state {
58 ZPCI_FN_STATE_RESERVED, 51 ZPCI_FN_STATE_RESERVED,
59 ZPCI_FN_STATE_STANDBY, 52 ZPCI_FN_STATE_STANDBY,
@@ -84,12 +77,14 @@ struct zpci_dev {
84 u8 pft; /* pci function type */ 77 u8 pft; /* pci function type */
85 u16 domain; 78 u16 domain;
86 79
80 struct mutex lock;
87 u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ 81 u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
88 u32 uid; /* user defined id */ 82 u32 uid; /* user defined id */
89 u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */ 83 u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
90 84
91 /* IRQ stuff */ 85 /* IRQ stuff */
92 u64 msi_addr; /* MSI address */ 86 u64 msi_addr; /* MSI address */
87 unsigned int max_msi; /* maximum number of MSI's */
93 struct airq_iv *aibv; /* adapter interrupt bit vector */ 88 struct airq_iv *aibv; /* adapter interrupt bit vector */
94 unsigned int aisb; /* number of the summary bit */ 89 unsigned int aisb; /* number of the summary bit */
95 90
@@ -114,6 +109,10 @@ struct zpci_dev {
114 /* Function measurement block */ 109 /* Function measurement block */
115 struct zpci_fmb *fmb; 110 struct zpci_fmb *fmb;
116 u16 fmb_update; /* update interval */ 111 u16 fmb_update; /* update interval */
112 /* software counters */
113 atomic64_t allocated_pages;
114 atomic64_t mapped_pages;
115 atomic64_t unmapped_pages;
117 116
118 enum pci_bus_speed max_bus_speed; 117 enum pci_bus_speed max_bus_speed;
119 118
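
With the software counters moved out of the hardware-defined zpci_fmb and into struct zpci_dev, the DMA paths account against the device itself. A sketch of that accounting (helper name hypothetical):

/* Sketch: bump the relocated software counters from a DMA map path. */
static inline void sketch_zpci_account(struct zpci_dev *zdev, int nr_pages)
{
	atomic64_add(nr_pages, &zdev->allocated_pages);
	atomic64_add(nr_pages, &zdev->mapped_pages);
}
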
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index d194d544d694..1a9a98de5bde 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -16,6 +16,7 @@
16struct zpci_iomap_entry { 16struct zpci_iomap_entry {
17 u32 fh; 17 u32 fh;
18 u8 bar; 18 u8 bar;
19 u16 count;
19}; 20};
20 21
21extern struct zpci_iomap_entry *zpci_iomap_start; 22extern struct zpci_iomap_entry *zpci_iomap_start;
@@ -139,7 +140,8 @@ static inline int zpci_memcpy_fromio(void *dst,
139 int size, rc = 0; 140 int size, rc = 0;
140 141
141 while (n > 0) { 142 while (n > 0) {
142 size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8); 143 size = zpci_get_max_write_size((u64 __force) src,
144 (u64) dst, n, 8);
143 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size); 145 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
144 rc = zpci_read_single(req, dst, offset, size); 146 rc = zpci_read_single(req, dst, offset, size);
145 if (rc) 147 if (rc)
@@ -162,7 +164,8 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
162 return -EINVAL; 164 return -EINVAL;
163 165
164 while (n > 0) { 166 while (n > 0) {
165 size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128); 167 size = zpci_get_max_write_size((u64 __force) dst,
168 (u64) src, n, 128);
166 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size); 169 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
167 170
168 if (size > 8) /* main path */ 171 if (size > 8) /* main path */
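
The __force casts are for sparse: src and dst are __iomem pointers on one side of each copy, and converting them to u64 without __force would raise an address-space warning even though the generated code is unchanged. Minimal illustration:

#include <linux/types.h>
#include <linux/compiler.h>

/* Sketch: stripping the __iomem address space for address arithmetic. */
static inline u64 sketch_iomem_addr(const volatile void __iomem *p)
{
	return (u64 __force) p;		/* no warning from sparse */
}
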
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index fa91e0097458..6d6556ca24aa 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,8 +10,6 @@
10 */ 10 */
11#define __my_cpu_offset S390_lowcore.percpu_offset 11#define __my_cpu_offset S390_lowcore.percpu_offset
12 12
13#ifdef CONFIG_64BIT
14
15/* 13/*
16 * For 64 bit module code, the module may be more than 4G above the 14 * For 64 bit module code, the module may be more than 4G above the
17 * per cpu area, use weak definitions to force the compiler to 15 * per cpu area, use weak definitions to force the compiler to
@@ -31,7 +29,7 @@
31 pcp_op_T__ old__, new__, prev__; \ 29 pcp_op_T__ old__, new__, prev__; \
32 pcp_op_T__ *ptr__; \ 30 pcp_op_T__ *ptr__; \
33 preempt_disable(); \ 31 preempt_disable(); \
34 ptr__ = __this_cpu_ptr(&(pcp)); \ 32 ptr__ = raw_cpu_ptr(&(pcp)); \
35 prev__ = *ptr__; \ 33 prev__ = *ptr__; \
36 do { \ 34 do { \
37 old__ = prev__; \ 35 old__ = prev__; \
@@ -70,7 +68,7 @@
70 pcp_op_T__ val__ = (val); \ 68 pcp_op_T__ val__ = (val); \
71 pcp_op_T__ old__, *ptr__; \ 69 pcp_op_T__ old__, *ptr__; \
72 preempt_disable(); \ 70 preempt_disable(); \
73 ptr__ = __this_cpu_ptr(&(pcp)); \ 71 ptr__ = raw_cpu_ptr(&(pcp)); \
74 if (__builtin_constant_p(val__) && \ 72 if (__builtin_constant_p(val__) && \
75 ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ 73 ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
76 asm volatile( \ 74 asm volatile( \
@@ -97,7 +95,7 @@
97 pcp_op_T__ val__ = (val); \ 95 pcp_op_T__ val__ = (val); \
98 pcp_op_T__ old__, *ptr__; \ 96 pcp_op_T__ old__, *ptr__; \
99 preempt_disable(); \ 97 preempt_disable(); \
100 ptr__ = __this_cpu_ptr(&(pcp)); \ 98 ptr__ = raw_cpu_ptr(&(pcp)); \
101 asm volatile( \ 99 asm volatile( \
102 op " %[old__],%[val__],%[ptr__]\n" \ 100 op " %[old__],%[val__],%[ptr__]\n" \
103 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ 101 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
@@ -116,7 +114,7 @@
116 pcp_op_T__ val__ = (val); \ 114 pcp_op_T__ val__ = (val); \
117 pcp_op_T__ old__, *ptr__; \ 115 pcp_op_T__ old__, *ptr__; \
118 preempt_disable(); \ 116 preempt_disable(); \
119 ptr__ = __this_cpu_ptr(&(pcp)); \ 117 ptr__ = raw_cpu_ptr(&(pcp)); \
120 asm volatile( \ 118 asm volatile( \
121 op " %[old__],%[val__],%[ptr__]\n" \ 119 op " %[old__],%[val__],%[ptr__]\n" \
122 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ 120 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
@@ -138,7 +136,7 @@
138 pcp_op_T__ ret__; \ 136 pcp_op_T__ ret__; \
139 pcp_op_T__ *ptr__; \ 137 pcp_op_T__ *ptr__; \
140 preempt_disable(); \ 138 preempt_disable(); \
141 ptr__ = __this_cpu_ptr(&(pcp)); \ 139 ptr__ = raw_cpu_ptr(&(pcp)); \
142 ret__ = cmpxchg(ptr__, oval, nval); \ 140 ret__ = cmpxchg(ptr__, oval, nval); \
143 preempt_enable(); \ 141 preempt_enable(); \
144 ret__; \ 142 ret__; \
@@ -154,7 +152,7 @@
154 typeof(pcp) *ptr__; \ 152 typeof(pcp) *ptr__; \
155 typeof(pcp) ret__; \ 153 typeof(pcp) ret__; \
156 preempt_disable(); \ 154 preempt_disable(); \
157 ptr__ = __this_cpu_ptr(&(pcp)); \ 155 ptr__ = raw_cpu_ptr(&(pcp)); \
158 ret__ = xchg(ptr__, nval); \ 156 ret__ = xchg(ptr__, nval); \
159 preempt_enable(); \ 157 preempt_enable(); \
160 ret__; \ 158 ret__; \
@@ -173,8 +171,8 @@
173 typeof(pcp2) *p2__; \ 171 typeof(pcp2) *p2__; \
174 int ret__; \ 172 int ret__; \
175 preempt_disable(); \ 173 preempt_disable(); \
176 p1__ = __this_cpu_ptr(&(pcp1)); \ 174 p1__ = raw_cpu_ptr(&(pcp1)); \
177 p2__ = __this_cpu_ptr(&(pcp2)); \ 175 p2__ = raw_cpu_ptr(&(pcp2)); \
178 ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ 176 ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
179 preempt_enable(); \ 177 preempt_enable(); \
180 ret__; \ 178 ret__; \
@@ -183,8 +181,6 @@
183#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double 181#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
184#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double 182#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
185 183
186#endif /* CONFIG_64BIT */
187
188#include <asm-generic/percpu.h> 184#include <asm-generic/percpu.h>
189 185
190#endif /* __ARCH_S390_PERCPU__ */ 186#endif /* __ARCH_S390_PERCPU__ */
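
These arch_this_cpu_* macros back the generic this_cpu operations, so with the 31-bit fallbacks gone every caller gets the interlocked forms. An illustrative user (counter name hypothetical):

#include <linux/percpu.h>

static DEFINE_PER_CPU(long, sketch_counter);

static void sketch_bump(void)
{
	/* expands through the asm ops above */
	this_cpu_add(sketch_counter, 1);
}
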
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 159a8ec6da9a..4cb19fe76dd9 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -9,8 +9,6 @@
9#ifndef _ASM_S390_PERF_EVENT_H 9#ifndef _ASM_S390_PERF_EVENT_H
10#define _ASM_S390_PERF_EVENT_H 10#define _ASM_S390_PERF_EVENT_H
11 11
12#ifdef CONFIG_64BIT
13
14#include <linux/perf_event.h> 12#include <linux/perf_event.h>
15#include <linux/device.h> 13#include <linux/device.h>
16#include <asm/cpu_mf.h> 14#include <asm/cpu_mf.h>
@@ -92,5 +90,4 @@ struct sf_raw_sample {
92int perf_reserve_sampling(void); 90int perf_reserve_sampling(void);
93void perf_release_sampling(void); 91void perf_release_sampling(void);
94 92
95#endif /* CONFIG_64BIT */
96#endif /* _ASM_S390_PERF_EVENT_H */ 93#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 9e18a61d3df3..51e7fb634ebc 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -18,14 +18,13 @@
18unsigned long *crst_table_alloc(struct mm_struct *); 18unsigned long *crst_table_alloc(struct mm_struct *);
19void crst_table_free(struct mm_struct *, unsigned long *); 19void crst_table_free(struct mm_struct *, unsigned long *);
20 20
21unsigned long *page_table_alloc(struct mm_struct *, unsigned long); 21unsigned long *page_table_alloc(struct mm_struct *);
22void page_table_free(struct mm_struct *, unsigned long *); 22void page_table_free(struct mm_struct *, unsigned long *);
23void page_table_free_rcu(struct mmu_gather *, unsigned long *); 23void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
24 24
25void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
26 bool init_skey);
27int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, 25int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
28 unsigned long key, bool nq); 26 unsigned long key, bool nq);
27unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
29 28
30static inline void clear_table(unsigned long *s, unsigned long val, size_t n) 29static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
31{ 30{
@@ -34,11 +33,7 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
34 *s = val; 33 *s = val;
35 n = (n / 256) - 1; 34 n = (n / 256) - 1;
36 asm volatile( 35 asm volatile(
37#ifdef CONFIG_64BIT
38 " mvc 8(248,%0),0(%0)\n" 36 " mvc 8(248,%0),0(%0)\n"
39#else
40 " mvc 4(252,%0),0(%0)\n"
41#endif
42 "0: mvc 256(256,%0),0(%0)\n" 37 "0: mvc 256(256,%0),0(%0)\n"
43 " la %0,256(%0)\n" 38 " la %0,256(%0)\n"
44 " brct %1,0b\n" 39 " brct %1,0b\n"
@@ -51,24 +46,6 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
51 clear_table(crst, entry, sizeof(unsigned long)*2048); 46 clear_table(crst, entry, sizeof(unsigned long)*2048);
52} 47}
53 48
54#ifndef CONFIG_64BIT
55
56static inline unsigned long pgd_entry_type(struct mm_struct *mm)
57{
58 return _SEGMENT_ENTRY_EMPTY;
59}
60
61#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
62#define pud_free(mm, x) do { } while (0)
63
64#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
65#define pmd_free(mm, x) do { } while (0)
66
67#define pgd_populate(mm, pgd, pud) BUG()
68#define pud_populate(mm, pud, pmd) BUG()
69
70#else /* CONFIG_64BIT */
71
72static inline unsigned long pgd_entry_type(struct mm_struct *mm) 49static inline unsigned long pgd_entry_type(struct mm_struct *mm)
73{ 50{
74 if (mm->context.asce_limit <= (1UL << 31)) 51 if (mm->context.asce_limit <= (1UL << 31))
@@ -120,8 +97,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
120 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd); 97 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
121} 98}
122 99
123#endif /* CONFIG_64BIT */
124
125static inline pgd_t *pgd_alloc(struct mm_struct *mm) 100static inline pgd_t *pgd_alloc(struct mm_struct *mm)
126{ 101{
127 spin_lock_init(&mm->context.list_lock); 102 spin_lock_init(&mm->context.list_lock);
@@ -145,8 +120,8 @@ static inline void pmd_populate(struct mm_struct *mm,
145/* 120/*
146 * page table entry allocation/free routines. 121 * page table entry allocation/free routines.
147 */ 122 */
148#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr)) 123#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
149#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr)) 124#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
150 125
151#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) 126#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
152#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) 127#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
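
clear_table() relies on mvc copying strictly byte by byte, left to right: once the first entry is seeded, an overlapping copy replicates it across the first 256 bytes, and the loop then fills the remaining blocks. A C model of the same idea; sketch_mvc() is a byte loop standing in for the instruction, the kernel code itself stays in asm:

/* Byte-wise copy: models the semantics of the mvc instruction. */
static void sketch_mvc(unsigned char *dst, const unsigned char *src,
		       size_t len)
{
	while (len--)
		*dst++ = *src++;
}

static void sketch_clear_table(unsigned long *table, unsigned long val,
			       size_t bytes)
{
	unsigned char *s = (unsigned char *) table;
	size_t done;

	table[0] = val;			  /* seed the first 8-byte entry */
	sketch_mvc(s + 8, s, 248);	  /* overlap: replicate to 256 bytes */
	for (done = 256; done < bytes; done += 256)
		sketch_mvc(s + done, s + done - 256, 256); /* next block */
}
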
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fcba5e03839f..989cfae9e202 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -30,6 +30,7 @@
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/mm_types.h> 31#include <linux/mm_types.h>
32#include <linux/page-flags.h> 32#include <linux/page-flags.h>
33#include <linux/radix-tree.h>
33#include <asm/bug.h> 34#include <asm/bug.h>
34#include <asm/page.h> 35#include <asm/page.h>
35 36
@@ -65,15 +66,9 @@ extern unsigned long zero_page_mask;
65 * table can map 66 * table can map
66 * PGDIR_SHIFT determines what a third-level page table entry can map 67 * PGDIR_SHIFT determines what a third-level page table entry can map
67 */ 68 */
68#ifndef CONFIG_64BIT 69#define PMD_SHIFT 20
69# define PMD_SHIFT 20 70#define PUD_SHIFT 31
70# define PUD_SHIFT 20 71#define PGDIR_SHIFT 42
71# define PGDIR_SHIFT 20
72#else /* CONFIG_64BIT */
73# define PMD_SHIFT 20
74# define PUD_SHIFT 31
75# define PGDIR_SHIFT 42
76#endif /* CONFIG_64BIT */
77 72
78#define PMD_SIZE (1UL << PMD_SHIFT) 73#define PMD_SIZE (1UL << PMD_SHIFT)
79#define PMD_MASK (~(PMD_SIZE-1)) 74#define PMD_MASK (~(PMD_SIZE-1))
@@ -89,16 +84,11 @@ extern unsigned long zero_page_mask;
89 * that leads to 1024 pte per pgd 84 * that leads to 1024 pte per pgd
90 */ 85 */
91#define PTRS_PER_PTE 256 86#define PTRS_PER_PTE 256
92#ifndef CONFIG_64BIT
93#define PTRS_PER_PMD 1
94#define PTRS_PER_PUD 1
95#else /* CONFIG_64BIT */
96#define PTRS_PER_PMD 2048 87#define PTRS_PER_PMD 2048
97#define PTRS_PER_PUD 2048 88#define PTRS_PER_PUD 2048
98#endif /* CONFIG_64BIT */
99#define PTRS_PER_PGD 2048 89#define PTRS_PER_PGD 2048
100 90
101#define FIRST_USER_ADDRESS 0 91#define FIRST_USER_ADDRESS 0UL
102 92
103#define pte_ERROR(e) \ 93#define pte_ERROR(e) \
104 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e)) 94 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
@@ -124,13 +114,21 @@ extern struct page *vmemmap;
124 114
125#define VMEM_MAX_PHYS ((unsigned long) vmemmap) 115#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
126 116
127#ifdef CONFIG_64BIT
128extern unsigned long MODULES_VADDR; 117extern unsigned long MODULES_VADDR;
129extern unsigned long MODULES_END; 118extern unsigned long MODULES_END;
130#define MODULES_VADDR MODULES_VADDR 119#define MODULES_VADDR MODULES_VADDR
131#define MODULES_END MODULES_END 120#define MODULES_END MODULES_END
132#define MODULES_LEN (1UL << 31) 121#define MODULES_LEN (1UL << 31)
133#endif 122
123static inline int is_module_addr(void *addr)
124{
125 BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
126 if (addr < (void *)MODULES_VADDR)
127 return 0;
128 if (addr > (void *)MODULES_END)
129 return 0;
130 return 1;
131}
134 132
135/* 133/*
136 * A 31 bit pagetable entry of S390 has following format: 134 * A 31 bit pagetable entry of S390 has following format:
@@ -216,7 +214,6 @@ extern unsigned long MODULES_END;
216 */ 214 */
217 215
218/* Hardware bits in the page table entry */ 216/* Hardware bits in the page table entry */
219#define _PAGE_CO 0x100 /* HW Change-bit override */
220#define _PAGE_PROTECT 0x200 /* HW read-only bit */ 217#define _PAGE_PROTECT 0x200 /* HW read-only bit */
221#define _PAGE_INVALID 0x400 /* HW invalid bit */ 218#define _PAGE_INVALID 0x400 /* HW invalid bit */
222#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */ 219#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
@@ -233,14 +230,14 @@ extern unsigned long MODULES_END;
233#define __HAVE_ARCH_PTE_SPECIAL 230#define __HAVE_ARCH_PTE_SPECIAL
234 231
235/* Set of bits not changed in pte_modify */ 232/* Set of bits not changed in pte_modify */
236#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \ 233#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
237 _PAGE_DIRTY | _PAGE_YOUNG) 234 _PAGE_YOUNG)
238 235
239/* 236/*
240 * handle_pte_fault uses pte_present, pte_none and pte_file to find out the 237 * handle_pte_fault uses pte_present and pte_none to find out the pte type
241 * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit 238 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
242 * is used to distinguish present from not-present ptes. It is changed only 239 * distinguish present from not-present ptes. It is changed only with the page
243 * with the page table lock held. 240 * table lock held.
244 * 241 *
245 * The following table gives the different possible bit combinations for 242 * The following table gives the different possible bit combinations for
246 * the pte hardware and software bits in the last 12 bits of a pte: 243 * the pte hardware and software bits in the last 12 bits of a pte:
@@ -267,53 +264,9 @@ extern unsigned long MODULES_END;
267 * 264 *
268 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001 265 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
269 * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400 266 * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
270 * pte_file is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
271 * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 267 * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
272 */ 268 */
273 269
274#ifndef CONFIG_64BIT
275
276/* Bits in the segment table address-space-control-element */
277#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
278#define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
279#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
280#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
281#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
282
283/* Bits in the segment table entry */
284#define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */
285#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
286#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
287#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
288#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
289#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
290#define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_PROTECT
291
292#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
293#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
294
295/*
296 * Segment table entry encoding (I = invalid, R = read-only bit):
297 * ..R...I.....
298 * prot-none ..1...1.....
299 * read-only ..1...0.....
300 * read-write ..0...0.....
301 * empty ..0...1.....
302 */
303
304/* Page status table bits for virtualization */
305#define PGSTE_ACC_BITS 0xf0000000UL
306#define PGSTE_FP_BIT 0x08000000UL
307#define PGSTE_PCL_BIT 0x00800000UL
308#define PGSTE_HR_BIT 0x00400000UL
309#define PGSTE_HC_BIT 0x00200000UL
310#define PGSTE_GR_BIT 0x00040000UL
311#define PGSTE_GC_BIT 0x00020000UL
312#define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */
313#define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */
314
315#else /* CONFIG_64BIT */
316
317/* Bits in the segment/region table address-space-control-element */ 270/* Bits in the segment/region table address-space-control-element */
318#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */ 271#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
319#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ 272#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
@@ -346,11 +299,10 @@ extern unsigned long MODULES_END;
346 299
347#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */ 300#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
348#define _REGION3_ENTRY_RO 0x200 /* page protection bit */ 301#define _REGION3_ENTRY_RO 0x200 /* page protection bit */
349#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
350 302
351/* Bits in the segment table entry */ 303/* Bits in the segment table entry */
352#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL 304#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
353#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL 305#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
354#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ 306#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
355#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ 307#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
356#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */ 308#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
@@ -359,30 +311,33 @@ extern unsigned long MODULES_END;
359#define _SEGMENT_ENTRY (0) 311#define _SEGMENT_ENTRY (0)
360#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) 312#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
361 313
362#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ 314#define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
363#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ 315#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
364#define _SEGMENT_ENTRY_SPLIT 0x001 /* THP splitting bit */ 316#define _SEGMENT_ENTRY_SPLIT 0x0800 /* THP splitting bit */
365#define _SEGMENT_ENTRY_YOUNG 0x002 /* SW segment young bit */ 317#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
366#define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_YOUNG 318#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
319#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
367 320
368/* 321/*
369 * Segment table entry encoding (R = read-only, I = invalid, y = young bit): 322 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
370 * ..R...I...y. 323 * dy..R...I...wr
371 * prot-none, old ..0...1...1. 324 * prot-none, clean, old 00..1...1...00
372 * prot-none, young ..1...1...1. 325 * prot-none, clean, young 01..1...1...00
373 * read-only, old ..1...1...0. 326 * prot-none, dirty, old 10..1...1...00
374 * read-only, young ..1...0...1. 327 * prot-none, dirty, young 11..1...1...00
375 * read-write, old ..0...1...0. 328 * read-only, clean, old 00..1...1...01
376 * read-write, young ..0...0...1. 329 * read-only, clean, young 01..1...0...01
330 * read-only, dirty, old 10..1...1...01
331 * read-only, dirty, young 11..1...0...01
332 * read-write, clean, old 00..1...1...11
333 * read-write, clean, young 01..1...0...11
334 * read-write, dirty, old 10..0...1...11
335 * read-write, dirty, young 11..0...0...11
377 * The segment table origin is used to distinguish empty (origin==0) from 336 * The segment table origin is used to distinguish empty (origin==0) from
378 * read-write, old segment table entries (origin!=0) 337 * read-write, old segment table entries (origin!=0)
379 */ 338 */
380 339
381#define _SEGMENT_ENTRY_SPLIT_BIT 0 /* THP splitting bit number */ 340#define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */
382
383/* Set of bits not changed in pmd_modify */
384#define _SEGMENT_CHG_MASK (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
385 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)
386 341
387/* Page status table bits for virtualization */ 342/* Page status table bits for virtualization */
388#define PGSTE_ACC_BITS 0xf000000000000000UL 343#define PGSTE_ACC_BITS 0xf000000000000000UL
@@ -395,8 +350,6 @@ extern unsigned long MODULES_END;
395#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ 350#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
396#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ 351#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
397 352
398#endif /* CONFIG_64BIT */
399
400/* Guest Page State used for virtualization */ 353/* Guest Page State used for virtualization */
401#define _PGSTE_GPS_ZERO 0x0000000080000000UL 354#define _PGSTE_GPS_ZERO 0x0000000080000000UL
402#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL 355#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
@@ -455,10 +408,11 @@ extern unsigned long MODULES_END;
455 * Segment entry (large page) protection definitions. 408 * Segment entry (large page) protection definitions.
456 */ 409 */
457#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \ 410#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
458 _SEGMENT_ENTRY_NONE)
459#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_INVALID | \
460 _SEGMENT_ENTRY_PROTECT) 411 _SEGMENT_ENTRY_PROTECT)
461#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_INVALID) 412#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_PROTECT | \
413 _SEGMENT_ENTRY_READ)
414#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_READ | \
415 _SEGMENT_ENTRY_WRITE)
462 416
463static inline int mm_has_pgste(struct mm_struct *mm) 417static inline int mm_has_pgste(struct mm_struct *mm)
464{ 418{
@@ -469,6 +423,11 @@ static inline int mm_has_pgste(struct mm_struct *mm)
469 return 0; 423 return 0;
470} 424}
471 425
426/*
427 * In the case that a guest uses storage keys
428 * faults should no longer be backed by zero pages
429 */
430#define mm_forbids_zeropage mm_use_skey
472static inline int mm_use_skey(struct mm_struct *mm) 431static inline int mm_use_skey(struct mm_struct *mm)
473{ 432{
474#ifdef CONFIG_PGSTE 433#ifdef CONFIG_PGSTE
@@ -481,19 +440,6 @@ static inline int mm_use_skey(struct mm_struct *mm)
481/* 440/*
482 * pgd/pmd/pte query functions 441 * pgd/pmd/pte query functions
483 */ 442 */
484#ifndef CONFIG_64BIT
485
486static inline int pgd_present(pgd_t pgd) { return 1; }
487static inline int pgd_none(pgd_t pgd) { return 0; }
488static inline int pgd_bad(pgd_t pgd) { return 0; }
489
490static inline int pud_present(pud_t pud) { return 1; }
491static inline int pud_none(pud_t pud) { return 0; }
492static inline int pud_large(pud_t pud) { return 0; }
493static inline int pud_bad(pud_t pud) { return 0; }
494
495#else /* CONFIG_64BIT */
496
497static inline int pgd_present(pgd_t pgd) 443static inline int pgd_present(pgd_t pgd)
498{ 444{
499 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) 445 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
@@ -555,8 +501,6 @@ static inline int pud_bad(pud_t pud)
555 return (pud_val(pud) & mask) != 0; 501 return (pud_val(pud) & mask) != 0;
556} 502}
557 503
558#endif /* CONFIG_64BIT */
559
560static inline int pmd_present(pmd_t pmd) 504static inline int pmd_present(pmd_t pmd)
561{ 505{
562 return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID; 506 return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
@@ -569,25 +513,23 @@ static inline int pmd_none(pmd_t pmd)
569 513
570static inline int pmd_large(pmd_t pmd) 514static inline int pmd_large(pmd_t pmd)
571{ 515{
572#ifdef CONFIG_64BIT
573 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; 516 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
574#else
575 return 0;
576#endif
577} 517}
578 518
 579 static inline int pmd_prot_none(pmd_t pmd) 519 static inline unsigned long pmd_pfn(pmd_t pmd)
580{ 520{
581 return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) && 521 unsigned long origin_mask;
582 (pmd_val(pmd) & _SEGMENT_ENTRY_NONE); 522
523 origin_mask = _SEGMENT_ENTRY_ORIGIN;
524 if (pmd_large(pmd))
525 origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
526 return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
583} 527}
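A worked sketch of the new pmd_pfn(): a large (1 MB) segment entry only defines its origin above the 1 MB boundary, so the wider mask must be applied before shifting. The __pmd() constructor comes from asm/page.h; the origin value below is illustrative.

	pmd_t pmd = __pmd(0x0000000012300000UL | _SEGMENT_ENTRY_LARGE);
	unsigned long pfn = pmd_pfn(pmd);	/* 0x12300000 >> PAGE_SHIFT */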
584 528
585static inline int pmd_bad(pmd_t pmd) 529static inline int pmd_bad(pmd_t pmd)
586{ 530{
587#ifdef CONFIG_64BIT
588 if (pmd_large(pmd)) 531 if (pmd_large(pmd))
589 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0; 532 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
590#endif
591 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; 533 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
592} 534}
593 535
@@ -607,20 +549,22 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
607#define __HAVE_ARCH_PMD_WRITE 549#define __HAVE_ARCH_PMD_WRITE
608static inline int pmd_write(pmd_t pmd) 550static inline int pmd_write(pmd_t pmd)
609{ 551{
610 if (pmd_prot_none(pmd)) 552 return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
611 return 0; 553}
612 return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0; 554
555static inline int pmd_dirty(pmd_t pmd)
556{
557 int dirty = 1;
558 if (pmd_large(pmd))
559 dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
560 return dirty;
613} 561}
614 562
615static inline int pmd_young(pmd_t pmd) 563static inline int pmd_young(pmd_t pmd)
616{ 564{
617 int young = 0; 565 int young = 1;
618#ifdef CONFIG_64BIT 566 if (pmd_large(pmd))
619 if (pmd_prot_none(pmd))
620 young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0;
621 else
622 young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; 567 young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
623#endif
624 return young; 568 return young;
625} 569}
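Only large pmds carry the software DIRTY/YOUNG bits; a pmd that points to a page table is reported dirty and young unconditionally. A short sketch of the state this enables:

	/*
	 * A clean THP stays write-protected, so the first store faults
	 * and the handler can set _SEGMENT_ENTRY_DIRTY.
	 */
	int clean_thp = pmd_large(pmd) && pmd_write(pmd) && !pmd_dirty(pmd);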
626 570
@@ -644,13 +588,6 @@ static inline int pte_swap(pte_t pte)
644 == (_PAGE_INVALID | _PAGE_TYPE); 588 == (_PAGE_INVALID | _PAGE_TYPE);
645} 589}
646 590
647static inline int pte_file(pte_t pte)
648{
649 /* Bit pattern: (pte & 0x601) == 0x600 */
650 return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
651 == (_PAGE_INVALID | _PAGE_PROTECT);
652}
653
654static inline int pte_special(pte_t pte) 591static inline int pte_special(pte_t pte)
655{ 592{
656 return (pte_val(pte) & _PAGE_SPECIAL); 593 return (pte_val(pte) & _PAGE_SPECIAL);
@@ -777,82 +714,67 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
777 714
778/** 715/**
779 * struct gmap_struct - guest address space 716 * struct gmap_struct - guest address space
717 * @crst_list: list of all crst tables used in the guest address space
780 * @mm: pointer to the parent mm_struct 718 * @mm: pointer to the parent mm_struct
719 * @guest_to_host: radix tree with guest to host address translation
720 * @host_to_guest: radix tree with pointer to segment table entries
721 * @guest_table_lock: spinlock to protect all entries in the guest page table
781 * @table: pointer to the page directory 722 * @table: pointer to the page directory
782 * @asce: address space control element for gmap page table 723 * @asce: address space control element for gmap page table
783 * @crst_list: list of all crst tables used in the guest address space
784 * @pfault_enabled: defines if pfaults are applicable for the guest 724 * @pfault_enabled: defines if pfaults are applicable for the guest
785 */ 725 */
786struct gmap { 726struct gmap {
787 struct list_head list; 727 struct list_head list;
728 struct list_head crst_list;
788 struct mm_struct *mm; 729 struct mm_struct *mm;
730 struct radix_tree_root guest_to_host;
731 struct radix_tree_root host_to_guest;
732 spinlock_t guest_table_lock;
789 unsigned long *table; 733 unsigned long *table;
790 unsigned long asce; 734 unsigned long asce;
735 unsigned long asce_end;
791 void *private; 736 void *private;
792 struct list_head crst_list;
793 bool pfault_enabled; 737 bool pfault_enabled;
794}; 738};
795 739
796/** 740/**
797 * struct gmap_rmap - reverse mapping for segment table entries
798 * @gmap: pointer to the gmap_struct
799 * @entry: pointer to a segment table entry
800 * @vmaddr: virtual address in the guest address space
801 */
802struct gmap_rmap {
803 struct list_head list;
804 struct gmap *gmap;
805 unsigned long *entry;
806 unsigned long vmaddr;
807};
808
809/**
810 * struct gmap_pgtable - gmap information attached to a page table
811 * @vmaddr: address of the 1MB segment in the process virtual memory
812 * @mapper: list of segment table entries mapping a page table
813 */
814struct gmap_pgtable {
815 unsigned long vmaddr;
816 struct list_head mapper;
817};
818
819/**
820 * struct gmap_notifier - notify function block for page invalidation 741 * struct gmap_notifier - notify function block for page invalidation
821 * @notifier_call: address of callback function 742 * @notifier_call: address of callback function
822 */ 743 */
823struct gmap_notifier { 744struct gmap_notifier {
824 struct list_head list; 745 struct list_head list;
825 void (*notifier_call)(struct gmap *gmap, unsigned long address); 746 void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
826}; 747};
827 748
828struct gmap *gmap_alloc(struct mm_struct *mm); 749struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
829void gmap_free(struct gmap *gmap); 750void gmap_free(struct gmap *gmap);
830void gmap_enable(struct gmap *gmap); 751void gmap_enable(struct gmap *gmap);
831void gmap_disable(struct gmap *gmap); 752void gmap_disable(struct gmap *gmap);
832int gmap_map_segment(struct gmap *gmap, unsigned long from, 753int gmap_map_segment(struct gmap *gmap, unsigned long from,
833 unsigned long to, unsigned long len); 754 unsigned long to, unsigned long len);
834int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); 755int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
835unsigned long __gmap_translate(unsigned long address, struct gmap *); 756unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
836unsigned long gmap_translate(unsigned long address, struct gmap *); 757unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
837unsigned long __gmap_fault(unsigned long address, struct gmap *); 758int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
838unsigned long gmap_fault(unsigned long address, struct gmap *); 759int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
839void gmap_discard(unsigned long from, unsigned long to, struct gmap *); 760void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
840void __gmap_zap(unsigned long address, struct gmap *); 761void __gmap_zap(struct gmap *, unsigned long gaddr);
841bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *); 762bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
842 763
843 764
844void gmap_register_ipte_notifier(struct gmap_notifier *); 765void gmap_register_ipte_notifier(struct gmap_notifier *);
845void gmap_unregister_ipte_notifier(struct gmap_notifier *); 766void gmap_unregister_ipte_notifier(struct gmap_notifier *);
846int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len); 767int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
847void gmap_do_ipte_notify(struct mm_struct *, pte_t *); 768void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
848 769
849static inline pgste_t pgste_ipte_notify(struct mm_struct *mm, 770static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
771 unsigned long addr,
850 pte_t *ptep, pgste_t pgste) 772 pte_t *ptep, pgste_t pgste)
851{ 773{
852#ifdef CONFIG_PGSTE 774#ifdef CONFIG_PGSTE
853 if (pgste_val(pgste) & PGSTE_IN_BIT) { 775 if (pgste_val(pgste) & PGSTE_IN_BIT) {
854 pgste_val(pgste) &= ~PGSTE_IN_BIT; 776 pgste_val(pgste) &= ~PGSTE_IN_BIT;
855 gmap_do_ipte_notify(mm, ptep); 777 gmap_do_ipte_notify(mm, addr, ptep);
856 } 778 }
857#endif 779#endif
858 return pgste; 780 return pgste;
@@ -875,8 +797,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
875 pgste = pgste_set_pte(ptep, pgste, entry); 797 pgste = pgste_set_pte(ptep, pgste, entry);
876 pgste_set_unlock(ptep, pgste); 798 pgste_set_unlock(ptep, pgste);
877 } else { 799 } else {
878 if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
879 pte_val(entry) |= _PAGE_CO;
880 *ptep = entry; 800 *ptep = entry;
881 } 801 }
882} 802}
@@ -912,18 +832,14 @@ static inline int pte_unused(pte_t pte)
912 832
913static inline void pgd_clear(pgd_t *pgd) 833static inline void pgd_clear(pgd_t *pgd)
914{ 834{
915#ifdef CONFIG_64BIT
916 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 835 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
917 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; 836 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
918#endif
919} 837}
920 838
921static inline void pud_clear(pud_t *pud) 839static inline void pud_clear(pud_t *pud)
922{ 840{
923#ifdef CONFIG_64BIT
924 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 841 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
925 pud_val(*pud) = _REGION3_ENTRY_EMPTY; 842 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
926#endif
927} 843}
928 844
929static inline void pmd_clear(pmd_t *pmdp) 845static inline void pmd_clear(pmd_t *pmdp)
@@ -1022,10 +938,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
1022{ 938{
1023 unsigned long pto = (unsigned long) ptep; 939 unsigned long pto = (unsigned long) ptep;
1024 940
1025#ifndef CONFIG_64BIT
1026 /* pto in ESA mode must point to the start of the segment table */
1027 pto &= 0x7ffffc00;
1028#endif
1029 /* Invalidation + global TLB flush for the pte */ 941 /* Invalidation + global TLB flush for the pte */
1030 asm volatile( 942 asm volatile(
1031 " ipte %2,%3" 943 " ipte %2,%3"
@@ -1036,16 +948,24 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
1036{ 948{
1037 unsigned long pto = (unsigned long) ptep; 949 unsigned long pto = (unsigned long) ptep;
1038 950
1039#ifndef CONFIG_64BIT
1040 /* pto in ESA mode must point to the start of the segment table */
1041 pto &= 0x7ffffc00;
1042#endif
1043 /* Invalidation + local TLB flush for the pte */ 951 /* Invalidation + local TLB flush for the pte */
1044 asm volatile( 952 asm volatile(
1045 " .insn rrf,0xb2210000,%2,%3,0,1" 953 " .insn rrf,0xb2210000,%2,%3,0,1"
1046 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); 954 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
1047} 955}
1048 956
957static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
958{
959 unsigned long pto = (unsigned long) ptep;
960
961 /* Invalidate a range of ptes + global TLB flush of the ptes */
962 do {
963 asm volatile(
964 " .insn rrf,0xb2210000,%2,%0,%1,0"
965 : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
966 } while (nr != 255);
967}
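The loop above re-issues the range form of IPTE until the counter wraps: the instruction consumes entries while decrementing the count register and advancing the address, so a caller passes count - 1 and the final decrement leaves 255 behind (this reading of the instruction semantics is an assumption). Usage sketch:

	/* invalidate 256 consecutive ptes starting at `address` */
	__ptep_ipte_range(address, 256 - 1, ptep);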
968
1049static inline void ptep_flush_direct(struct mm_struct *mm, 969static inline void ptep_flush_direct(struct mm_struct *mm,
1050 unsigned long address, pte_t *ptep) 970 unsigned long address, pte_t *ptep)
1051{ 971{
@@ -1098,7 +1018,7 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
1098 pgste_val(pgste) &= ~PGSTE_UC_BIT; 1018 pgste_val(pgste) &= ~PGSTE_UC_BIT;
1099 pte = *ptep; 1019 pte = *ptep;
1100 if (dirty && (pte_val(pte) & _PAGE_PRESENT)) { 1020 if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
1101 pgste = pgste_ipte_notify(mm, ptep, pgste); 1021 pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
1102 __ptep_ipte(addr, ptep); 1022 __ptep_ipte(addr, ptep);
1103 if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE)) 1023 if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
1104 pte_val(pte) |= _PAGE_PROTECT; 1024 pte_val(pte) |= _PAGE_PROTECT;
@@ -1115,20 +1035,21 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
1115 unsigned long addr, pte_t *ptep) 1035 unsigned long addr, pte_t *ptep)
1116{ 1036{
1117 pgste_t pgste; 1037 pgste_t pgste;
1118 pte_t pte; 1038 pte_t pte, oldpte;
1119 int young; 1039 int young;
1120 1040
1121 if (mm_has_pgste(vma->vm_mm)) { 1041 if (mm_has_pgste(vma->vm_mm)) {
1122 pgste = pgste_get_lock(ptep); 1042 pgste = pgste_get_lock(ptep);
1123 pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); 1043 pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
1124 } 1044 }
1125 1045
1126 pte = *ptep; 1046 oldpte = pte = *ptep;
1127 ptep_flush_direct(vma->vm_mm, addr, ptep); 1047 ptep_flush_direct(vma->vm_mm, addr, ptep);
1128 young = pte_young(pte); 1048 young = pte_young(pte);
1129 pte = pte_mkold(pte); 1049 pte = pte_mkold(pte);
1130 1050
1131 if (mm_has_pgste(vma->vm_mm)) { 1051 if (mm_has_pgste(vma->vm_mm)) {
1052 pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
1132 pgste = pgste_set_pte(ptep, pgste, pte); 1053 pgste = pgste_set_pte(ptep, pgste, pte);
1133 pgste_set_unlock(ptep, pgste); 1054 pgste_set_unlock(ptep, pgste);
1134 } else 1055 } else
@@ -1166,7 +1087,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1166 1087
1167 if (mm_has_pgste(mm)) { 1088 if (mm_has_pgste(mm)) {
1168 pgste = pgste_get_lock(ptep); 1089 pgste = pgste_get_lock(ptep);
1169 pgste = pgste_ipte_notify(mm, ptep, pgste); 1090 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
1170 } 1091 }
1171 1092
1172 pte = *ptep; 1093 pte = *ptep;
@@ -1190,7 +1111,7 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
1190 1111
1191 if (mm_has_pgste(mm)) { 1112 if (mm_has_pgste(mm)) {
1192 pgste = pgste_get_lock(ptep); 1113 pgste = pgste_get_lock(ptep);
1193 pgste_ipte_notify(mm, ptep, pgste); 1114 pgste_ipte_notify(mm, address, ptep, pgste);
1194 } 1115 }
1195 1116
1196 pte = *ptep; 1117 pte = *ptep;
@@ -1227,7 +1148,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1227 1148
1228 if (mm_has_pgste(vma->vm_mm)) { 1149 if (mm_has_pgste(vma->vm_mm)) {
1229 pgste = pgste_get_lock(ptep); 1150 pgste = pgste_get_lock(ptep);
1230 pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); 1151 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
1231 } 1152 }
1232 1153
1233 pte = *ptep; 1154 pte = *ptep;
@@ -1261,7 +1182,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1261 1182
1262 if (!full && mm_has_pgste(mm)) { 1183 if (!full && mm_has_pgste(mm)) {
1263 pgste = pgste_get_lock(ptep); 1184 pgste = pgste_get_lock(ptep);
1264 pgste = pgste_ipte_notify(mm, ptep, pgste); 1185 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
1265 } 1186 }
1266 1187
1267 pte = *ptep; 1188 pte = *ptep;
@@ -1286,7 +1207,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
1286 if (pte_write(pte)) { 1207 if (pte_write(pte)) {
1287 if (mm_has_pgste(mm)) { 1208 if (mm_has_pgste(mm)) {
1288 pgste = pgste_get_lock(ptep); 1209 pgste = pgste_get_lock(ptep);
1289 pgste = pgste_ipte_notify(mm, ptep, pgste); 1210 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
1290 } 1211 }
1291 1212
1292 ptep_flush_lazy(mm, address, ptep); 1213 ptep_flush_lazy(mm, address, ptep);
@@ -1312,12 +1233,13 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1312 return 0; 1233 return 0;
1313 if (mm_has_pgste(vma->vm_mm)) { 1234 if (mm_has_pgste(vma->vm_mm)) {
1314 pgste = pgste_get_lock(ptep); 1235 pgste = pgste_get_lock(ptep);
1315 pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); 1236 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
1316 } 1237 }
1317 1238
1318 ptep_flush_direct(vma->vm_mm, address, ptep); 1239 ptep_flush_direct(vma->vm_mm, address, ptep);
1319 1240
1320 if (mm_has_pgste(vma->vm_mm)) { 1241 if (mm_has_pgste(vma->vm_mm)) {
1242 pgste_set_key(ptep, pgste, entry, vma->vm_mm);
1321 pgste = pgste_set_pte(ptep, pgste, entry); 1243 pgste = pgste_set_pte(ptep, pgste, entry);
1322 pgste_set_unlock(ptep, pgste); 1244 pgste_set_unlock(ptep, pgste);
1323 } else 1245 } else
@@ -1354,17 +1276,6 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1354#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 1276#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
1355#define pgd_offset_k(address) pgd_offset(&init_mm, address) 1277#define pgd_offset_k(address) pgd_offset(&init_mm, address)
1356 1278
1357#ifndef CONFIG_64BIT
1358
1359#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1360#define pud_deref(pmd) ({ BUG(); 0UL; })
1361#define pgd_deref(pmd) ({ BUG(); 0UL; })
1362
1363#define pud_offset(pgd, address) ((pud_t *) pgd)
1364#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
1365
1366#else /* CONFIG_64BIT */
1367
1368#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1279#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1369#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) 1280#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
1370#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) 1281#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
@@ -1385,13 +1296,11 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1385 return pmd + pmd_index(address); 1296 return pmd + pmd_index(address);
1386} 1297}
1387 1298
1388#endif /* CONFIG_64BIT */
1389
1390#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) 1299#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
1391#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) 1300#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1392#define pte_page(x) pfn_to_page(pte_pfn(x)) 1301#define pte_page(x) pfn_to_page(pte_pfn(x))
1393 1302
1394#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) 1303#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1395 1304
1396/* Find an entry in the lowest level page table.. */ 1305/* Find an entry in the lowest level page table.. */
1397#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) 1306#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
@@ -1413,41 +1322,75 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1413 return pgprot_val(SEGMENT_WRITE); 1322 return pgprot_val(SEGMENT_WRITE);
1414} 1323}
1415 1324
1416static inline pmd_t pmd_mkyoung(pmd_t pmd) 1325static inline pmd_t pmd_wrprotect(pmd_t pmd)
1326{
1327 pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
1328 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1329 return pmd;
1330}
1331
1332static inline pmd_t pmd_mkwrite(pmd_t pmd)
1333{
1334 pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
1335 if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1336 return pmd;
1337 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1338 return pmd;
1339}
1340
1341static inline pmd_t pmd_mkclean(pmd_t pmd)
1417{ 1342{
1418#ifdef CONFIG_64BIT 1343 if (pmd_large(pmd)) {
1419 if (pmd_prot_none(pmd)) { 1344 pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
1420 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1345 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1421 } else { 1346 }
1347 return pmd;
1348}
1349
1350static inline pmd_t pmd_mkdirty(pmd_t pmd)
1351{
1352 if (pmd_large(pmd)) {
1353 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
1354 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1355 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1356 }
1357 return pmd;
1358}
1359
1360static inline pmd_t pmd_mkyoung(pmd_t pmd)
1361{
1362 if (pmd_large(pmd)) {
1422 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; 1363 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1423 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; 1364 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1365 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
1424 } 1366 }
1425#endif
1426 return pmd; 1367 return pmd;
1427} 1368}
1428 1369
1429static inline pmd_t pmd_mkold(pmd_t pmd) 1370static inline pmd_t pmd_mkold(pmd_t pmd)
1430{ 1371{
1431#ifdef CONFIG_64BIT 1372 if (pmd_large(pmd)) {
1432 if (pmd_prot_none(pmd)) {
1433 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1434 } else {
1435 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; 1373 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
1436 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; 1374 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1437 } 1375 }
1438#endif
1439 return pmd; 1376 return pmd;
1440} 1377}
1441 1378
1442static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 1379static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1443{ 1380{
1444 int young; 1381 if (pmd_large(pmd)) {
1445 1382 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
1446 young = pmd_young(pmd); 1383 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
1447 pmd_val(pmd) &= _SEGMENT_CHG_MASK; 1384 _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
1385 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1386 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1387 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1388 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1389 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1390 return pmd;
1391 }
1392 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
1448 pmd_val(pmd) |= massage_pgprot_pmd(newprot); 1393 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1449 if (young)
1450 pmd = pmd_mkyoung(pmd);
1451 return pmd; 1394 return pmd;
1452} 1395}
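Taken together these helpers maintain one invariant for large pmds: _SEGMENT_ENTRY_PROTECT stays set until the entry is both writable and dirty, so the first store to a clean THP faults and the handler can mark it dirty. A minimal sketch (physpage is a hypothetical segment-aligned physical address):

	pmd_t pmd = pmd_mkhuge(mk_pmd_phys(physpage, PAGE_READ));

	pmd = pmd_mkwrite(pmd);	/* WRITE set, PROTECT kept: still clean */
	pmd = pmd_mkdirty(pmd);	/* DIRTY set, PROTECT cleared: writable */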
1453 1396
@@ -1455,16 +1398,9 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1455{ 1398{
1456 pmd_t __pmd; 1399 pmd_t __pmd;
1457 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); 1400 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1458 return pmd_mkyoung(__pmd); 1401 return __pmd;
1459} 1402}
1460 1403
1461static inline pmd_t pmd_mkwrite(pmd_t pmd)
1462{
1463 /* Do not clobber PROT_NONE segments! */
1464 if (!pmd_prot_none(pmd))
1465 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1466 return pmd;
1467}
1468#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ 1404#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1469 1405
1470static inline void __pmdp_csp(pmd_t *pmdp) 1406static inline void __pmdp_csp(pmd_t *pmdp)
@@ -1555,34 +1491,21 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1555 1491
1556static inline int pmd_trans_splitting(pmd_t pmd) 1492static inline int pmd_trans_splitting(pmd_t pmd)
1557{ 1493{
1558 return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; 1494 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
1495 (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
1559} 1496}
1560 1497
1561static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 1498static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1562 pmd_t *pmdp, pmd_t entry) 1499 pmd_t *pmdp, pmd_t entry)
1563{ 1500{
1564 if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
1565 pmd_val(entry) |= _SEGMENT_ENTRY_CO;
1566 *pmdp = entry; 1501 *pmdp = entry;
1567} 1502}
1568 1503
1569static inline pmd_t pmd_mkhuge(pmd_t pmd) 1504static inline pmd_t pmd_mkhuge(pmd_t pmd)
1570{ 1505{
1571 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; 1506 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1572 return pmd; 1507 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1573} 1508 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1574
1575static inline pmd_t pmd_wrprotect(pmd_t pmd)
1576{
1577 /* Do not clobber PROT_NONE segments! */
1578 if (!pmd_prot_none(pmd))
1579 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1580 return pmd;
1581}
1582
1583static inline pmd_t pmd_mkdirty(pmd_t pmd)
1584{
1585 /* No dirty bit in the segment table entry. */
1586 return pmd; 1509 return pmd;
1587} 1510}
1588 1511
@@ -1609,6 +1532,19 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
1609 return pmd; 1532 return pmd;
1610} 1533}
1611 1534
1535#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
1536static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
1537 unsigned long address,
1538 pmd_t *pmdp, int full)
1539{
1540 pmd_t pmd = *pmdp;
1541
1542 if (!full)
1543 pmdp_flush_lazy(mm, address, pmdp);
1544 pmd_clear(pmdp);
1545 return pmd;
1546}
1547
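pmdp_get_and_clear_full() may skip the per-entry TLB flush because a full teardown flushes the whole address space once at the end. Sketch of the intended call site, using the fullmm flag of the generic mmu_gather:

	pmd_t old = pmdp_get_and_clear_full(mm, addr, pmdp, tlb->fullmm);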
1612#define __HAVE_ARCH_PMDP_CLEAR_FLUSH 1548#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
1613static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma, 1549static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
1614 unsigned long address, pmd_t *pmdp) 1550 unsigned long address, pmd_t *pmdp)
@@ -1647,11 +1583,6 @@ static inline int has_transparent_hugepage(void)
1647{ 1583{
1648 return MACHINE_HAS_HPAGE ? 1 : 0; 1584 return MACHINE_HAS_HPAGE ? 1 : 0;
1649} 1585}
1650
1651static inline unsigned long pmd_pfn(pmd_t pmd)
1652{
1653 return pmd_val(pmd) >> PAGE_SHIFT;
1654}
1655#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1586#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1656 1587
1657/* 1588/*
@@ -1685,11 +1616,9 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
1685 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 1616 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
1686 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 1617 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
1687 */ 1618 */
1688#ifndef CONFIG_64BIT 1619
1689#define __SWP_OFFSET_MASK (~0UL >> 12)
1690#else
1691#define __SWP_OFFSET_MASK (~0UL >> 11) 1620#define __SWP_OFFSET_MASK (~0UL >> 11)
1692#endif 1621
1693static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) 1622static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1694{ 1623{
1695 pte_t pte; 1624 pte_t pte;
@@ -1706,19 +1635,6 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1706#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 1635#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
1707#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 1636#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
1708 1637
1709#ifndef CONFIG_64BIT
1710# define PTE_FILE_MAX_BITS 26
1711#else /* CONFIG_64BIT */
1712# define PTE_FILE_MAX_BITS 59
1713#endif /* CONFIG_64BIT */
1714
1715#define pte_to_pgoff(__pte) \
1716 ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
1717
1718#define pgoff_to_pte(__off) \
1719 ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
1720 | _PAGE_INVALID | _PAGE_PROTECT })
1721
1722#endif /* !__ASSEMBLY__ */ 1638#endif /* !__ASSEMBLY__ */
1723 1639
1724#define kern_addr_valid(addr) (1) 1640#define kern_addr_valid(addr) (1)
@@ -1726,7 +1642,12 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1726extern int vmem_add_mapping(unsigned long start, unsigned long size); 1642extern int vmem_add_mapping(unsigned long start, unsigned long size);
1727extern int vmem_remove_mapping(unsigned long start, unsigned long size); 1643extern int vmem_remove_mapping(unsigned long start, unsigned long size);
1728extern int s390_enable_sie(void); 1644extern int s390_enable_sie(void);
1729extern void s390_enable_skey(void); 1645extern int s390_enable_skey(void);
1646extern void s390_reset_cmma(struct mm_struct *mm);
1647
1648/* s390 has a private copy of get unmapped area to deal with cache synonyms */
1649#define HAVE_ARCH_UNMAPPED_AREA
1650#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1730 1651
1731/* 1652/*
1732 * No page table caches to initialise 1653 * No page table caches to initialise
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6f02d452bbee..dedb6218544b 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -13,10 +13,11 @@
13 13
14#define CIF_MCCK_PENDING 0 /* machine check handling is pending */ 14#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
15#define CIF_ASCE 1 /* user asce needs fixup / uaccess */ 15#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
16#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
16 17
17#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING) 18#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING)
18#define _CIF_ASCE (1<<CIF_ASCE) 19#define _CIF_ASCE (1<<CIF_ASCE)
19 20#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
20 21
21#ifndef __ASSEMBLY__ 22#ifndef __ASSEMBLY__
22 23
@@ -43,6 +44,8 @@ static inline int test_cpu_flag(int flag)
43 return !!(S390_lowcore.cpu_flags & (1U << flag)); 44 return !!(S390_lowcore.cpu_flags & (1U << flag));
44} 45}
45 46
47#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
48
46/* 49/*
47 * Default implementation of macro that returns current 50 * Default implementation of macro that returns current
48 * instruction pointer ("program counter"). 51 * instruction pointer ("program counter").
@@ -62,13 +65,6 @@ extern void execve_tail(void);
62/* 65/*
63 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. 66 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
64 */ 67 */
65#ifndef CONFIG_64BIT
66
67#define TASK_SIZE (1UL << 31)
68#define TASK_MAX_SIZE (1UL << 31)
69#define TASK_UNMAPPED_BASE (1UL << 30)
70
71#else /* CONFIG_64BIT */
72 68
73#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) 69#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
74#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ 70#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
@@ -76,15 +72,8 @@ extern void execve_tail(void);
76#define TASK_SIZE TASK_SIZE_OF(current) 72#define TASK_SIZE TASK_SIZE_OF(current)
77#define TASK_MAX_SIZE (1UL << 53) 73#define TASK_MAX_SIZE (1UL << 53)
78 74
79#endif /* CONFIG_64BIT */
80
81#ifndef CONFIG_64BIT
82#define STACK_TOP (1UL << 31)
83#define STACK_TOP_MAX (1UL << 31)
84#else /* CONFIG_64BIT */
85#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42)) 75#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
86#define STACK_TOP_MAX (1UL << 42) 76#define STACK_TOP_MAX (1UL << 42)
87#endif /* CONFIG_64BIT */
88 77
89#define HAVE_ARCH_PICK_MMAP_LAYOUT 78#define HAVE_ARCH_PICK_MMAP_LAYOUT
90 79
@@ -111,9 +100,8 @@ struct thread_struct {
111 /* cpu runtime instrumentation */ 100 /* cpu runtime instrumentation */
112 struct runtime_instr_cb *ri_cb; 101 struct runtime_instr_cb *ri_cb;
113 int ri_signum; 102 int ri_signum;
114#ifdef CONFIG_64BIT
115 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ 103 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
116#endif 104 __vector128 *vxrs; /* Vector register save area */
117}; 105};
118 106
119/* Flag to disable transactions. */ 107/* Flag to disable transactions. */
@@ -176,11 +164,7 @@ struct task_struct;
176struct mm_struct; 164struct mm_struct;
177struct seq_file; 165struct seq_file;
178 166
179#ifdef CONFIG_64BIT 167void show_cacheinfo(struct seq_file *m);
180extern void show_cacheinfo(struct seq_file *m);
181#else
182static inline void show_cacheinfo(struct seq_file *m) { }
183#endif
184 168
185/* Free all resources held by a thread. */ 169/* Free all resources held by a thread. */
186extern void release_thread(struct task_struct *); 170extern void release_thread(struct task_struct *);
@@ -210,14 +194,9 @@ static inline unsigned short stap(void)
210/* 194/*
211 * Give up the time slice of the virtual PU. 195 * Give up the time slice of the virtual PU.
212 */ 196 */
213static inline void cpu_relax(void) 197void cpu_relax(void);
214{
215 if (MACHINE_HAS_DIAG44)
216 asm volatile("diag 0,0,68");
217 barrier();
218}
219 198
220#define arch_mutex_cpu_relax() barrier() 199#define cpu_relax_lowlatency() barrier()
221 200
222static inline void psw_set_key(unsigned int key) 201static inline void psw_set_key(unsigned int key)
223{ 202{
@@ -229,11 +208,7 @@ static inline void psw_set_key(unsigned int key)
229 */ 208 */
230static inline void __load_psw(psw_t psw) 209static inline void __load_psw(psw_t psw)
231{ 210{
232#ifndef CONFIG_64BIT
233 asm volatile("lpsw %0" : : "Q" (psw) : "cc");
234#else
235 asm volatile("lpswe %0" : : "Q" (psw) : "cc"); 211 asm volatile("lpswe %0" : : "Q" (psw) : "cc");
236#endif
237} 212}
238 213
239/* 214/*
@@ -247,22 +222,12 @@ static inline void __load_psw_mask (unsigned long mask)
247 222
248 psw.mask = mask; 223 psw.mask = mask;
249 224
250#ifndef CONFIG_64BIT
251 asm volatile(
252 " basr %0,0\n"
253 "0: ahi %0,1f-0b\n"
254 " st %0,%O1+4(%R1)\n"
255 " lpsw %1\n"
256 "1:"
257 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
258#else /* CONFIG_64BIT */
259 asm volatile( 225 asm volatile(
260 " larl %0,1f\n" 226 " larl %0,1f\n"
261 " stg %0,%O1+8(%R1)\n" 227 " stg %0,%O1+8(%R1)\n"
262 " lpswe %1\n" 228 " lpswe %1\n"
263 "1:" 229 "1:"
264 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); 230 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
265#endif /* CONFIG_64BIT */
266} 231}
267 232
268/* 233/*
@@ -270,22 +235,19 @@ static inline void __load_psw_mask (unsigned long mask)
270 */ 235 */
271static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) 236static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
272{ 237{
273#ifndef CONFIG_64BIT
274 if (psw.addr & PSW_ADDR_AMODE)
275 /* 31 bit mode */
276 return (psw.addr - ilc) | PSW_ADDR_AMODE;
277 /* 24 bit mode */
278 return (psw.addr - ilc) & ((1UL << 24) - 1);
279#else
280 unsigned long mask; 238 unsigned long mask;
281 239
282 mask = (psw.mask & PSW_MASK_EA) ? -1UL : 240 mask = (psw.mask & PSW_MASK_EA) ? -1UL :
283 (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 : 241 (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
284 (1UL << 24) - 1; 242 (1UL << 24) - 1;
285 return (psw.addr - ilc) & mask; 243 return (psw.addr - ilc) & mask;
286#endif
287} 244}
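With the 31-bit special cases gone, the rewind mask is derived purely from the PSW addressing-mode bits. A worked example for a 31-bit PSW (BA set, EA clear):

	psw_t psw = { .mask = PSW_MASK_BA, .addr = 0x80000002 };
	unsigned long addr = __rewind_psw(psw, 4);
	/* (0x80000002 - 4) & 0x7fffffff == 0x7ffffffe */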
288 245
246/*
247 * Function to stop a processor until the next interrupt occurs
248 */
249void enabled_wait(void);
250
289/* 251/*
290 * Function to drop a processor into disabled wait state 252 * Function to drop a processor into disabled wait state
291 */ 253 */
@@ -300,26 +262,6 @@ static inline void __noreturn disabled_wait(unsigned long code)
300 * Store status and then load disabled wait psw, 262 * Store status and then load disabled wait psw,
301 * the processor is dead afterwards 263 * the processor is dead afterwards
302 */ 264 */
303#ifndef CONFIG_64BIT
304 asm volatile(
305 " stctl 0,0,0(%2)\n"
306 " ni 0(%2),0xef\n" /* switch off protection */
307 " lctl 0,0,0(%2)\n"
308 " stpt 0xd8\n" /* store timer */
309 " stckc 0xe0\n" /* store clock comparator */
310 " stpx 0x108\n" /* store prefix register */
311 " stam 0,15,0x120\n" /* store access registers */
312 " std 0,0x160\n" /* store f0 */
313 " std 2,0x168\n" /* store f2 */
314 " std 4,0x170\n" /* store f4 */
315 " std 6,0x178\n" /* store f6 */
316 " stm 0,15,0x180\n" /* store general registers */
317 " stctl 0,15,0x1c0\n" /* store control registers */
318 " oi 0x1c0,0x10\n" /* fake protection bit */
319 " lpsw 0(%1)"
320 : "=m" (ctl_buf)
321 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
322#else /* CONFIG_64BIT */
323 asm volatile( 265 asm volatile(
324 " stctg 0,0,0(%2)\n" 266 " stctg 0,0,0(%2)\n"
325 " ni 4(%2),0xef\n" /* switch off protection */ 267 " ni 4(%2),0xef\n" /* switch off protection */
@@ -352,7 +294,6 @@ static inline void __noreturn disabled_wait(unsigned long code)
352 " lpswe 0(%1)" 294 " lpswe 0(%1)"
353 : "=m" (ctl_buf) 295 : "=m" (ctl_buf)
354 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); 296 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
355#endif /* CONFIG_64BIT */
356 while (1); 297 while (1);
357} 298}
358 299
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 55d69dd7473c..6feda2599282 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -40,12 +40,8 @@ struct psw_bits {
40 unsigned long long ri : 1; /* Runtime Instrumentation */ 40 unsigned long long ri : 1; /* Runtime Instrumentation */
41 unsigned long long : 6; 41 unsigned long long : 6;
42 unsigned long long eaba : 2; /* Addressing Mode */ 42 unsigned long long eaba : 2; /* Addressing Mode */
43#ifdef CONFIG_64BIT
44 unsigned long long : 31; 43 unsigned long long : 31;
45 unsigned long long ia : 64;/* Instruction Address */ 44 unsigned long long ia : 64;/* Instruction Address */
46#else
47 unsigned long long ia : 31;/* Instruction Address */
48#endif
49}; 45};
50 46
51enum { 47enum {
@@ -161,6 +157,12 @@ static inline long regs_return_value(struct pt_regs *regs)
161 return regs->gprs[2]; 157 return regs->gprs[2];
162} 158}
163 159
160static inline void instruction_pointer_set(struct pt_regs *regs,
161 unsigned long val)
162{
163 regs->psw.addr = val | PSW_ADDR_AMODE;
164}
165
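instruction_pointer_set() folds in PSW_ADDR_AMODE so callers can pass a plain address. Sketch (new_ip is hypothetical):

	instruction_pointer_set(regs, new_ip);	/* e.g. from a probe handler */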
164int regs_query_register_offset(const char *name); 166int regs_query_register_offset(const char *name);
165const char *regs_query_register_name(unsigned int offset); 167const char *regs_query_register_name(unsigned int offset);
166unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset); 168unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index d786c634e052..998b61cd0e56 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -211,11 +211,6 @@ struct qdio_buffer_element {
211 u8 scount; 211 u8 scount;
212 u8 sflags; 212 u8 sflags;
213 u32 length; 213 u32 length;
214#ifdef CONFIG_32BIT
215 /* private: */
216 void *res2;
217 /* public: */
218#endif
219 void *addr; 214 void *addr;
220} __attribute__ ((packed, aligned(16))); 215} __attribute__ ((packed, aligned(16)));
221 216
@@ -232,11 +227,6 @@ struct qdio_buffer {
232 * @sbal: absolute SBAL address 227 * @sbal: absolute SBAL address
233 */ 228 */
234struct sl_element { 229struct sl_element {
235#ifdef CONFIG_32BIT
236 /* private: */
237 unsigned long reserved;
238 /* public: */
239#endif
240 unsigned long sbal; 230 unsigned long sbal;
241} __attribute__ ((packed)); 231} __attribute__ ((packed));
242 232
@@ -415,6 +405,10 @@ struct qdio_brinfo_entry_l2 {
415#define QDIO_FLAG_SYNC_OUTPUT 0x02 405#define QDIO_FLAG_SYNC_OUTPUT 0x02
416#define QDIO_FLAG_PCI_OUT 0x10 406#define QDIO_FLAG_PCI_OUT 0x10
417 407
408int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
409void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
410void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count);
411
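A usage sketch for the new bulk buffer helpers, sized with the existing QDIO_MAX_BUFFERS_PER_Q constant from this header:

	struct qdio_buffer *sbals[QDIO_MAX_BUFFERS_PER_Q];

	if (qdio_alloc_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q))
		return -ENOMEM;
	qdio_reset_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q);
	/* ... queue I/O ... */
	qdio_free_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q);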
418extern int qdio_allocate(struct qdio_initialize *); 412extern int qdio_allocate(struct qdio_initialize *);
419extern int qdio_establish(struct qdio_initialize *); 413extern int qdio_establish(struct qdio_initialize *);
420extern int qdio_activate(struct ccw_device *); 414extern int qdio_activate(struct ccw_device *);
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
index 804578587a7a..72786067b300 100644
--- a/arch/s390/include/asm/reset.h
+++ b/arch/s390/include/asm/reset.h
@@ -15,5 +15,6 @@ struct reset_call {
15 15
16extern void register_reset_call(struct reset_call *reset); 16extern void register_reset_call(struct reset_call *reset);
17extern void unregister_reset_call(struct reset_call *reset); 17extern void unregister_reset_call(struct reset_call *reset);
18extern void s390_reset_system(void (*func)(void *), void *data); 18extern void s390_reset_system(void (*fn_pre)(void),
19 void (*fn_post)(void *), void *data);
19#endif /* _ASM_S390_RESET_H */ 20#endif /* _ASM_S390_RESET_H */
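The reset entry point is split into two callbacks: fn_pre runs before the registered reset calls, fn_post afterwards with its data argument. Hypothetical call site:

	s390_reset_system(my_quiesce_fn, my_continue_fn, my_data);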
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index 830da737ff85..402ad6df4897 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -72,27 +72,19 @@ static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
72 72
73static inline void save_ri_cb(struct runtime_instr_cb *cb_prev) 73static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
74{ 74{
75#ifdef CONFIG_64BIT
76 if (cb_prev) 75 if (cb_prev)
77 store_runtime_instr_cb(cb_prev); 76 store_runtime_instr_cb(cb_prev);
78#endif
79} 77}
80 78
81static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, 79static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
82 struct runtime_instr_cb *cb_prev) 80 struct runtime_instr_cb *cb_prev)
83{ 81{
84#ifdef CONFIG_64BIT
85 if (cb_next) 82 if (cb_next)
86 load_runtime_instr_cb(cb_next); 83 load_runtime_instr_cb(cb_next);
87 else if (cb_prev) 84 else if (cb_prev)
88 load_runtime_instr_cb(&runtime_instr_empty_cb); 85 load_runtime_instr_cb(&runtime_instr_empty_cb);
89#endif
90} 86}
91 87
92#ifdef CONFIG_64BIT 88void exit_thread_runtime_instr(void);
93extern void exit_thread_runtime_instr(void);
94#else
95static inline void exit_thread_runtime_instr(void) { }
96#endif
97 89
98#endif /* _RUNTIME_INSTR_H */ 90#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 487f9b64efb9..4b43ee7e6776 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -39,17 +39,10 @@
39#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" 39#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
40#endif 40#endif
41 41
42#ifndef CONFIG_64BIT
43#define RWSEM_UNLOCKED_VALUE 0x00000000
44#define RWSEM_ACTIVE_BIAS 0x00000001
45#define RWSEM_ACTIVE_MASK 0x0000ffff
46#define RWSEM_WAITING_BIAS (-0x00010000)
47#else /* CONFIG_64BIT */
48#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L 42#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
49#define RWSEM_ACTIVE_BIAS 0x0000000000000001L 43#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
50#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL 44#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
51#define RWSEM_WAITING_BIAS (-0x0000000100000000L) 45#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
52#endif /* CONFIG_64BIT */
53#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS 46#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
54#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 47#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
55 48
@@ -61,19 +54,11 @@ static inline void __down_read(struct rw_semaphore *sem)
61 signed long old, new; 54 signed long old, new;
62 55
63 asm volatile( 56 asm volatile(
64#ifndef CONFIG_64BIT
65 " l %0,%2\n"
66 "0: lr %1,%0\n"
67 " ahi %1,%4\n"
68 " cs %0,%1,%2\n"
69 " jl 0b"
70#else /* CONFIG_64BIT */
71 " lg %0,%2\n" 57 " lg %0,%2\n"
72 "0: lgr %1,%0\n" 58 "0: lgr %1,%0\n"
73 " aghi %1,%4\n" 59 " aghi %1,%4\n"
74 " csg %0,%1,%2\n" 60 " csg %0,%1,%2\n"
75 " jl 0b" 61 " jl 0b"
76#endif /* CONFIG_64BIT */
77 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 62 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
78 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) 63 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
79 : "cc", "memory"); 64 : "cc", "memory");
@@ -89,15 +74,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
89 signed long old, new; 74 signed long old, new;
90 75
91 asm volatile( 76 asm volatile(
92#ifndef CONFIG_64BIT
93 " l %0,%2\n"
94 "0: ltr %1,%0\n"
95 " jm 1f\n"
96 " ahi %1,%4\n"
97 " cs %0,%1,%2\n"
98 " jl 0b\n"
99 "1:"
100#else /* CONFIG_64BIT */
101 " lg %0,%2\n" 77 " lg %0,%2\n"
102 "0: ltgr %1,%0\n" 78 "0: ltgr %1,%0\n"
103 " jm 1f\n" 79 " jm 1f\n"
@@ -105,7 +81,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
105 " csg %0,%1,%2\n" 81 " csg %0,%1,%2\n"
106 " jl 0b\n" 82 " jl 0b\n"
107 "1:" 83 "1:"
108#endif /* CONFIG_64BIT */
109 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 84 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
110 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) 85 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
111 : "cc", "memory"); 86 : "cc", "memory");
@@ -121,19 +96,11 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
121 96
122 tmp = RWSEM_ACTIVE_WRITE_BIAS; 97 tmp = RWSEM_ACTIVE_WRITE_BIAS;
123 asm volatile( 98 asm volatile(
124#ifndef CONFIG_64BIT
125 " l %0,%2\n"
126 "0: lr %1,%0\n"
127 " a %1,%4\n"
128 " cs %0,%1,%2\n"
129 " jl 0b"
130#else /* CONFIG_64BIT */
131 " lg %0,%2\n" 99 " lg %0,%2\n"
132 "0: lgr %1,%0\n" 100 "0: lgr %1,%0\n"
133 " ag %1,%4\n" 101 " ag %1,%4\n"
134 " csg %0,%1,%2\n" 102 " csg %0,%1,%2\n"
135 " jl 0b" 103 " jl 0b"
136#endif /* CONFIG_64BIT */
137 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 104 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
138 : "Q" (sem->count), "m" (tmp) 105 : "Q" (sem->count), "m" (tmp)
139 : "cc", "memory"); 106 : "cc", "memory");
@@ -154,19 +121,11 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
154 signed long old; 121 signed long old;
155 122
156 asm volatile( 123 asm volatile(
157#ifndef CONFIG_64BIT
158 " l %0,%1\n"
159 "0: ltr %0,%0\n"
160 " jnz 1f\n"
161 " cs %0,%3,%1\n"
162 " jl 0b\n"
163#else /* CONFIG_64BIT */
164 " lg %0,%1\n" 124 " lg %0,%1\n"
165 "0: ltgr %0,%0\n" 125 "0: ltgr %0,%0\n"
166 " jnz 1f\n" 126 " jnz 1f\n"
167 " csg %0,%3,%1\n" 127 " csg %0,%3,%1\n"
168 " jl 0b\n" 128 " jl 0b\n"
169#endif /* CONFIG_64BIT */
170 "1:" 129 "1:"
171 : "=&d" (old), "=Q" (sem->count) 130 : "=&d" (old), "=Q" (sem->count)
172 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) 131 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
@@ -182,19 +141,11 @@ static inline void __up_read(struct rw_semaphore *sem)
182 signed long old, new; 141 signed long old, new;
183 142
184 asm volatile( 143 asm volatile(
185#ifndef CONFIG_64BIT
186 " l %0,%2\n"
187 "0: lr %1,%0\n"
188 " ahi %1,%4\n"
189 " cs %0,%1,%2\n"
190 " jl 0b"
191#else /* CONFIG_64BIT */
192 " lg %0,%2\n" 144 " lg %0,%2\n"
193 "0: lgr %1,%0\n" 145 "0: lgr %1,%0\n"
194 " aghi %1,%4\n" 146 " aghi %1,%4\n"
195 " csg %0,%1,%2\n" 147 " csg %0,%1,%2\n"
196 " jl 0b" 148 " jl 0b"
197#endif /* CONFIG_64BIT */
198 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 149 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
199 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) 150 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
200 : "cc", "memory"); 151 : "cc", "memory");
@@ -212,19 +163,11 @@ static inline void __up_write(struct rw_semaphore *sem)
212 163
213 tmp = -RWSEM_ACTIVE_WRITE_BIAS; 164 tmp = -RWSEM_ACTIVE_WRITE_BIAS;
214 asm volatile( 165 asm volatile(
215#ifndef CONFIG_64BIT
216 " l %0,%2\n"
217 "0: lr %1,%0\n"
218 " a %1,%4\n"
219 " cs %0,%1,%2\n"
220 " jl 0b"
221#else /* CONFIG_64BIT */
222 " lg %0,%2\n" 166 " lg %0,%2\n"
223 "0: lgr %1,%0\n" 167 "0: lgr %1,%0\n"
224 " ag %1,%4\n" 168 " ag %1,%4\n"
225 " csg %0,%1,%2\n" 169 " csg %0,%1,%2\n"
226 " jl 0b" 170 " jl 0b"
227#endif /* CONFIG_64BIT */
228 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 171 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
229 : "Q" (sem->count), "m" (tmp) 172 : "Q" (sem->count), "m" (tmp)
230 : "cc", "memory"); 173 : "cc", "memory");
@@ -242,19 +185,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
242 185
243 tmp = -RWSEM_WAITING_BIAS; 186 tmp = -RWSEM_WAITING_BIAS;
244 asm volatile( 187 asm volatile(
245#ifndef CONFIG_64BIT
246 " l %0,%2\n"
247 "0: lr %1,%0\n"
248 " a %1,%4\n"
249 " cs %0,%1,%2\n"
250 " jl 0b"
251#else /* CONFIG_64BIT */
252 " lg %0,%2\n" 188 " lg %0,%2\n"
253 "0: lgr %1,%0\n" 189 "0: lgr %1,%0\n"
254 " ag %1,%4\n" 190 " ag %1,%4\n"
255 " csg %0,%1,%2\n" 191 " csg %0,%1,%2\n"
256 " jl 0b" 192 " jl 0b"
257#endif /* CONFIG_64BIT */
258 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 193 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
259 : "Q" (sem->count), "m" (tmp) 194 : "Q" (sem->count), "m" (tmp)
260 : "cc", "memory"); 195 : "cc", "memory");
@@ -270,19 +205,11 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
270 signed long old, new; 205 signed long old, new;
271 206
272 asm volatile( 207 asm volatile(
273#ifndef CONFIG_64BIT
274 " l %0,%2\n"
275 "0: lr %1,%0\n"
276 " ar %1,%4\n"
277 " cs %0,%1,%2\n"
278 " jl 0b"
279#else /* CONFIG_64BIT */
280 " lg %0,%2\n" 208 " lg %0,%2\n"
281 "0: lgr %1,%0\n" 209 "0: lgr %1,%0\n"
282 " agr %1,%4\n" 210 " agr %1,%4\n"
283 " csg %0,%1,%2\n" 211 " csg %0,%1,%2\n"
284 " jl 0b" 212 " jl 0b"
285#endif /* CONFIG_64BIT */
286 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 213 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
287 : "Q" (sem->count), "d" (delta) 214 : "Q" (sem->count), "d" (delta)
288 : "cc", "memory"); 215 : "cc", "memory");
@@ -296,19 +223,11 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
296 signed long old, new; 223 signed long old, new;
297 224
298 asm volatile( 225 asm volatile(
299#ifndef CONFIG_64BIT
300 " l %0,%2\n"
301 "0: lr %1,%0\n"
302 " ar %1,%4\n"
303 " cs %0,%1,%2\n"
304 " jl 0b"
305#else /* CONFIG_64BIT */
306 " lg %0,%2\n" 226 " lg %0,%2\n"
307 "0: lgr %1,%0\n" 227 "0: lgr %1,%0\n"
308 " agr %1,%4\n" 228 " agr %1,%4\n"
309 " csg %0,%1,%2\n" 229 " csg %0,%1,%2\n"
310 " jl 0b" 230 " jl 0b"
311#endif /* CONFIG_64BIT */
312 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 231 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
313 : "Q" (sem->count), "d" (delta) 232 : "Q" (sem->count), "d" (delta)
314 : "cc", "memory"); 233 : "cc", "memory");
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
deleted file mode 100644
index 6d45ef6c12a7..000000000000
--- a/arch/s390/include/asm/scatterlist.h
+++ /dev/null
@@ -1,3 +0,0 @@
1#include <asm-generic/scatterlist.h>
2
3#define ARCH_HAS_SG_CHAIN
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 1aba89b53cb9..f1096bab5199 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -27,11 +27,12 @@ struct sclp_ipl_info {
27}; 27};
28 28
29struct sclp_cpu_entry { 29struct sclp_cpu_entry {
30 u8 address; 30 u8 core_id;
31 u8 reserved0[2]; 31 u8 reserved0[2];
32 u8 : 3; 32 u8 : 3;
33 u8 siif : 1; 33 u8 siif : 1;
34 u8 : 4; 34 u8 sigpif : 1;
35 u8 : 3;
35 u8 reserved2[10]; 36 u8 reserved2[10];
36 u8 type; 37 u8 type;
37 u8 reserved1; 38 u8 reserved1;
@@ -51,6 +52,9 @@ int sclp_cpu_deconfigure(u8 cpu);
51unsigned long long sclp_get_rnmax(void); 52unsigned long long sclp_get_rnmax(void);
52unsigned long long sclp_get_rzm(void); 53unsigned long long sclp_get_rzm(void);
53unsigned int sclp_get_max_cpu(void); 54unsigned int sclp_get_max_cpu(void);
55unsigned int sclp_get_mtid(u8 cpu_type);
56unsigned int sclp_get_mtid_max(void);
57unsigned int sclp_get_mtid_prev(void);
54int sclp_sdias_blk_count(void); 58int sclp_sdias_blk_count(void);
55int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); 59int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
56int sclp_chp_configure(struct chp_id chpid); 60int sclp_chp_configure(struct chp_id chpid);
@@ -66,6 +70,9 @@ int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
66unsigned long sclp_get_hsa_size(void); 70unsigned long sclp_get_hsa_size(void);
67void sclp_early_detect(void); 71void sclp_early_detect(void);
68int sclp_has_siif(void); 72int sclp_has_siif(void);
73int sclp_has_sigpif(void);
69unsigned int sclp_get_ibc(void); 74unsigned int sclp_get_ibc(void);
70 75
76long _sclp_print_early(const char *);
77
71#endif /* _ASM_S390_SCLP_H */ 78#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 089a49814c50..b8ffc1bd0a9f 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -15,19 +15,11 @@
15#include <asm/lowcore.h> 15#include <asm/lowcore.h>
16#include <asm/types.h> 16#include <asm/types.h>
17 17
18#ifndef CONFIG_64BIT
19#define IPL_DEVICE (*(unsigned long *) (0x10404))
20#define INITRD_START (*(unsigned long *) (0x1040C))
21#define INITRD_SIZE (*(unsigned long *) (0x10414))
22#define OLDMEM_BASE (*(unsigned long *) (0x1041C))
23#define OLDMEM_SIZE (*(unsigned long *) (0x10424))
24#else /* CONFIG_64BIT */
25#define IPL_DEVICE (*(unsigned long *) (0x10400)) 18#define IPL_DEVICE (*(unsigned long *) (0x10400))
26#define INITRD_START (*(unsigned long *) (0x10408)) 19#define INITRD_START (*(unsigned long *) (0x10408))
27#define INITRD_SIZE (*(unsigned long *) (0x10410)) 20#define INITRD_SIZE (*(unsigned long *) (0x10410))
28#define OLDMEM_BASE (*(unsigned long *) (0x10418)) 21#define OLDMEM_BASE (*(unsigned long *) (0x10418))
29#define OLDMEM_SIZE (*(unsigned long *) (0x10420)) 22#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
30#endif /* CONFIG_64BIT */
31#define COMMAND_LINE ((char *) (0x10480)) 23#define COMMAND_LINE ((char *) (0x10480))
32 24
33extern int memory_end_set; 25extern int memory_end_set;
@@ -55,8 +47,9 @@ extern void detect_memory_memblock(void);
55#define MACHINE_FLAG_LPP (1UL << 13) 47#define MACHINE_FLAG_LPP (1UL << 13)
56#define MACHINE_FLAG_TOPOLOGY (1UL << 14) 48#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
57#define MACHINE_FLAG_TE (1UL << 15) 49#define MACHINE_FLAG_TE (1UL << 15)
58#define MACHINE_FLAG_RRBM (1UL << 16)
59#define MACHINE_FLAG_TLB_LC (1UL << 17) 50#define MACHINE_FLAG_TLB_LC (1UL << 17)
51#define MACHINE_FLAG_VX (1UL << 18)
52#define MACHINE_FLAG_CAD (1UL << 19)
60 53
61#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 54#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
62#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 55#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -67,33 +60,16 @@ extern void detect_memory_memblock(void);
67#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1 60#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1
68#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1 61#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1
69 62
70#ifndef CONFIG_64BIT
71#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
72#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
73#define MACHINE_HAS_IDTE (0)
74#define MACHINE_HAS_DIAG44 (1)
75#define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG)
76#define MACHINE_HAS_EDAT1 (0)
77#define MACHINE_HAS_EDAT2 (0)
78#define MACHINE_HAS_LPP (0)
79#define MACHINE_HAS_TOPOLOGY (0)
80#define MACHINE_HAS_TE (0)
81#define MACHINE_HAS_RRBM (0)
82#define MACHINE_HAS_TLB_LC (0)
83#else /* CONFIG_64BIT */
84#define MACHINE_HAS_IEEE (1)
85#define MACHINE_HAS_CSP (1)
86#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) 63#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
87#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44) 64#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
88#define MACHINE_HAS_MVPG (1)
89#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) 65#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
90#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) 66#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
91#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) 67#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
92#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 68#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
93#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) 69#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
94#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
95#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) 70#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
96#endif /* CONFIG_64BIT */ 71#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
72#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
97 73
98/* 74/*
99 * Console mode. Override with conmode= 75 * Console mode. Override with conmode=
@@ -132,19 +108,11 @@ extern void (*_machine_power_off)(void);
132 108
133#else /* __ASSEMBLY__ */ 109#else /* __ASSEMBLY__ */
134 110
135#ifndef CONFIG_64BIT
136#define IPL_DEVICE 0x10404
137#define INITRD_START 0x1040C
138#define INITRD_SIZE 0x10414
139#define OLDMEM_BASE 0x1041C
140#define OLDMEM_SIZE 0x10424
141#else /* CONFIG_64BIT */
142#define IPL_DEVICE 0x10400 111#define IPL_DEVICE 0x10400
143#define INITRD_START 0x10408 112#define INITRD_START 0x10408
144#define INITRD_SIZE 0x10410 113#define INITRD_SIZE 0x10410
145#define OLDMEM_BASE 0x10418 114#define OLDMEM_BASE 0x10418
146#define OLDMEM_SIZE 0x10420 115#define OLDMEM_SIZE 0x10420
147#endif /* CONFIG_64BIT */
148#define COMMAND_LINE 0x10480 116#define COMMAND_LINE 0x10480
149 117
150#endif /* __ASSEMBLY__ */ 118#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 5959bfb3b693..c8b7cf9d6279 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -51,7 +51,6 @@
51 wl = __wl; \ 51 wl = __wl; \
52}) 52})
53 53
54#ifdef CONFIG_64BIT
55#define udiv_qrnnd(q, r, n1, n0, d) \ 54#define udiv_qrnnd(q, r, n1, n0, d) \
56 do { unsigned long __n; \ 55 do { unsigned long __n; \
57 unsigned int __r, __d; \ 56 unsigned int __r, __d; \
@@ -60,15 +59,6 @@
60 (q) = __n / __d; \ 59 (q) = __n / __d; \
61 (r) = __n % __d; \ 60 (r) = __n % __d; \
62 } while (0) 61 } while (0)
63#else
64#define udiv_qrnnd(q, r, n1, n0, d) \
65 do { unsigned int __r; \
66 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
67 (r) = __r; \
68 } while (0)
69extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
70 unsigned int , unsigned int);
71#endif
72 62
73#define UDIV_NEEDS_NORMALIZATION 0 63#define UDIV_NEEDS_NORMALIZATION 0
74 64
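
With the 31-bit path removed, only the 64-bit udiv_qrnnd remains: it glues the halves n1:n0 into one 64-bit dividend and lets the hardware divide do the work. The same logic as standalone C (softfloat callers guarantee n1 < d, which keeps the quotient within 32 bits):

#include <stdint.h>
#include <stdio.h>

static void udiv_qrnnd(uint32_t *q, uint32_t *r,
                       uint32_t n1, uint32_t n0, uint32_t d)
{
        uint64_t n = ((uint64_t) n1 << 32) | n0;  /* n1:n0 as one value */

        *q = (uint32_t) (n / d);  /* fits in 32 bits because n1 < d */
        *r = (uint32_t) (n % d);
}

int main(void)
{
        uint32_t q, r;

        udiv_qrnnd(&q, &r, 3, 5, 7);  /* dividend: 3 * 2^32 + 5 */
        printf("q=%u r=%u\n", (unsigned) q, (unsigned) r);  /* q=1840700270 r=3 */
        return 0;
}
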
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index bf9c823d4020..ec60cf7fa0a2 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -10,11 +10,14 @@
10#define SIGP_RESTART 6 10#define SIGP_RESTART 6
11#define SIGP_STOP_AND_STORE_STATUS 9 11#define SIGP_STOP_AND_STORE_STATUS 9
12#define SIGP_INITIAL_CPU_RESET 11 12#define SIGP_INITIAL_CPU_RESET 11
13#define SIGP_CPU_RESET 12
13#define SIGP_SET_PREFIX 13 14#define SIGP_SET_PREFIX 13
14#define SIGP_STORE_STATUS_AT_ADDRESS 14 15#define SIGP_STORE_STATUS_AT_ADDRESS 14
15#define SIGP_SET_ARCHITECTURE 18 16#define SIGP_SET_ARCHITECTURE 18
16#define SIGP_COND_EMERGENCY_SIGNAL 19 17#define SIGP_COND_EMERGENCY_SIGNAL 19
17#define SIGP_SENSE_RUNNING 21 18#define SIGP_SENSE_RUNNING 21
19#define SIGP_SET_MULTI_THREADING 22
20#define SIGP_STORE_ADDITIONAL_STATUS 23
18 21
19/* SIGP condition codes */ 22/* SIGP condition codes */
20#define SIGP_CC_ORDER_CODE_ACCEPTED 0 23#define SIGP_CC_ORDER_CODE_ACCEPTED 0
@@ -33,9 +36,10 @@
33 36
34#ifndef __ASSEMBLY__ 37#ifndef __ASSEMBLY__
35 38
36static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status) 39static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
40 u32 *status)
37{ 41{
38 register unsigned int reg1 asm ("1") = parm; 42 register unsigned long reg1 asm ("1") = parm;
39 int cc; 43 int cc;
40 44
41 asm volatile( 45 asm volatile(
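
The prototype change above widens parm (and reg1) from 32 to 64 bits; newly listed orders such as SIGP_STORE_ADDITIONAL_STATUS carry an address as their parameter, which a u32 would truncate, presumably the motivation here. The bug class, shown in plain C:

#include <stdint.h>
#include <stdio.h>

static uint64_t via_u32(uint32_t parm) { return parm; }  /* old, narrow path */
static uint64_t via_u64(uint64_t parm) { return parm; }  /* widened path */

int main(void)
{
        uint64_t addr = 0x1234beef0000ULL;  /* made-up 64-bit address */

        printf("narrow: %#llx\n", (unsigned long long) via_u32((uint32_t) addr));
        printf("wide:   %#llx\n", (unsigned long long) via_u64(addr));
        return 0;
}
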
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 4f1307962a95..b3bd0282dd98 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -16,6 +16,8 @@
16#define raw_smp_processor_id() (S390_lowcore.cpu_nr) 16#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
17 17
18extern struct mutex smp_cpu_state_mutex; 18extern struct mutex smp_cpu_state_mutex;
19extern unsigned int smp_cpu_mt_shift;
20extern unsigned int smp_cpu_mtid;
19 21
20extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); 22extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
21 23
@@ -29,13 +31,14 @@ extern int smp_find_processor_id(u16 address);
29extern int smp_store_status(int cpu); 31extern int smp_store_status(int cpu);
30extern int smp_vcpu_scheduled(int cpu); 32extern int smp_vcpu_scheduled(int cpu);
31extern void smp_yield_cpu(int cpu); 33extern void smp_yield_cpu(int cpu);
32extern void smp_yield(void);
33extern void smp_cpu_set_polarization(int cpu, int val); 34extern void smp_cpu_set_polarization(int cpu, int val);
34extern int smp_cpu_get_polarization(int cpu); 35extern int smp_cpu_get_polarization(int cpu);
35extern void smp_fill_possible_mask(void); 36extern void smp_fill_possible_mask(void);
36 37
37#else /* CONFIG_SMP */ 38#else /* CONFIG_SMP */
38 39
40#define smp_cpu_mtid 0
41
39static inline void smp_call_ipl_cpu(void (*func)(void *), void *data) 42static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
40{ 43{
41 func(data); 44 func(data);
@@ -50,7 +53,6 @@ static inline int smp_find_processor_id(u16 address) { return 0; }
50static inline int smp_store_status(int cpu) { return 0; } 53static inline int smp_store_status(int cpu) { return 0; }
51static inline int smp_vcpu_scheduled(int cpu) { return 1; } 54static inline int smp_vcpu_scheduled(int cpu) { return 1; }
52static inline void smp_yield_cpu(int cpu) { } 55static inline void smp_yield_cpu(int cpu) { }
53static inline void smp_yield(void) { }
54static inline void smp_fill_possible_mask(void) { } 56static inline void smp_fill_possible_mask(void) { }
55 57
56#endif /* CONFIG_SMP */ 58#endif /* CONFIG_SMP */
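
smp_cpu_mt_shift and smp_cpu_mtid publish the new SMT geometry (mtid is defined as 0 for !CONFIG_SMP so the thread math degenerates cleanly). A sketch of the addressing this implies, assuming the usual s390 convention that a core address is shifted left by mt_shift with the thread id in the low bits:

#include <stdio.h>

static unsigned int cpu_address(unsigned int core, unsigned int thread,
                                unsigned int mt_shift)
{
        return (core << mt_shift) | thread;
}

int main(void)
{
        unsigned int mt_shift = 1;  /* e.g. two threads per core */

        printf("core 5, thread 0 -> %u\n", cpu_address(5, 0, mt_shift)); /* 10 */
        printf("core 5, thread 1 -> %u\n", cpu_address(5, 1, mt_shift)); /* 11 */
        return 0;
}
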
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index a60d085ddb4d..487428b6d099 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -1,16 +1,7 @@
1#ifndef _ASM_S390_SPARSEMEM_H 1#ifndef _ASM_S390_SPARSEMEM_H
2#define _ASM_S390_SPARSEMEM_H 2#define _ASM_S390_SPARSEMEM_H
3 3
4#ifdef CONFIG_64BIT
5
6#define SECTION_SIZE_BITS 28 4#define SECTION_SIZE_BITS 28
7#define MAX_PHYSMEM_BITS 46 5#define MAX_PHYSMEM_BITS 46
8 6
9#else
10
11#define SECTION_SIZE_BITS 25
12#define MAX_PHYSMEM_BITS 31
13
14#endif /* CONFIG_64BIT */
15
16#endif /* _ASM_S390_SPARSEMEM_H */ 7#endif /* _ASM_S390_SPARSEMEM_H */
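
The surviving constants describe 256 MiB sections over a 46-bit (64 TiB) physical address space; the deleted branch was the 31-bit 32 MiB/2 GiB variant. The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
        unsigned int section_size_bits = 28, max_physmem_bits = 46;

        printf("section: %lu MiB, max sections: %lu\n",
               (1UL << section_size_bits) >> 20,                /* 256 */
               1UL << (max_physmem_bits - section_size_bits));  /* 262144 */
        return 0;
}
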
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 96879f7ad6da..0e37cd041241 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -18,14 +18,7 @@ extern int spin_retry;
18static inline int 18static inline int
19_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new) 19_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
20{ 20{
21 unsigned int old_expected = old; 21 return __sync_bool_compare_and_swap(lock, old, new);
22
23 asm volatile(
24 " cs %0,%3,%1"
25 : "=d" (old), "=Q" (*lock)
26 : "0" (old), "d" (new), "Q" (*lock)
27 : "cc", "memory" );
28 return old == old_expected;
29} 22}
30 23
31/* 24/*
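
The hand-coded "cs" sequence gives way to __sync_bool_compare_and_swap, which GCC compiles to the same compare-and-swap instruction on s390 and already returns the success flag. A standalone demonstration of the builtin's semantics:

#include <stdio.h>

int main(void)
{
        unsigned int lock = 0;

        /* Take the lock by swinging 0 -> 1: succeeds once, then fails. */
        printf("first:  %d\n", __sync_bool_compare_and_swap(&lock, 0u, 1u));
        printf("second: %d\n", __sync_bool_compare_and_swap(&lock, 0u, 1u));
        printf("lock:   %u\n", lock);  /* 1 */
        return 0;
}
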
@@ -37,11 +30,17 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
37 * (the type definitions are in asm/spinlock_types.h) 30 * (the type definitions are in asm/spinlock_types.h)
38 */ 31 */
39 32
33void arch_lock_relax(unsigned int cpu);
34
40void arch_spin_lock_wait(arch_spinlock_t *); 35void arch_spin_lock_wait(arch_spinlock_t *);
41int arch_spin_trylock_retry(arch_spinlock_t *); 36int arch_spin_trylock_retry(arch_spinlock_t *);
42void arch_spin_relax(arch_spinlock_t *);
43void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); 37void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
44 38
39static inline void arch_spin_relax(arch_spinlock_t *lock)
40{
41 arch_lock_relax(lock->lock);
42}
43
45static inline u32 arch_spin_lockval(int cpu) 44static inline u32 arch_spin_lockval(int cpu)
46{ 45{
47 return ~cpu; 46 return ~cpu;
@@ -64,11 +63,6 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
64 _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL)); 63 _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
65} 64}
66 65
67static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
68{
69 return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
70}
71
72static inline void arch_spin_lock(arch_spinlock_t *lp) 66static inline void arch_spin_lock(arch_spinlock_t *lp)
73{ 67{
74 if (!arch_spin_trylock_once(lp)) 68 if (!arch_spin_trylock_once(lp))
@@ -91,7 +85,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
91 85
92static inline void arch_spin_unlock(arch_spinlock_t *lp) 86static inline void arch_spin_unlock(arch_spinlock_t *lp)
93{ 87{
94 arch_spin_tryrelease_once(lp); 88 typecheck(unsigned int, lp->lock);
89 asm volatile(
90 __ASM_BARRIER
91 "st %1,%0\n"
92 : "+Q" (lp->lock)
93 : "d" (0)
94 : "cc", "memory");
95} 95}
96 96
97static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 97static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
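
arch_spin_unlock drops the compare-and-swap: only the lock holder ever clears the word, so a serializing barrier followed by a plain store of 0 is sufficient, release semantics by hand. The same shape in portable C11 (a sketch, not the kernel code):

#include <stdatomic.h>

void spin_unlock_sketch(atomic_uint *lock)
{
        /* No read-modify-write needed: only the owner stores here.  The
         * release ordering stands in for the __ASM_BARRIER above. */
        atomic_store_explicit(lock, 0u, memory_order_release);
}
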
@@ -123,13 +123,12 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
123 */ 123 */
124#define arch_write_can_lock(x) ((x)->lock == 0) 124#define arch_write_can_lock(x) ((x)->lock == 0)
125 125
126extern void _raw_read_lock_wait(arch_rwlock_t *lp);
127extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
128extern int _raw_read_trylock_retry(arch_rwlock_t *lp); 126extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
129extern void _raw_write_lock_wait(arch_rwlock_t *lp);
130extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
131extern int _raw_write_trylock_retry(arch_rwlock_t *lp); 127extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
132 128
129#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
130#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
131
133static inline int arch_read_trylock_once(arch_rwlock_t *rw) 132static inline int arch_read_trylock_once(arch_rwlock_t *rw)
134{ 133{
135 unsigned int old = ACCESS_ONCE(rw->lock); 134 unsigned int old = ACCESS_ONCE(rw->lock);
@@ -144,16 +143,82 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
144 _raw_compare_and_swap(&rw->lock, 0, 0x80000000)); 143 _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
145} 144}
146 145
146#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
147
148#define __RAW_OP_OR "lao"
149#define __RAW_OP_AND "lan"
150#define __RAW_OP_ADD "laa"
151
152#define __RAW_LOCK(ptr, op_val, op_string) \
153({ \
154 unsigned int old_val; \
155 \
156 typecheck(unsigned int *, ptr); \
157 asm volatile( \
158 op_string " %0,%2,%1\n" \
159 "bcr 14,0\n" \
160 : "=d" (old_val), "+Q" (*ptr) \
161 : "d" (op_val) \
162 : "cc", "memory"); \
163 old_val; \
164})
165
166#define __RAW_UNLOCK(ptr, op_val, op_string) \
167({ \
168 unsigned int old_val; \
169 \
170 typecheck(unsigned int *, ptr); \
171 asm volatile( \
172 "bcr 14,0\n" \
173 op_string " %0,%2,%1\n" \
174 : "=d" (old_val), "+Q" (*ptr) \
175 : "d" (op_val) \
176 : "cc", "memory"); \
177 old_val; \
178})
179
180extern void _raw_read_lock_wait(arch_rwlock_t *lp);
181extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
182
147static inline void arch_read_lock(arch_rwlock_t *rw) 183static inline void arch_read_lock(arch_rwlock_t *rw)
148{ 184{
149 if (!arch_read_trylock_once(rw)) 185 unsigned int old;
186
187 old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
188 if ((int) old < 0)
150 _raw_read_lock_wait(rw); 189 _raw_read_lock_wait(rw);
151} 190}
152 191
153static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) 192static inline void arch_read_unlock(arch_rwlock_t *rw)
193{
194 __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
195}
196
197static inline void arch_write_lock(arch_rwlock_t *rw)
198{
199 unsigned int old;
200
201 old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
202 if (old != 0)
203 _raw_write_lock_wait(rw, old);
204 rw->owner = SPINLOCK_LOCKVAL;
205}
206
207static inline void arch_write_unlock(arch_rwlock_t *rw)
208{
209 rw->owner = 0;
210 __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
211}
212
213#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
214
215extern void _raw_read_lock_wait(arch_rwlock_t *lp);
216extern void _raw_write_lock_wait(arch_rwlock_t *lp);
217
218static inline void arch_read_lock(arch_rwlock_t *rw)
154{ 219{
155 if (!arch_read_trylock_once(rw)) 220 if (!arch_read_trylock_once(rw))
156 _raw_read_lock_wait_flags(rw, flags); 221 _raw_read_lock_wait(rw);
157} 222}
158 223
159static inline void arch_read_unlock(arch_rwlock_t *rw) 224static inline void arch_read_unlock(arch_rwlock_t *rw)
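
On z196 and newer, the rwlock fast path becomes a single interlocked-access operation (laa/lao, bracketed by bcr 14,0 for serialization): readers add 1, a writer sets bit 31, and a negative result means a writer holds the lock. A portable C11 sketch of just that counting scheme (the kernel's slow paths differ):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int rw;  /* >= 0: reader count; bit 31 set: writer present */

bool read_trylock_sketch(void)
{
        if (atomic_fetch_add(&rw, 1) >= 0)
                return true;           /* no writer: read lock acquired */
        atomic_fetch_sub(&rw, 1);      /* writer present: back out */
        return false;
}

void read_unlock_sketch(void)
{
        atomic_fetch_sub(&rw, 1);
}

bool write_trylock_sketch(void)
{
        /* INT_MIN is the 0x80000000 used with __RAW_OP_OR above. */
        return atomic_fetch_or(&rw, INT_MIN) == 0;
}
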
@@ -169,19 +234,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
169{ 234{
170 if (!arch_write_trylock_once(rw)) 235 if (!arch_write_trylock_once(rw))
171 _raw_write_lock_wait(rw); 236 _raw_write_lock_wait(rw);
172} 237 rw->owner = SPINLOCK_LOCKVAL;
173
174static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
175{
176 if (!arch_write_trylock_once(rw))
177 _raw_write_lock_wait_flags(rw, flags);
178} 238}
179 239
180static inline void arch_write_unlock(arch_rwlock_t *rw) 240static inline void arch_write_unlock(arch_rwlock_t *rw)
181{ 241{
182 _raw_compare_and_swap(&rw->lock, 0x80000000, 0); 242 typecheck(unsigned int, rw->lock);
243
244 rw->owner = 0;
245 asm volatile(
246 __ASM_BARRIER
247 "st %1,%0\n"
248 : "+Q" (rw->lock)
249 : "d" (0)
250 : "cc", "memory");
183} 251}
184 252
253#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
254
185static inline int arch_read_trylock(arch_rwlock_t *rw) 255static inline int arch_read_trylock(arch_rwlock_t *rw)
186{ 256{
187 if (!arch_read_trylock_once(rw)) 257 if (!arch_read_trylock_once(rw))
@@ -191,12 +261,20 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
191 261
192static inline int arch_write_trylock(arch_rwlock_t *rw) 262static inline int arch_write_trylock(arch_rwlock_t *rw)
193{ 263{
194 if (!arch_write_trylock_once(rw)) 264 if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
195 return _raw_write_trylock_retry(rw); 265 return 0;
266 rw->owner = SPINLOCK_LOCKVAL;
196 return 1; 267 return 1;
197} 268}
198 269
199#define arch_read_relax(lock) cpu_relax() 270static inline void arch_read_relax(arch_rwlock_t *rw)
200#define arch_write_relax(lock) cpu_relax() 271{
272 arch_lock_relax(rw->owner);
273}
274
275static inline void arch_write_relax(arch_rwlock_t *rw)
276{
277 arch_lock_relax(rw->owner);
278}
201 279
202#endif /* __ASM_SPINLOCK_H */ 280#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index b2cd6ff7c2c5..d84b6939237c 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -13,6 +13,7 @@ typedef struct {
13 13
14typedef struct { 14typedef struct {
15 unsigned int lock; 15 unsigned int lock;
16 unsigned int owner;
16} arch_rwlock_t; 17} arch_rwlock_t;
17 18
18#define __ARCH_RW_LOCK_UNLOCKED { 0 } 19#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 7e2dcd7c57ef..8662f5c8e17f 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -44,7 +44,6 @@ extern char *strstr(const char *, const char *);
44#undef __HAVE_ARCH_STRCHR 44#undef __HAVE_ARCH_STRCHR
45#undef __HAVE_ARCH_STRNCHR 45#undef __HAVE_ARCH_STRNCHR
46#undef __HAVE_ARCH_STRNCMP 46#undef __HAVE_ARCH_STRNCMP
47#undef __HAVE_ARCH_STRNICMP
48#undef __HAVE_ARCH_STRPBRK 47#undef __HAVE_ARCH_STRPBRK
49#undef __HAVE_ARCH_STRSEP 48#undef __HAVE_ARCH_STRSEP
50#undef __HAVE_ARCH_STRSPN 49#undef __HAVE_ARCH_STRSPN
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 18ea9e3f8142..d62e7a69605f 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -18,9 +18,6 @@ static inline int test_fp_ctl(u32 fpc)
18 u32 orig_fpc; 18 u32 orig_fpc;
19 int rc; 19 int rc;
20 20
21 if (!MACHINE_HAS_IEEE)
22 return 0;
23
24 asm volatile( 21 asm volatile(
25 " efpc %1\n" 22 " efpc %1\n"
26 " sfpc %2\n" 23 " sfpc %2\n"
@@ -35,9 +32,6 @@ static inline int test_fp_ctl(u32 fpc)
35 32
36static inline void save_fp_ctl(u32 *fpc) 33static inline void save_fp_ctl(u32 *fpc)
37{ 34{
38 if (!MACHINE_HAS_IEEE)
39 return;
40
41 asm volatile( 35 asm volatile(
42 " stfpc %0\n" 36 " stfpc %0\n"
43 : "+Q" (*fpc)); 37 : "+Q" (*fpc));
@@ -47,9 +41,6 @@ static inline int restore_fp_ctl(u32 *fpc)
47{ 41{
48 int rc; 42 int rc;
49 43
50 if (!MACHINE_HAS_IEEE)
51 return 0;
52
53 asm volatile( 44 asm volatile(
54 " lfpc %1\n" 45 " lfpc %1\n"
55 "0: la %0,0\n" 46 "0: la %0,0\n"
@@ -65,8 +56,6 @@ static inline void save_fp_regs(freg_t *fprs)
65 asm volatile("std 2,%0" : "=Q" (fprs[2])); 56 asm volatile("std 2,%0" : "=Q" (fprs[2]));
66 asm volatile("std 4,%0" : "=Q" (fprs[4])); 57 asm volatile("std 4,%0" : "=Q" (fprs[4]));
67 asm volatile("std 6,%0" : "=Q" (fprs[6])); 58 asm volatile("std 6,%0" : "=Q" (fprs[6]));
68 if (!MACHINE_HAS_IEEE)
69 return;
70 asm volatile("std 1,%0" : "=Q" (fprs[1])); 59 asm volatile("std 1,%0" : "=Q" (fprs[1]));
71 asm volatile("std 3,%0" : "=Q" (fprs[3])); 60 asm volatile("std 3,%0" : "=Q" (fprs[3]));
72 asm volatile("std 5,%0" : "=Q" (fprs[5])); 61 asm volatile("std 5,%0" : "=Q" (fprs[5]));
@@ -87,8 +76,6 @@ static inline void restore_fp_regs(freg_t *fprs)
87 asm volatile("ld 2,%0" : : "Q" (fprs[2])); 76 asm volatile("ld 2,%0" : : "Q" (fprs[2]));
88 asm volatile("ld 4,%0" : : "Q" (fprs[4])); 77 asm volatile("ld 4,%0" : : "Q" (fprs[4]));
89 asm volatile("ld 6,%0" : : "Q" (fprs[6])); 78 asm volatile("ld 6,%0" : : "Q" (fprs[6]));
90 if (!MACHINE_HAS_IEEE)
91 return;
92 asm volatile("ld 1,%0" : : "Q" (fprs[1])); 79 asm volatile("ld 1,%0" : : "Q" (fprs[1]));
93 asm volatile("ld 3,%0" : : "Q" (fprs[3])); 80 asm volatile("ld 3,%0" : : "Q" (fprs[3]));
94 asm volatile("ld 5,%0" : : "Q" (fprs[5])); 81 asm volatile("ld 5,%0" : : "Q" (fprs[5]));
@@ -103,6 +90,57 @@ static inline void restore_fp_regs(freg_t *fprs)
103 asm volatile("ld 15,%0" : : "Q" (fprs[15])); 90 asm volatile("ld 15,%0" : : "Q" (fprs[15]));
104} 91}
105 92
93static inline void save_vx_regs(__vector128 *vxrs)
94{
95 typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
96
97 asm volatile(
98 " la 1,%0\n"
99 " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
100 " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
101 : "=Q" (*(addrtype *) vxrs) : : "1");
102}
103
104static inline void save_vx_regs_safe(__vector128 *vxrs)
105{
106 unsigned long cr0, flags;
107
108 flags = arch_local_irq_save();
109 __ctl_store(cr0, 0, 0);
110 __ctl_set_bit(0, 17);
111 __ctl_set_bit(0, 18);
112 save_vx_regs(vxrs);
113 __ctl_load(cr0, 0, 0);
114 arch_local_irq_restore(flags);
115}
116
117static inline void restore_vx_regs(__vector128 *vxrs)
118{
119 typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
120
121 asm volatile(
122 " la 1,%0\n"
123 " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
124 " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
125 : : "Q" (*(addrtype *) vxrs) : "1");
126}
127
128static inline void save_fp_vx_regs(struct task_struct *task)
129{
130 if (task->thread.vxrs)
131 save_vx_regs(task->thread.vxrs);
132 else
133 save_fp_regs(task->thread.fp_regs.fprs);
134}
135
136static inline void restore_fp_vx_regs(struct task_struct *task)
137{
138 if (task->thread.vxrs)
139 restore_vx_regs(task->thread.vxrs);
140 else
141 restore_fp_regs(task->thread.fp_regs.fprs);
142}
143
106static inline void save_access_regs(unsigned int *acrs) 144static inline void save_access_regs(unsigned int *acrs)
107{ 145{
108 typedef struct { int _[NUM_ACRS]; } acrstype; 146 typedef struct { int _[NUM_ACRS]; } acrstype;
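
save_vx_regs stores all 32 vector registers with two vstm instructions, hand-encoded as .word constants so the header still assembles with binutils that do not know the vector facility; save_vx_regs_safe additionally flips the CR0 enablement bits with interrupts off so the stores cannot fault where the facility is disabled. A size check for the save area, assuming __NUM_VXRS is 32 and a __vector128 is 16 bytes:

#include <stdint.h>
#include <stdio.h>

struct vector128 { uint64_t high, low; };  /* stand-in for __vector128 */

int main(void)
{
        enum { NUM_VXRS = 32 };  /* assumed value of __NUM_VXRS */

        /* vstm 0,15,0(1) covers bytes 0..255; vstm 16,31,256(1) the rest. */
        printf("save area: %zu bytes\n", NUM_VXRS * sizeof(struct vector128));
        return 0;
}
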
@@ -120,16 +158,16 @@ static inline void restore_access_regs(unsigned int *acrs)
120#define switch_to(prev,next,last) do { \ 158#define switch_to(prev,next,last) do { \
121 if (prev->mm) { \ 159 if (prev->mm) { \
122 save_fp_ctl(&prev->thread.fp_regs.fpc); \ 160 save_fp_ctl(&prev->thread.fp_regs.fpc); \
123 save_fp_regs(prev->thread.fp_regs.fprs); \ 161 save_fp_vx_regs(prev); \
124 save_access_regs(&prev->thread.acrs[0]); \ 162 save_access_regs(&prev->thread.acrs[0]); \
125 save_ri_cb(prev->thread.ri_cb); \ 163 save_ri_cb(prev->thread.ri_cb); \
126 } \ 164 } \
127 if (next->mm) { \ 165 if (next->mm) { \
166 update_cr_regs(next); \
128 restore_fp_ctl(&next->thread.fp_regs.fpc); \ 167 restore_fp_ctl(&next->thread.fp_regs.fpc); \
129 restore_fp_regs(next->thread.fp_regs.fprs); \ 168 restore_fp_vx_regs(next); \
130 restore_access_regs(&next->thread.acrs[0]); \ 169 restore_access_regs(&next->thread.acrs[0]); \
131 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ 170 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
132 update_cr_regs(next); \
133 } \ 171 } \
134 prev = __switch_to(prev,next); \ 172 prev = __switch_to(prev,next); \
135} while (0) 173} while (0)
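
save_fp_vx_regs/restore_fp_vx_regs can pick one bank or the other because, architecturally, floating-point registers 0-15 occupy the leftmost 64 bits of vector registers 0-15, so saving the VXRs already captures the FPR contents. (Also visible above: update_cr_regs moves ahead of the register restore.) Sketch of the overlay:

#include <stdint.h>
#include <stdio.h>

struct vector128 { uint64_t high, low; };  /* stand-in for __vector128 */

static uint64_t fpr_image(const struct vector128 *vxr)
{
        return vxr->high;  /* the FPR is the leftmost (high) half */
}

int main(void)
{
        struct vector128 v2 = { 0x4000000000000000ULL, 0 };  /* 2.0 as a double */

        printf("fpr2 bits: %#llx\n", (unsigned long long) fpr_image(&v2));
        return 0;
}
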
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index abad78d5b10c..6ba0bf928909 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -54,7 +54,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
54 struct pt_regs *regs, 54 struct pt_regs *regs,
55 int error, long val) 55 int error, long val)
56{ 56{
57 regs->gprs[2] = error ? -error : val; 57 regs->gprs[2] = error ? error : val;
58} 58}
59 59
60static inline void syscall_get_arguments(struct task_struct *task, 60static inline void syscall_get_arguments(struct task_struct *task,
@@ -95,6 +95,6 @@ static inline int syscall_get_arch(void)
95 if (test_tsk_thread_flag(current, TIF_31BIT)) 95 if (test_tsk_thread_flag(current, TIF_31BIT))
96 return AUDIT_ARCH_S390; 96 return AUDIT_ARCH_S390;
97#endif 97#endif
98 return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390; 98 return AUDIT_ARCH_S390X;
99} 99}
100#endif /* _ASM_SYSCALL_H */ 100#endif /* _ASM_SYSCALL_H */
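
Two semantic fixes above: syscall_get_arch can report AUDIT_ARCH_S390X unconditionally now that 31-bit kernels are gone, and syscall_set_return_value stops negating the error, since the generic contract (asm-generic/syscall.h) already passes a negative errno; the old code handed user space a positive value. In miniature:

#include <errno.h>
#include <stdio.h>

int main(void)
{
        int error = -EFAULT;  /* what callers pass per the generic contract */

        printf("new: stores %d\n", error ? error : 0);   /* -14 */
        printf("old: stored %d\n", error ? -error : 0);  /* +14, wrong sign */
        return 0;
}
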
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index f92428e459f8..f7054a892d9e 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -15,6 +15,7 @@
15#define __ASM_S390_SYSINFO_H 15#define __ASM_S390_SYSINFO_H
16 16
17#include <asm/bitsperlong.h> 17#include <asm/bitsperlong.h>
18#include <linux/uuid.h>
18 19
19struct sysinfo_1_1_1 { 20struct sysinfo_1_1_1 {
20 unsigned char p:1; 21 unsigned char p:1;
@@ -90,7 +91,11 @@ struct sysinfo_2_2_2 {
90 unsigned short cpus_reserved; 91 unsigned short cpus_reserved;
91 char name[8]; 92 char name[8];
92 unsigned int caf; 93 unsigned int caf;
93 char reserved_2[16]; 94 char reserved_2[8];
95 unsigned char mt_installed;
96 unsigned char mt_general;
97 unsigned char mt_psmtid;
98 char reserved_3[5];
94 unsigned short cpus_dedicated; 99 unsigned short cpus_dedicated;
95 unsigned short cpus_shared; 100 unsigned short cpus_shared;
96}; 101};
@@ -112,34 +117,39 @@ struct sysinfo_3_2_2 {
112 char name[8]; 117 char name[8];
113 unsigned int caf; 118 unsigned int caf;
114 char cpi[16]; 119 char cpi[16];
115 char reserved_1[24]; 120 char reserved_1[3];
116 121 char ext_name_encoding;
122 unsigned int reserved_2;
123 uuid_be uuid;
117 } vm[8]; 124 } vm[8];
118 char reserved_544[3552]; 125 char reserved_3[1504];
126 char ext_names[8][256];
119}; 127};
120 128
121extern int topology_max_mnest; 129extern int topology_max_mnest;
122 130
123#define TOPOLOGY_CPU_BITS 64 131#define TOPOLOGY_CORE_BITS 64
124#define TOPOLOGY_NR_MAG 6 132#define TOPOLOGY_NR_MAG 6
125 133
126struct topology_cpu { 134struct topology_core {
127 unsigned char reserved0[4]; 135 unsigned char nl;
136 unsigned char reserved0[3];
128 unsigned char :6; 137 unsigned char :6;
129 unsigned char pp:2; 138 unsigned char pp:2;
130 unsigned char reserved1; 139 unsigned char reserved1;
131 unsigned short origin; 140 unsigned short origin;
132 unsigned long mask[TOPOLOGY_CPU_BITS / BITS_PER_LONG]; 141 unsigned long mask[TOPOLOGY_CORE_BITS / BITS_PER_LONG];
133}; 142};
134 143
135struct topology_container { 144struct topology_container {
136 unsigned char reserved[7]; 145 unsigned char nl;
146 unsigned char reserved[6];
137 unsigned char id; 147 unsigned char id;
138}; 148};
139 149
140union topology_entry { 150union topology_entry {
141 unsigned char nl; 151 unsigned char nl;
142 struct topology_cpu cpu; 152 struct topology_core cpu;
143 struct topology_container container; 153 struct topology_container container;
144}; 154};
145 155
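
The reworked SYSIBs surface the multithreading fields and the guest UUID plus extended names, and each topology entry now starts with an explicit nl (nesting level) byte, which is what lets a parser tell the 8-byte container entries from the 16-byte core entries while walking the packed list. A layout check under assumed natural packing:

#include <stdint.h>

/* Stand-ins for struct topology_core / struct topology_container above. */
struct tl_core {
        uint8_t  nl, reserved0[3], flags, reserved1;
        uint16_t origin;
        uint64_t mask;  /* TOPOLOGY_CORE_BITS == 64: a single word */
};
struct tl_container {
        uint8_t nl, reserved[6], id;
};

_Static_assert(sizeof(struct tl_core) == 16, "core TLE is 16 bytes");
_Static_assert(sizeof(struct tl_container) == 8, "container TLE is 8 bytes");
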
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index b833e9c0bfbf..4c27ec764c36 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -10,13 +10,8 @@
10/* 10/*
11 * Size of kernel stack for each process 11 * Size of kernel stack for each process
12 */ 12 */
13#ifndef CONFIG_64BIT
14#define THREAD_ORDER 1
15#define ASYNC_ORDER 1
16#else /* CONFIG_64BIT */
17#define THREAD_ORDER 2 13#define THREAD_ORDER 2
18#define ASYNC_ORDER 2 14#define ASYNC_ORDER 2
19#endif /* CONFIG_64BIT */
20 15
21#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 16#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
22#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER) 17#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
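
THREAD_ORDER 2 is now unconditional: every kernel stack is PAGE_SIZE << 2 = 16 KiB (the deleted 31-bit case used order 1, i.e. 8 KiB). Quick check:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096, thread_order = 2;

        printf("THREAD_SIZE = %lu KiB\n", (page_size << thread_order) >> 10);
        return 0;
}
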
@@ -34,12 +29,10 @@
34 */ 29 */
35struct thread_info { 30struct thread_info {
36 struct task_struct *task; /* main task structure */ 31 struct task_struct *task; /* main task structure */
37 struct exec_domain *exec_domain; /* execution domain */
38 unsigned long flags; /* low level flags */ 32 unsigned long flags; /* low level flags */
39 unsigned long sys_call_table; /* System call table address */ 33 unsigned long sys_call_table; /* System call table address */
40 unsigned int cpu; /* current CPU */ 34 unsigned int cpu; /* current CPU */
41 int preempt_count; /* 0 => preemptable, <0 => BUG */ 35 int preempt_count; /* 0 => preemptable, <0 => BUG */
42 struct restart_block restart_block;
43 unsigned int system_call; 36 unsigned int system_call;
44 __u64 user_timer; 37 __u64 user_timer;
45 __u64 system_timer; 38 __u64 system_timer;
@@ -52,13 +45,9 @@ struct thread_info {
52#define INIT_THREAD_INFO(tsk) \ 45#define INIT_THREAD_INFO(tsk) \
53{ \ 46{ \
54 .task = &tsk, \ 47 .task = &tsk, \
55 .exec_domain = &default_exec_domain, \
56 .flags = 0, \ 48 .flags = 0, \
57 .cpu = 0, \ 49 .cpu = 0, \
58 .preempt_count = INIT_PREEMPT_COUNT, \ 50 .preempt_count = INIT_PREEMPT_COUNT, \
59 .restart_block = { \
60 .fn = do_no_restart_syscall, \
61 }, \
62} 51}
63 52
64#define init_thread_info (init_thread_union.thread_info) 53#define init_thread_info (init_thread_union.thread_info)
@@ -70,6 +59,8 @@ static inline struct thread_info *current_thread_info(void)
70 return (struct thread_info *) S390_lowcore.thread_info; 59 return (struct thread_info *) S390_lowcore.thread_info;
71} 60}
72 61
62void arch_release_task_struct(struct task_struct *tsk);
63
73#define THREAD_SIZE_ORDER THREAD_ORDER 64#define THREAD_SIZE_ORDER THREAD_ORDER
74 65
75#endif 66#endif
@@ -84,11 +75,13 @@ static inline struct thread_info *current_thread_info(void)
84#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ 75#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
85#define TIF_SECCOMP 5 /* secure computing */ 76#define TIF_SECCOMP 5 /* secure computing */
86#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ 77#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
78#define TIF_UPROBE 7 /* breakpointed or single-stepping */
87#define TIF_31BIT 16 /* 32bit process */ 79#define TIF_31BIT 16 /* 32bit process */
88#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 80#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
89#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ 81#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
90#define TIF_SINGLE_STEP 19 /* This task is single stepped */ 82#define TIF_SINGLE_STEP 19 /* This task is single stepped */
91#define TIF_BLOCK_STEP 20 /* This task is block stepped */ 83#define TIF_BLOCK_STEP 20 /* This task is block stepped */
84#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
92 85
93#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 86#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
94#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 87#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -97,13 +90,10 @@ static inline struct thread_info *current_thread_info(void)
97#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 90#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
98#define _TIF_SECCOMP (1<<TIF_SECCOMP) 91#define _TIF_SECCOMP (1<<TIF_SECCOMP)
99#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 92#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
93#define _TIF_UPROBE (1<<TIF_UPROBE)
100#define _TIF_31BIT (1<<TIF_31BIT) 94#define _TIF_31BIT (1<<TIF_31BIT)
101#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 95#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
102 96
103#ifdef CONFIG_64BIT
104#define is_32bit_task() (test_thread_flag(TIF_31BIT)) 97#define is_32bit_task() (test_thread_flag(TIF_31BIT))
105#else
106#define is_32bit_task() (1)
107#endif
108 98
109#endif /* _ASM_THREAD_INFO_H */ 99#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8beee1cceba4..98eb2a579223 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
67 set_clock_comparator(S390_lowcore.clock_comparator); 67 set_clock_comparator(S390_lowcore.clock_comparator);
68} 68}
69 69
70#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 70#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
71#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */
71 72
72typedef unsigned long long cycles_t; 73typedef unsigned long long cycles_t;
73 74
74static inline void get_tod_clock_ext(char clk[16]) 75static inline void get_tod_clock_ext(char *clk)
75{ 76{
76 typedef struct { char _[sizeof(clk)]; } addrtype; 77 typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
77 78
78 asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc"); 79 asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
79} 80}
80 81
81static inline unsigned long long get_tod_clock(void) 82static inline unsigned long long get_tod_clock(void)
82{ 83{
83 unsigned char clk[16]; 84 unsigned char clk[STORE_CLOCK_EXT_SIZE];
85
84 get_tod_clock_ext(clk); 86 get_tod_clock_ext(clk);
85 return *((unsigned long long *)&clk[1]); 87 return *((unsigned long long *)&clk[1]);
86} 88}
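
The get_tod_clock_ext change fixes a classic C trap: a char clk[16] parameter is really a char *, so the old sizeof(clk) inside the function described 8 bytes, not the 16 that stcke stores, and the asm constraint under-covered the output. (get_tod_clock then reads from &clk[1] because the leading byte of the 16-byte stcke result precedes the 64-bit TOD word proper.) Runnable demonstration, assuming a 64-bit build:

#include <stdio.h>

static void callee(char clk[16])
{
        printf("callee: sizeof(clk) = %zu\n", sizeof(clk));  /* 8: a pointer */
}

int main(void)
{
        char clk[16];

        printf("caller: sizeof(clk) = %zu\n", sizeof(clk));  /* 16 */
        callee(clk);
        return 0;
}
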
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index a25f09fbaf36..7a92e69c50bc 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -105,7 +105,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
105static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, 105static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
106 unsigned long address) 106 unsigned long address)
107{ 107{
108 page_table_free_rcu(tlb, (unsigned long *) pte); 108 page_table_free_rcu(tlb, (unsigned long *) pte, address);
109} 109}
110 110
111/* 111/*
@@ -118,11 +118,10 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
118static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, 118static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
119 unsigned long address) 119 unsigned long address)
120{ 120{
121#ifdef CONFIG_64BIT
122 if (tlb->mm->context.asce_limit <= (1UL << 31)) 121 if (tlb->mm->context.asce_limit <= (1UL << 31))
123 return; 122 return;
123 pgtable_pmd_page_dtor(virt_to_page(pmd));
124 tlb_remove_table(tlb, pmd); 124 tlb_remove_table(tlb, pmd);
125#endif
126} 125}
127 126
128/* 127/*
@@ -135,11 +134,9 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
135static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, 134static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
136 unsigned long address) 135 unsigned long address)
137{ 136{
138#ifdef CONFIG_64BIT
139 if (tlb->mm->context.asce_limit <= (1UL << 42)) 137 if (tlb->mm->context.asce_limit <= (1UL << 42))
140 return; 138 return;
141 tlb_remove_table(tlb, pud); 139 tlb_remove_table(tlb, pud);
142#endif
143} 140}
144 141
145#define tlb_start_vma(tlb, vma) do { } while (0) 142#define tlb_start_vma(tlb, vma) do { } while (0)
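
With the CONFIG_64BIT guards gone, the asce_limit comparisons alone decide which upper-level tables exist: an address space capped at 2 GiB has no pmd of its own to free, one capped at 4 TiB no pud. A sketch of that mapping (cut-offs taken from the code above; level names per the usual s390 region/segment table scheme):

int asce_levels_sketch(unsigned long asce_limit)
{
        if (asce_limit <= (1UL << 31))
                return 2;  /* segment + page tables only */
        if (asce_limit <= (1UL << 42))
                return 3;  /* plus region-third table */
        return 4;          /* plus region-second table */
}
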
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 16c9c88658c8..ca148f7c3eaa 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -49,13 +49,6 @@ static inline void __tlb_flush_global(void)
49 register unsigned long reg4 asm("4"); 49 register unsigned long reg4 asm("4");
50 long dummy; 50 long dummy;
51 51
52#ifndef CONFIG_64BIT
53 if (!MACHINE_HAS_CSP) {
54 smp_ptlb_all();
55 return;
56 }
57#endif /* CONFIG_64BIT */
58
59 dummy = 0; 52 dummy = 0;
60 reg2 = reg3 = 0; 53 reg2 = reg3 = 0;
61 reg4 = ((unsigned long) &dummy) + 1; 54 reg4 = ((unsigned long) &dummy) + 1;
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 56af53093d24..b1453a2ae1ca 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -9,20 +9,24 @@ struct cpu;
9#ifdef CONFIG_SCHED_BOOK 9#ifdef CONFIG_SCHED_BOOK
10 10
11struct cpu_topology_s390 { 11struct cpu_topology_s390 {
12 unsigned short thread_id;
12 unsigned short core_id; 13 unsigned short core_id;
13 unsigned short socket_id; 14 unsigned short socket_id;
14 unsigned short book_id; 15 unsigned short book_id;
16 cpumask_t thread_mask;
15 cpumask_t core_mask; 17 cpumask_t core_mask;
16 cpumask_t book_mask; 18 cpumask_t book_mask;
17}; 19};
18 20
19extern struct cpu_topology_s390 cpu_topology[NR_CPUS]; 21DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
20 22
21#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) 23#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
22#define topology_core_id(cpu) (cpu_topology[cpu].core_id) 24#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
23#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask) 25#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
24#define topology_book_id(cpu) (cpu_topology[cpu].book_id) 26#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
25#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask) 27#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
28#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
29#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
26 30
27#define mc_capable() 1 31#define mc_capable() 1
28 32
@@ -47,14 +51,6 @@ static inline void topology_expect_change(void) { }
47#define POLARIZATION_VM (2) 51#define POLARIZATION_VM (2)
48#define POLARIZATION_VH (3) 52#define POLARIZATION_VH (3)
49 53
50#ifdef CONFIG_SCHED_BOOK
51void s390_init_cpu_topology(void);
52#else
53static inline void s390_init_cpu_topology(void)
54{
55};
56#endif
57
58#include <asm-generic/topology.h> 54#include <asm-generic/topology.h>
59 55
60#endif /* _ASM_S390_TOPOLOGY_H */ 56#endif /* _ASM_S390_TOPOLOGY_H */
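
cpu_topology moves from a static NR_CPUS array to per-CPU data and grows a thread level; the accessor macros keep their names, so existing users compile unchanged. A hypothetical kernel-context user of the macros above (sketch only, assumes the usual printk machinery, not part of the patch):

static void print_cpu_location(int cpu)
{
        pr_info("cpu %d: socket %d, book %d, core %d\n", cpu,
                topology_physical_package_id(cpu),
                topology_book_id(cpu),
                topology_core_id(cpu));
}
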
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index dccef3ca91fa..6740f4f9781f 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -8,21 +8,4 @@
8 8
9#include <uapi/asm/types.h> 9#include <uapi/asm/types.h>
10 10
11/*
12 * These aren't exported outside the kernel to avoid name space clashes
13 */
14
15#ifndef __ASSEMBLY__
16
17#ifndef CONFIG_64BIT
18typedef union {
19 unsigned long long pair;
20 struct {
21 unsigned long even;
22 unsigned long odd;
23 } subreg;
24} register_pair;
25
26#endif /* ! CONFIG_64BIT */
27#endif /* __ASSEMBLY__ */
28#endif /* _S390_TYPES_H */ 11#endif /* _S390_TYPES_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index cd4c68e0398d..d64a7a62164f 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -372,5 +372,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
372} 372}
373 373
374int copy_to_user_real(void __user *dest, void *src, unsigned long count); 374int copy_to_user_real(void __user *dest, void *src, unsigned long count);
375void s390_kernel_write(void *dst, const void *src, size_t size);
375 376
376#endif /* __S390_UACCESS_H */ 377#endif /* __S390_UACCESS_H */
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 651886353551..91f56b1d8156 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -9,11 +9,7 @@
9#include <uapi/asm/unistd.h> 9#include <uapi/asm/unistd.h>
10 10
11 11
12#ifndef CONFIG_64BIT
13#define __IGNORE_select
14#else
15#define __IGNORE_time 12#define __IGNORE_time
16#endif
17 13
18/* Ignore NUMA system calls. Not wired up on s390. */ 14/* Ignore NUMA system calls. Not wired up on s390. */
19#define __IGNORE_mbind 15#define __IGNORE_mbind
@@ -43,10 +39,6 @@
43#define __ARCH_WANT_SYS_OLDUMOUNT 39#define __ARCH_WANT_SYS_OLDUMOUNT
44#define __ARCH_WANT_SYS_SIGPENDING 40#define __ARCH_WANT_SYS_SIGPENDING
45#define __ARCH_WANT_SYS_SIGPROCMASK 41#define __ARCH_WANT_SYS_SIGPROCMASK
46# ifndef CONFIG_64BIT
47# define __ARCH_WANT_STAT64
48# define __ARCH_WANT_SYS_TIME
49# endif
50# ifdef CONFIG_COMPAT 42# ifdef CONFIG_COMPAT
51# define __ARCH_WANT_COMPAT_SYS_TIME 43# define __ARCH_WANT_COMPAT_SYS_TIME
52# endif 44# endif
diff --git a/arch/s390/include/asm/uprobes.h b/arch/s390/include/asm/uprobes.h
new file mode 100644
index 000000000000..1411dff7fea7
--- /dev/null
+++ b/arch/s390/include/asm/uprobes.h
@@ -0,0 +1,42 @@
1/*
2 * User-space Probes (UProbes) for s390
3 *
4 * Copyright IBM Corp. 2014
5 * Author(s): Jan Willeke,
6 */
7
8#ifndef _ASM_UPROBES_H
9#define _ASM_UPROBES_H
10
11#include <linux/notifier.h>
12
13typedef u16 uprobe_opcode_t;
14
15#define UPROBE_XOL_SLOT_BYTES 256 /* cache aligned */
16
17#define UPROBE_SWBP_INSN 0x0002
18#define UPROBE_SWBP_INSN_SIZE 2
19
20struct arch_uprobe {
21 union{
22 uprobe_opcode_t insn[3];
23 uprobe_opcode_t ixol[3];
24 };
25 unsigned int saved_per : 1;
26 unsigned int saved_int_code;
27};
28
29struct arch_uprobe_task {
30};
31
32int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm,
33 unsigned long addr);
34int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
35int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
36bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
37int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
38 void *data);
39void arch_uprobe_abort_xol(struct arch_uprobe *ap, struct pt_regs *regs);
40unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
41 struct pt_regs *regs);
42#endif /* _ASM_UPROBES_H */
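
In the new header, UPROBE_SWBP_INSN 0x0002 is the two-byte breakpoint opcode, and insn[]/ixol[] are three halfwords because the longest s390 instruction is six bytes; the length is encoded in the two leftmost bits of the opcode. Standalone sketch of that decoding:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t uprobe_opcode_t;

static unsigned int insn_length(uprobe_opcode_t first_halfword)
{
        switch (first_halfword >> 14) {  /* two leftmost bits of the opcode */
        case 0:
                return 2;
        case 3:
                return 6;
        default:
                return 4;
        }
}

int main(void)
{
        printf("%u %u %u\n",
               insn_length(0x0002),   /* the breakpoint itself: 2 */
               insn_length(0xa708),   /* an RI-format example: 4 */
               insn_length(0xc004));  /* an RIL-format example: 6 */
        return 0;
}
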
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index bc9746a7d47c..787acd4f9668 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -22,13 +22,17 @@ struct vdso_data {
22 __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */ 22 __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */
23 __u64 xtime_clock_sec; /* Kernel time 0x10 */ 23 __u64 xtime_clock_sec; /* Kernel time 0x10 */
24 __u64 xtime_clock_nsec; /* 0x18 */ 24 __u64 xtime_clock_nsec; /* 0x18 */
25 __u64 wtom_clock_sec; /* Wall to monotonic clock 0x20 */ 25 __u64 xtime_coarse_sec; /* Coarse kernel time 0x20 */
26 __u64 wtom_clock_nsec; /* 0x28 */ 26 __u64 xtime_coarse_nsec; /* 0x28 */
27 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ 27 __u64 wtom_clock_sec; /* Wall to monotonic clock 0x30 */
28 __u32 tz_dsttime; /* Type of dst correction 0x34 */ 28 __u64 wtom_clock_nsec; /* 0x38 */
29 __u32 ectg_available; /* ECTG instruction present 0x38 */ 29 __u64 wtom_coarse_sec; /* Coarse wall to monotonic 0x40 */
30 __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */ 30 __u64 wtom_coarse_nsec; /* 0x48 */
31 __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */ 31 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x50 */
32 __u32 tz_dsttime; /* Type of dst correction 0x54 */
33 __u32 ectg_available; /* ECTG instruction present 0x58 */
34 __u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
35 __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
32}; 36};
33 37
34struct vdso_per_cpu_data { 38struct vdso_per_cpu_data {
@@ -38,10 +42,8 @@ struct vdso_per_cpu_data {
38 42
39extern struct vdso_data *vdso_data; 43extern struct vdso_data *vdso_data;
40 44
41#ifdef CONFIG_64BIT
42int vdso_alloc_per_cpu(struct _lowcore *lowcore); 45int vdso_alloc_per_cpu(struct _lowcore *lowcore);
43void vdso_free_per_cpu(struct _lowcore *lowcore); 46void vdso_free_per_cpu(struct _lowcore *lowcore);
44#endif
45 47
46#endif /* __ASSEMBLY__ */ 48#endif /* __ASSEMBLY__ */
47#endif /* __S390_VDSO_H__ */ 49#endif /* __S390_VDSO_H__ */
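
The inserted coarse-clock fields shift every later vdso_data offset, and the comments carry the new values because the vdso assembly reaches these fields through hand-maintained constants. A sketch of pinning such offsets at build time (fields restated from the hunk; the 0x00 slot is not shown in this diff, so its name here is a placeholder):

#include <stddef.h>
#include <stdint.h>

struct vdso_data_sketch {
        uint64_t update_count;       /* 0x00, placeholder name */
        uint64_t xtime_tod_stamp;    /* 0x08 */
        uint64_t xtime_clock_sec;    /* 0x10 */
        uint64_t xtime_clock_nsec;   /* 0x18 */
        uint64_t xtime_coarse_sec;   /* 0x20 */
        uint64_t xtime_coarse_nsec;  /* 0x28 */
        uint64_t wtom_clock_sec;     /* 0x30 */
};

_Static_assert(offsetof(struct vdso_data_sketch, wtom_clock_sec) == 0x30,
               "vdso offset drift");
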
diff --git a/arch/s390/include/asm/vtimer.h b/arch/s390/include/asm/vtimer.h
index bfe25d513ad2..10a179af62d8 100644
--- a/arch/s390/include/asm/vtimer.h
+++ b/arch/s390/include/asm/vtimer.h
@@ -28,6 +28,4 @@ extern int del_virt_timer(struct vtimer_list *timer);
28extern void init_cpu_vtimer(void); 28extern void init_cpu_vtimer(void);
29extern void vtime_init(void); 29extern void vtime_init(void);
30 30
31extern void vtime_stop_cpu(void);
32
33#endif /* _ASM_S390_TIMER_H */ 31#endif /* _ASM_S390_TIMER_H */