path: root/arch/s390/kernel
author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 20:42:32 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 20:42:32 -0500
commit		b3d6524ff7956c5a898d51a18eaecb62a60a2b84 (patch)
tree		cc049e7ec9edd9f5a76f286e04d8db9a1caa516a /arch/s390/kernel
parent		07f80d41cf24b7e6e76cd97d420167932c9a7f82 (diff)
parent		6a039eab53c01a58bfff95c78fc800ca7de27c77 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 - The remaining patches for the z13 machine support: kernel build
   option for z13, the cache synonym avoidance, SMT support,
   compare-and-delay for spinloops and the CEX5S crypto adapter.
 - The ftrace support for function tracing with the gcc hotpatch option.
   This touches common code Makefiles, Steven is ok with the changes.
 - The hypfs file system gets an extension to access diagnose 0x0c data
   in user space for performance analysis for Linux running under z/VM.
 - The iucv hvc console gets wildcard support for the user id filtering.
 - The cacheinfo code is converted to use the generic infrastructure.
 - Cleanup and bug fixes.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
  s390/process: free vx save area when releasing tasks
  s390/hypfs: Eliminate hypfs interval
  s390/hypfs: Add diagnose 0c support
  s390/cacheinfo: don't use smp_processor_id() in preemptible context
  s390/zcrypt: fixed domain scanning problem (again)
  s390/smp: increase maximum value of NR_CPUS to 512
  s390/jump label: use different nop instruction
  s390/jump label: add sanity checks
  s390/mm: correct missing space when reporting user process faults
  s390/dasd: cleanup profiling
  s390/dasd: add locking for global_profile access
  s390/ftrace: hotpatch support for function tracing
  ftrace: let notrace function attribute disable hotpatching if necessary
  ftrace: allow architectures to specify ftrace compile options
  s390: reintroduce diag 44 calls for cpu_relax()
  s390/zcrypt: Add support for new crypto express (CEX5S) adapter.
  s390/zcrypt: Number of supported ap domains is not retrievable.
  s390/spinlock: add compare-and-delay to lock wait loops
  s390/tape: remove redundant if statement
  s390/hvc_iucv: add simple wildcard matches to the iucv allow filter
  ...
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile        |   4
-rw-r--r--  arch/s390/kernel/base.S          |   3
-rw-r--r--  arch/s390/kernel/cache.c         | 391
-rw-r--r--  arch/s390/kernel/dis.c           |   9
-rw-r--r--  arch/s390/kernel/early.c         |  18
-rw-r--r--  arch/s390/kernel/entry.h         |   4
-rw-r--r--  arch/s390/kernel/ftrace.c        | 108
-rw-r--r--  arch/s390/kernel/head.S          |   4
-rw-r--r--  arch/s390/kernel/ipl.c           |  11
-rw-r--r--  arch/s390/kernel/jump_label.c    |  63
-rw-r--r--  arch/s390/kernel/kprobes.c       |   3
-rw-r--r--  arch/s390/kernel/machine_kexec.c |  19
-rw-r--r--  arch/s390/kernel/mcount.S        |   2
-rw-r--r--  arch/s390/kernel/process.c       |  18
-rw-r--r--  arch/s390/kernel/processor.c     |  10
-rw-r--r--  arch/s390/kernel/sclp.S          |   3
-rw-r--r--  arch/s390/kernel/setup.c         |   3
-rw-r--r--  arch/s390/kernel/smp.c           | 261
-rw-r--r--  arch/s390/kernel/sysinfo.c       |   8
-rw-r--r--  arch/s390/kernel/topology.c      |  63
-rw-r--r--  arch/s390/kernel/vtime.c         |  58
21 files changed, 594 insertions(+), 469 deletions(-)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 204c43a4c245..31fab2676fe9 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,8 +4,8 @@
 
 ifdef CONFIG_FUNCTION_TRACER
 # Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o	= -pg
-CFLAGS_REMOVE_ftrace.o	= -pg
+CFLAGS_REMOVE_early.o	= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_ftrace.o	= $(CC_FLAGS_FTRACE)
 endif
 
 #
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 797a823a2275..f74a53d339b0 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -97,7 +97,8 @@ ENTRY(diag308_reset)
 	lg	%r4,0(%r4)	# Save PSW
 	sturg	%r4,%r3		# Use sturg, because of large pages
 	lghi	%r1,1
-	diag	%r1,%r1,0x308
+	lghi	%r0,0
+	diag	%r0,%r1,0x308
 .Lrestart_part2:
 	lhi	%r0,0		# Load r0 with zero
 	lhi	%r1,2		# Use mode 2 = ESAME (dump)
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index c0b03c28d157..632fa06ea162 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -5,37 +5,11 @@
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
-#include <linux/notifier.h>
 #include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/cacheinfo.h>
 #include <asm/facility.h>
 
-struct cache {
-	unsigned long size;
-	unsigned int line_size;
-	unsigned int associativity;
-	unsigned int nr_sets;
-	unsigned int level : 3;
-	unsigned int type : 2;
-	unsigned int private : 1;
-	struct list_head list;
-};
-
-struct cache_dir {
-	struct kobject *kobj;
-	struct cache_index_dir *index;
-};
-
-struct cache_index_dir {
-	struct kobject kobj;
-	int cpu;
-	struct cache *cache;
-	struct cache_index_dir *next;
-};
-
 enum {
 	CACHE_SCOPE_NOTEXISTS,
 	CACHE_SCOPE_PRIVATE,
@@ -44,10 +18,10 @@ enum {
 };
 
 enum {
-	CACHE_TYPE_SEPARATE,
-	CACHE_TYPE_DATA,
-	CACHE_TYPE_INSTRUCTION,
-	CACHE_TYPE_UNIFIED,
+	CTYPE_SEPARATE,
+	CTYPE_DATA,
+	CTYPE_INSTRUCTION,
+	CTYPE_UNIFIED,
 };
 
 enum {
@@ -70,37 +44,60 @@ struct cache_info {
 };
 
 #define CACHE_MAX_LEVEL 8
-
 union cache_topology {
 	struct cache_info ci[CACHE_MAX_LEVEL];
 	unsigned long long raw;
 };
 
 static const char * const cache_type_string[] = {
-	"Data",
+	"",
 	"Instruction",
+	"Data",
+	"",
 	"Unified",
 };
 
-static struct cache_dir *cache_dir_cpu[NR_CPUS];
-static LIST_HEAD(cache_list);
+static const enum cache_type cache_type_map[] = {
+	[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
+	[CTYPE_DATA] = CACHE_TYPE_DATA,
+	[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
+	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
+};
 
 void show_cacheinfo(struct seq_file *m)
 {
-	struct cache *cache;
-	int index = 0;
+	struct cpu_cacheinfo *this_cpu_ci;
+	struct cacheinfo *cache;
+	int idx;
 
-	list_for_each_entry(cache, &cache_list, list) {
-		seq_printf(m, "cache%-11d: ", index);
+	get_online_cpus();
+	this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
+	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
+		cache = this_cpu_ci->info_list + idx;
+		seq_printf(m, "cache%-11d: ", idx);
 		seq_printf(m, "level=%d ", cache->level);
 		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
-		seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
-		seq_printf(m, "size=%luK ", cache->size >> 10);
-		seq_printf(m, "line_size=%u ", cache->line_size);
-		seq_printf(m, "associativity=%d", cache->associativity);
+		seq_printf(m, "scope=%s ",
+			   cache->disable_sysfs ? "Shared" : "Private");
+		seq_printf(m, "size=%dK ", cache->size >> 10);
+		seq_printf(m, "line_size=%u ", cache->coherency_line_size);
+		seq_printf(m, "associativity=%d", cache->ways_of_associativity);
 		seq_puts(m, "\n");
-		index++;
 	}
+	put_online_cpus();
+}
+
+static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
+{
+	if (level >= CACHE_MAX_LEVEL)
+		return CACHE_TYPE_NOCACHE;
+
+	ci += level;
+
+	if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
+		return CACHE_TYPE_NOCACHE;
+
+	return cache_type_map[ci->type];
 }
 
 static inline unsigned long ecag(int ai, int li, int ti)
@@ -113,277 +110,79 @@ static inline unsigned long ecag(int ai, int li, int ti)
 	return val;
 }
 
-static int __init cache_add(int level, int private, int type)
+static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
+			 enum cache_type type, unsigned int level)
 {
-	struct cache *cache;
-	int ti;
+	int ti, num_sets;
+	int cpu = smp_processor_id();
 
-	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
-	if (!cache)
-		return -ENOMEM;
-	if (type == CACHE_TYPE_INSTRUCTION)
+	if (type == CACHE_TYPE_INST)
 		ti = CACHE_TI_INSTRUCTION;
 	else
 		ti = CACHE_TI_UNIFIED;
-	cache->size = ecag(EXTRACT_SIZE, level, ti);
-	cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
-	cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
-	cache->nr_sets = cache->size / cache->associativity;
-	cache->nr_sets /= cache->line_size;
-	cache->private = private;
-	cache->level = level + 1;
-	cache->type = type - 1;
-	list_add_tail(&cache->list, &cache_list);
-	return 0;
-}
-
-static void __init cache_build_info(void)
-{
-	struct cache *cache, *next;
-	union cache_topology ct;
-	int level, private, rc;
-
-	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
-	for (level = 0; level < CACHE_MAX_LEVEL; level++) {
-		switch (ct.ci[level].scope) {
-		case CACHE_SCOPE_SHARED:
-			private = 0;
-			break;
-		case CACHE_SCOPE_PRIVATE:
-			private = 1;
-			break;
-		default:
-			return;
-		}
-		if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
-			rc = cache_add(level, private, CACHE_TYPE_DATA);
-			rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
-		} else {
-			rc = cache_add(level, private, ct.ci[level].type);
-		}
-		if (rc)
-			goto error;
-	}
-	return;
-error:
-	list_for_each_entry_safe(cache, next, &cache_list, list) {
-		list_del(&cache->list);
-		kfree(cache);
-	}
-}
-
-static struct cache_dir *cache_create_cache_dir(int cpu)
-{
-	struct cache_dir *cache_dir;
-	struct kobject *kobj = NULL;
-	struct device *dev;
-
-	dev = get_cpu_device(cpu);
-	if (!dev)
-		goto out;
-	kobj = kobject_create_and_add("cache", &dev->kobj);
-	if (!kobj)
-		goto out;
-	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
-	if (!cache_dir)
-		goto out;
-	cache_dir->kobj = kobj;
-	cache_dir_cpu[cpu] = cache_dir;
-	return cache_dir;
-out:
-	kobject_put(kobj);
-	return NULL;
-}
-
-static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
-{
-	return container_of(kobj, struct cache_index_dir, kobj);
-}
-
-static void cache_index_release(struct kobject *kobj)
-{
-	struct cache_index_dir *index;
-
-	index = kobj_to_cache_index_dir(kobj);
-	kfree(index);
-}
-
-static ssize_t cache_index_show(struct kobject *kobj,
-				struct attribute *attr, char *buf)
-{
-	struct kobj_attribute *kobj_attr;
-
-	kobj_attr = container_of(attr, struct kobj_attribute, attr);
-	return kobj_attr->show(kobj, kobj_attr, buf);
-}
-
-#define DEFINE_CACHE_ATTR(_name, _format, _value)			\
-static ssize_t cache_##_name##_show(struct kobject *kobj,		\
-				    struct kobj_attribute *attr,	\
-				    char *buf)				\
-{									\
-	struct cache_index_dir *index;					\
-									\
-	index = kobj_to_cache_index_dir(kobj);				\
-	return sprintf(buf, _format, _value);				\
-}									\
-static struct kobj_attribute cache_##_name##_attr =			\
-	__ATTR(_name, 0444, cache_##_name##_show, NULL);
 
-DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
-DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
-DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
-DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
-DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
-DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
+	this_leaf->level = level + 1;
+	this_leaf->type = type;
+	this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
+	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
+						level, ti);
+	this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
 
-static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
-{
-	struct cache_index_dir *index;
-	int len;
-
-	index = kobj_to_cache_index_dir(kobj);
-	len = type ?
-		cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
-		cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
-	len += sprintf(&buf[len], "\n");
-	return len;
-}
-
-static ssize_t shared_cpu_map_show(struct kobject *kobj,
-				   struct kobj_attribute *attr, char *buf)
-{
-	return shared_cpu_map_func(kobj, 0, buf);
-}
-static struct kobj_attribute cache_shared_cpu_map_attr =
-	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
-
-static ssize_t shared_cpu_list_show(struct kobject *kobj,
-				    struct kobj_attribute *attr, char *buf)
-{
-	return shared_cpu_map_func(kobj, 1, buf);
-}
-static struct kobj_attribute cache_shared_cpu_list_attr =
-	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
-
-static struct attribute *cache_index_default_attrs[] = {
-	&cache_type_attr.attr,
-	&cache_size_attr.attr,
-	&cache_number_of_sets_attr.attr,
-	&cache_ways_of_associativity_attr.attr,
-	&cache_level_attr.attr,
-	&cache_coherency_line_size_attr.attr,
-	&cache_shared_cpu_map_attr.attr,
-	&cache_shared_cpu_list_attr.attr,
-	NULL,
-};
-
-static const struct sysfs_ops cache_index_ops = {
-	.show = cache_index_show,
-};
-
-static struct kobj_type cache_index_type = {
-	.sysfs_ops = &cache_index_ops,
-	.release = cache_index_release,
-	.default_attrs = cache_index_default_attrs,
-};
-
-static int cache_create_index_dir(struct cache_dir *cache_dir,
-				  struct cache *cache, int index, int cpu)
-{
-	struct cache_index_dir *index_dir;
-	int rc;
-
-	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
-	if (!index_dir)
-		return -ENOMEM;
-	index_dir->cache = cache;
-	index_dir->cpu = cpu;
-	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
-				  cache_dir->kobj, "index%d", index);
-	if (rc)
-		goto out;
-	index_dir->next = cache_dir->index;
-	cache_dir->index = index_dir;
-	return 0;
-out:
-	kfree(index_dir);
-	return rc;
-}
+	num_sets = this_leaf->size / this_leaf->coherency_line_size;
+	num_sets /= this_leaf->ways_of_associativity;
+	this_leaf->number_of_sets = num_sets;
+	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+	if (!private)
+		this_leaf->disable_sysfs = true;
+}
+
+int init_cache_level(unsigned int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	unsigned int level = 0, leaves = 0;
+	union cache_topology ct;
+	enum cache_type ctype;
 
-static int cache_add_cpu(int cpu)
-{
-	struct cache_dir *cache_dir;
-	struct cache *cache;
-	int rc, index = 0;
+	if (!this_cpu_ci)
+		return -EINVAL;
 
-	if (list_empty(&cache_list))
-		return 0;
-	cache_dir = cache_create_cache_dir(cpu);
-	if (!cache_dir)
-		return -ENOMEM;
-	list_for_each_entry(cache, &cache_list, list) {
-		if (!cache->private)
+	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+	do {
+		ctype = get_cache_type(&ct.ci[0], level);
+		if (ctype == CACHE_TYPE_NOCACHE)
 			break;
-		rc = cache_create_index_dir(cache_dir, cache, index, cpu);
-		if (rc)
-			return rc;
-		index++;
-	}
-	return 0;
-}
+		/* Separate instruction and data caches */
+		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+	} while (++level < CACHE_MAX_LEVEL);
 
-static void cache_remove_cpu(int cpu)
-{
-	struct cache_index_dir *index, *next;
-	struct cache_dir *cache_dir;
+	this_cpu_ci->num_levels = level;
+	this_cpu_ci->num_leaves = leaves;
 
-	cache_dir = cache_dir_cpu[cpu];
-	if (!cache_dir)
-		return;
-	index = cache_dir->index;
-	while (index) {
-		next = index->next;
-		kobject_put(&index->kobj);
-		index = next;
-	}
-	kobject_put(cache_dir->kobj);
-	kfree(cache_dir);
-	cache_dir_cpu[cpu] = NULL;
+	return 0;
 }
 
-static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
-			 void *hcpu)
+int populate_cache_leaves(unsigned int cpu)
 {
-	int cpu = (long)hcpu;
-	int rc = 0;
+	unsigned int level, idx, pvt;
+	union cache_topology ct;
+	enum cache_type ctype;
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-		rc = cache_add_cpu(cpu);
-		if (rc)
-			cache_remove_cpu(cpu);
-		break;
-	case CPU_DEAD:
-		cache_remove_cpu(cpu);
-		break;
+	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
+	     idx < this_cpu_ci->num_leaves; idx++, level++) {
+		if (!this_leaf)
+			return -EINVAL;
+
+		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
+		ctype = get_cache_type(&ct.ci[0], level);
+		if (ctype == CACHE_TYPE_SEPARATE) {
+			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
+			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+		} else {
+			ci_leaf_init(this_leaf++, pvt, ctype, level);
+		}
 	}
-	return rc ? NOTIFY_BAD : NOTIFY_OK;
-}
-
-static int __init cache_init(void)
-{
-	int cpu;
-
-	if (!test_facility(34))
-		return 0;
-	cache_build_info();
-
-	cpu_notifier_register_begin();
-	for_each_online_cpu(cpu)
-		cache_add_cpu(cpu);
-	__hotcpu_notifier(cache_hotplug, 0);
-	cpu_notifier_register_done();
 	return 0;
 }
-device_initcall(cache_init);
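With this conversion, arch/s390/kernel/cache.c no longer builds its own kobject/sysfs tree; it only implements the two arch hooks of the generic cacheinfo framework. A rough, hedged sketch of how the generic core (drivers/base/cacheinfo.c; exact flow assumed here, simplified, error handling omitted) is expected to drive those hooks for each CPU:

```c
/* Hedged sketch, not part of this patch: assumed call order in the
 * generic cacheinfo core that drives the two hooks defined above. */
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	ret = init_cache_level(cpu);	/* fills num_levels/num_leaves */
	if (ret)
		return ret;
	/* the core then allocates info_list[num_leaves] ... */
	return populate_cache_leaves(cpu);	/* fills each cacheinfo leaf */
}
```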
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f3762937dd82..533430307da8 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -137,7 +137,7 @@ enum {
 	INSTR_RSI_RRP,
 	INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
 	INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
-	INSTR_RSY_RDRM,
+	INSTR_RSY_RDRM, INSTR_RSY_RMRD,
 	INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
 	INSTR_RS_RURD,
 	INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
@@ -226,7 +226,6 @@ static const struct s390_operand operands[] =
 	[U16_32] = { 16, 32, 0 },
 	[J16_16] = { 16, 16, OPERAND_PCREL },
 	[J16_32] = { 16, 32, OPERAND_PCREL },
-	[I16_32] = { 16, 32, OPERAND_SIGNED },
 	[I24_24] = { 24, 24, OPERAND_SIGNED },
 	[J32_16] = { 32, 16, OPERAND_PCREL },
 	[I32_16] = { 32, 16, OPERAND_SIGNED },
@@ -308,6 +307,7 @@ static const unsigned char formats[][7] = {
 	[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
+	[INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
 	[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
@@ -451,7 +451,8 @@ enum {
 	LONG_INSN_VERLLV,
 	LONG_INSN_VESRAV,
 	LONG_INSN_VESRLV,
-	LONG_INSN_VSBCBI
+	LONG_INSN_VSBCBI,
+	LONG_INSN_STCCTM
 };
 
 static char *long_insn_name[] = {
@@ -531,6 +532,7 @@ static char *long_insn_name[] = {
 	[LONG_INSN_VESRAV] = "vesrav",
 	[LONG_INSN_VESRLV] = "vesrlv",
 	[LONG_INSN_VSBCBI] = "vsbcbi",
+	[LONG_INSN_STCCTM] = "stcctm",
 };
 
 static struct s390_insn opcode[] = {
@@ -1656,6 +1658,7 @@ static struct s390_insn opcode_eb[] = {
 	{ "lric", 0x60, INSTR_RSY_RDRM },
 	{ "stric", 0x61, INSTR_RSY_RDRM },
 	{ "mric", 0x62, INSTR_RSY_RDRM },
+	{ { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
 #endif
 	{ "rll", 0x1d, INSTR_RSY_RRRD },
 	{ "mvclu", 0x8e, INSTR_RSY_RRRD },
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 302ac1f7f8e7..70a329450901 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -393,9 +393,27 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
 	if (test_facility(129))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
+	if (test_facility(128))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
 #endif
 }
 
+static int __init nocad_setup(char *str)
+{
+	S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
+	return 0;
+}
+early_param("nocad", nocad_setup);
+
+static int __init cad_init(void)
+{
+	if (MACHINE_HAS_CAD)
+		/* Enable problem state CAD. */
+		__ctl_set_bit(2, 3);
+	return 0;
+}
+early_initcall(cad_init);
+
 static __init void rescue_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 8e61393c8275..834df047d35f 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -71,9 +71,11 @@ struct s390_mmap_arg_struct;
 struct fadvise64_64_args;
 struct old_sigaction;
 
+long sys_rt_sigreturn(void);
+long sys_sigreturn(void);
+
 long sys_s390_personality(unsigned int personality);
 long sys_s390_runtime_instr(int command, int signum);
-
 long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
 long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
 #endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index b86bb8823f15..82c19899574f 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -46,6 +46,13 @@
  *	lg	%r14,8(%r15)		# offset 18
  * The jg instruction branches to offset 24 to skip as many instructions
  * as possible.
+ * In case we use gcc's hotpatch feature the original and also the disabled
+ * function prologue contains only a single six byte instruction and looks
+ * like this:
+ * >	brcl	0,0			# offset 0
+ * To enable ftrace the code gets patched like above and afterwards looks
+ * like this:
+ * >	brasl	%r0,ftrace_caller	# offset 0
  */
 
 unsigned long ftrace_plt;
@@ -59,62 +66,71 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-	struct ftrace_insn insn;
-	unsigned short op;
-	void *from, *to;
-	size_t size;
-
-	ftrace_generate_nop_insn(&insn);
-	size = sizeof(insn);
-	from = &insn;
-	to = (void *) rec->ip;
-	if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+	struct ftrace_insn orig, new, old;
+
+	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
 		return -EFAULT;
-	/*
-	 * If we find a breakpoint instruction, a kprobe has been placed
-	 * at the beginning of the function. We write the constant
-	 * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
-	 * instruction so that the kprobes handler can execute a nop, if it
-	 * reaches this breakpoint.
-	 */
-	if (op == BREAKPOINT_INSTRUCTION) {
-		size -= 2;
-		from += 2;
-		to += 2;
-		insn.disp = KPROBE_ON_FTRACE_NOP;
+	if (addr == MCOUNT_ADDR) {
+		/* Initial code replacement */
+#ifdef CC_USING_HOTPATCH
+		/* We expect to see brcl 0,0 */
+		ftrace_generate_nop_insn(&orig);
+#else
+		/* We expect to see stg r14,8(r15) */
+		orig.opc = 0xe3e0;
+		orig.disp = 0xf0080024;
+#endif
+		ftrace_generate_nop_insn(&new);
+	} else if (old.opc == BREAKPOINT_INSTRUCTION) {
+		/*
+		 * If we find a breakpoint instruction, a kprobe has been
+		 * placed at the beginning of the function. We write the
+		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
+		 * bytes of the original instruction so that the kprobes
+		 * handler can execute a nop, if it reaches this breakpoint.
+		 */
+		new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
+		orig.disp = KPROBE_ON_FTRACE_CALL;
+		new.disp = KPROBE_ON_FTRACE_NOP;
+	} else {
+		/* Replace ftrace call with a nop. */
+		ftrace_generate_call_insn(&orig, rec->ip);
+		ftrace_generate_nop_insn(&new);
 	}
-	if (probe_kernel_write(to, from, size))
+	/* Verify that the to be replaced code matches what we expect. */
+	if (memcmp(&orig, &old, sizeof(old)))
+		return -EINVAL;
+	if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
 		return -EPERM;
 	return 0;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	struct ftrace_insn insn;
-	unsigned short op;
-	void *from, *to;
-	size_t size;
-
-	ftrace_generate_call_insn(&insn, rec->ip);
-	size = sizeof(insn);
-	from = &insn;
-	to = (void *) rec->ip;
-	if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+	struct ftrace_insn orig, new, old;
+
+	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
 		return -EFAULT;
-	/*
-	 * If we find a breakpoint instruction, a kprobe has been placed
-	 * at the beginning of the function. We write the constant
-	 * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
-	 * instruction so that the kprobes handler can execute a brasl if it
-	 * reaches this breakpoint.
-	 */
-	if (op == BREAKPOINT_INSTRUCTION) {
-		size -= 2;
-		from += 2;
-		to += 2;
-		insn.disp = KPROBE_ON_FTRACE_CALL;
+	if (old.opc == BREAKPOINT_INSTRUCTION) {
+		/*
+		 * If we find a breakpoint instruction, a kprobe has been
+		 * placed at the beginning of the function. We write the
+		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
+		 * bytes of the original instruction so that the kprobes
+		 * handler can execute a brasl if it reaches this breakpoint.
+		 */
+		new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
+		orig.disp = KPROBE_ON_FTRACE_NOP;
+		new.disp = KPROBE_ON_FTRACE_CALL;
+	} else {
+		/* Replace nop with an ftrace call. */
+		ftrace_generate_nop_insn(&orig);
+		ftrace_generate_call_insn(&new, rec->ip);
 	}
-	if (probe_kernel_write(to, from, size))
+	/* Verify that the to be replaced code matches what we expect. */
+	if (memcmp(&orig, &old, sizeof(old)))
+		return -EINVAL;
+	if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
 		return -EPERM;
 	return 0;
 }
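The rewritten ftrace_make_nop()/ftrace_make_call() above read the whole six-byte instruction at rec->ip, build the expected old instruction, and only patch after a memcmp() match. A hedged sketch of the instruction layout this relies on (the real definition lives in arch/s390/include/asm/ftrace.h, which is not part of this diff; field names assumed):

```c
#include <linux/types.h>

/* Hedged sketch: the 6-byte s390 instruction as the ftrace code sees it.
 * "brcl 0,0" has opc 0xc004, "brasl %r0,..." has opc 0xc005 (assumed). */
struct ftrace_insn {
	u16 opc;	/* opcode / register halfword */
	s32 disp;	/* signed displacement in halfwords */
} __packed;
```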
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index d62eee11f0b5..132f4c9ade60 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -436,7 +436,9 @@ ENTRY(startup_kdump)
 # followed by the facility words.
 
 #if defined(CONFIG_64BIT)
-#if defined(CONFIG_MARCH_ZEC12)
+#if defined(CONFIG_MARCH_Z13)
+	.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
+#elif defined(CONFIG_MARCH_ZEC12)
 	.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
 	.long 2, 0xc100eff2, 0xf46c0000
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 39badb9ca0b3..5c8651f36509 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2074,7 +2074,8 @@ static void do_reset_calls(void)
 
 u32 dump_prefix_page;
 
-void s390_reset_system(void (*func)(void *), void *data)
+void s390_reset_system(void (*fn_pre)(void),
+		       void (*fn_post)(void *), void *data)
 {
 	struct _lowcore *lc;
 
@@ -2112,7 +2113,11 @@ void s390_reset_system(void (*func)(void *), void *data)
 	/* Store status at absolute zero */
 	store_status();
 
+	/* Call function before reset */
+	if (fn_pre)
+		fn_pre();
 	do_reset_calls();
-	if (func)
-		func(data);
+	/* Call function after reset */
+	if (fn_post)
+		fn_post(data);
 }
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index b987ab2c1541..cb2d51e779df 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -22,31 +22,66 @@ struct insn_args {
 	enum jump_label_type type;
 };
 
+static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
+{
+	/* brcl 0,0 */
+	insn->opcode = 0xc004;
+	insn->offset = 0;
+}
+
+static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
+{
+	/* brcl 15,offset */
+	insn->opcode = 0xc0f4;
+	insn->offset = (entry->target - entry->code) >> 1;
+}
+
+static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+{
+	unsigned char *ipc = (unsigned char *)entry->code;
+	unsigned char *ipe = (unsigned char *)insn;
+
+	pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
+	pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
+		 ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
+	pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
+		 ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+	panic("Corrupted kernel text");
+}
+
+static struct insn orignop = {
+	.opcode = 0xc004,
+	.offset = JUMP_LABEL_NOP_OFFSET >> 1,
+};
+
 static void __jump_label_transform(struct jump_entry *entry,
-				   enum jump_label_type type)
+				   enum jump_label_type type,
+				   int init)
 {
-	struct insn insn;
-	int rc;
+	struct insn old, new;
 
 	if (type == JUMP_LABEL_ENABLE) {
-		/* brcl 15,offset */
-		insn.opcode = 0xc0f4;
-		insn.offset = (entry->target - entry->code) >> 1;
+		jump_label_make_nop(entry, &old);
+		jump_label_make_branch(entry, &new);
 	} else {
-		/* brcl 0,0 */
-		insn.opcode = 0xc004;
-		insn.offset = 0;
+		jump_label_make_branch(entry, &old);
+		jump_label_make_nop(entry, &new);
 	}
-
-	rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE);
-	WARN_ON_ONCE(rc < 0);
+	if (init) {
+		if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
+			jump_label_bug(entry, &old);
+	} else {
+		if (memcmp((void *)entry->code, &old, sizeof(old)))
+			jump_label_bug(entry, &old);
+	}
+	probe_kernel_write((void *)entry->code, &new, sizeof(new));
 }
 
 static int __sm_arch_jump_label_transform(void *data)
 {
 	struct insn_args *args = data;
 
-	__jump_label_transform(args->entry, args->type);
+	__jump_label_transform(args->entry, args->type, 0);
 	return 0;
 }
 
@@ -64,7 +99,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
 void arch_jump_label_transform_static(struct jump_entry *entry,
 				      enum jump_label_type type)
 {
-	__jump_label_transform(entry, type);
+	__jump_label_transform(entry, type, 1);
 }
 
 #endif
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 1e4c710dfb92..f516edc1fbe3 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -69,7 +69,8 @@ static void copy_instruction(struct kprobe *p)
 		/*
 		 * If kprobes patches the instruction that is morphed by
 		 * ftrace make sure that kprobes always sees the branch
-		 * "jg .+24" that skips the mcount block
+		 * "jg .+24" that skips the mcount block or the "brcl 0,0"
+		 * in case of hotpatch.
 		 */
 		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
 		p->ainsn.is_ftrace_insn = 1;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 4685337fa7c6..fb0901ec4306 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -103,21 +103,18 @@ static int __init machine_kdump_pm_init(void)
 	return 0;
 }
 arch_initcall(machine_kdump_pm_init);
-#endif
 
 /*
  * Start kdump: We expect here that a store status has been done on our CPU
  */
 static void __do_machine_kdump(void *image)
 {
-#ifdef CONFIG_CRASH_DUMP
 	int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
 
-	setup_regs();
 	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
 	start_kdump(1);
-#endif
 }
+#endif
 
 /*
  * Check if kdump checksums are valid: We call purgatory with parameter "0"
@@ -249,18 +246,18 @@ static void __do_machine_kexec(void *data)
  */
 static void __machine_kexec(void *data)
 {
-	struct kimage *image = data;
-
 	__arch_local_irq_stosm(0x04); /* enable DAT */
 	pfault_fini();
 	tracing_off();
 	debug_locks_off();
-	if (image->type == KEXEC_TYPE_CRASH) {
+#ifdef CONFIG_CRASH_DUMP
+	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
+
 		lgr_info_log();
-		s390_reset_system(__do_machine_kdump, data);
-	} else {
-		s390_reset_system(__do_machine_kexec, data);
-	}
+		s390_reset_system(setup_regs, __do_machine_kdump, data);
+	} else
+#endif
+		s390_reset_system(NULL, __do_machine_kexec, data);
 	disabled_wait((unsigned long) __builtin_return_address(0));
 }
 
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index b6dfc5bfcb89..e499370fbccb 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -27,7 +27,9 @@ ENTRY(ftrace_caller)
 	.globl ftrace_regs_caller
 	.set ftrace_regs_caller,ftrace_caller
 	lgr	%r1,%r15
+#ifndef CC_USING_HOTPATCH
 	aghi	%r0,MCOUNT_RETURN_FIXUP
+#endif
 	aghi	%r15,-STACK_FRAME_SIZE
 	stg	%r1,__SF_BACKCHAIN(%r15)
 	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index aa7a83948c7b..13fc0978ca7e 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -79,6 +79,14 @@ void release_thread(struct task_struct *dead_task)
 {
 }
 
+#ifdef CONFIG_64BIT
+void arch_release_task_struct(struct task_struct *tsk)
+{
+	if (tsk->thread.vxrs)
+		kfree(tsk->thread.vxrs);
+}
+#endif
+
 int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 		unsigned long arg, struct task_struct *p)
 {
@@ -243,13 +251,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	ret = PAGE_ALIGN(mm->brk + brk_rnd());
 	return (ret > mm->brk) ? ret : mm->brk;
 }
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
-	unsigned long ret;
-
-	if (!(current->flags & PF_RANDOMIZE))
-		return base;
-	ret = PAGE_ALIGN(base + brk_rnd());
-	return (ret > base) ? ret : base;
-}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index dbdd33ee0102..26108232fcaa 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -8,16 +8,24 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <asm/elf.h>
 #include <asm/lowcore.h>
 #include <asm/param.h>
+#include <asm/smp.h>
 
 static DEFINE_PER_CPU(struct cpuid, cpu_id);
 
+void cpu_relax(void)
+{
+	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
+		asm volatile("diag 0,0,0x44");
+	barrier();
+}
+EXPORT_SYMBOL(cpu_relax);
+
 /*
  * cpu_init - initializes state that is per-CPU.
  */
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index a41f2c99dcc8..7e77e03378f3 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -294,7 +294,8 @@ ENTRY(_sclp_print_early)
 #ifdef CONFIG_64BIT
 	tm	LC_AR_MODE_ID,1
 	jno	.Lesa3
-	lmh	%r6,%r15,96(%r15)	# store upper register halves
+	lgfr	%r2,%r2			# sign extend return value
+	lmh	%r6,%r15,96(%r15)	# restore upper register halves
 	ahi	%r15,80
 .Lesa3:
 #endif
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4e532c67832f..bfac77ada4f2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -810,6 +810,9 @@ static void __init setup_hwcaps(void)
 	case 0x2828:
 		strcpy(elf_platform, "zEC12");
 		break;
+	case 0x2964:
+		strcpy(elf_platform, "z13");
+		break;
 	}
 }
 
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0b499f5cbe19..a668993ff577 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -71,9 +71,30 @@ struct pcpu {
 };
 
 static u8 boot_cpu_type;
-static u16 boot_cpu_address;
 static struct pcpu pcpu_devices[NR_CPUS];
 
+unsigned int smp_cpu_mt_shift;
+EXPORT_SYMBOL(smp_cpu_mt_shift);
+
+unsigned int smp_cpu_mtid;
+EXPORT_SYMBOL(smp_cpu_mtid);
+
+static unsigned int smp_max_threads __initdata = -1U;
+
+static int __init early_nosmt(char *s)
+{
+	smp_max_threads = 1;
+	return 0;
+}
+early_param("nosmt", early_nosmt);
+
+static int __init early_smt(char *s)
+{
+	get_option(&s, &smp_max_threads);
+	return 0;
+}
+early_param("smt", early_smt);
+
 /*
  * The smp_cpu_state_mutex must be held when changing the state or polarization
  * member of a pcpu data structure within the pcpu_devices arreay.
@@ -132,7 +153,7 @@ static inline int pcpu_running(struct pcpu *pcpu)
 /*
  * Find struct pcpu by cpu address.
  */
-static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
+static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
 {
 	int cpu;
 
@@ -299,6 +320,32 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
 }
 
 /*
+ * Enable additional logical cpus for multi-threading.
+ */
+static int pcpu_set_smt(unsigned int mtid)
+{
+	register unsigned long reg1 asm ("1") = (unsigned long) mtid;
+	int cc;
+
+	if (smp_cpu_mtid == mtid)
+		return 0;
+	asm volatile(
+		"	sigp	%1,0,%2	# sigp set multi-threading\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
+		: "cc");
+	if (cc == 0) {
+		smp_cpu_mtid = mtid;
+		smp_cpu_mt_shift = 0;
+		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
+			smp_cpu_mt_shift++;
+		pcpu_devices[0].address = stap();
+	}
+	return cc;
+}
+
+/*
  * Call function on an online CPU.
  */
 void smp_call_online_cpu(void (*func)(void *), void *data)
@@ -512,22 +559,17 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
 
 #ifdef CONFIG_CRASH_DUMP
 
-static void __init smp_get_save_area(int cpu, u16 address)
+static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
 {
 	void *lc = pcpu_devices[0].lowcore;
 	struct save_area_ext *sa_ext;
 	unsigned long vx_sa;
 
-	if (is_kdump_kernel())
-		return;
-	if (!OLDMEM_BASE && (address == boot_cpu_address ||
-			     ipl_info.type != IPL_TYPE_FCP_DUMP))
-		return;
 	sa_ext = dump_save_area_create(cpu);
 	if (!sa_ext)
 		panic("could not allocate memory for save area\n");
-	if (address == boot_cpu_address) {
-		/* Copy the registers of the boot cpu. */
+	if (is_boot_cpu) {
+		/* Copy the registers of the boot CPU. */
 		copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
 				 SAVE_AREA_BASE - PAGE_SIZE, 0);
 		if (MACHINE_HAS_VX)
@@ -548,6 +590,64 @@ static void __init smp_get_save_area(int cpu, u16 address)
 		free_page(vx_sa);
 }
 
+/*
+ * Collect CPU state of the previous, crashed system.
+ * There are four cases:
+ * 1) standard zfcp dump
+ *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ *    The state for all CPUs except the boot CPU needs to be collected
+ *    with sigp stop-and-store-status. The boot CPU state is located in
+ *    the absolute lowcore of the memory stored in the HSA. The zcore code
+ *    will allocate the save area and copy the boot CPU state from the HSA.
+ * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
+ *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ *    The state for all CPUs except the boot CPU needs to be collected
+ *    with sigp stop-and-store-status. The firmware or the boot-loader
+ *    stored the registers of the boot CPU in the absolute lowcore in the
+ *    memory of the old system.
+ * 3) kdump and the old kernel did not store the CPU state,
+ *    or stand-alone kdump for DASD
+ *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
+ *    The state for all CPUs except the boot CPU needs to be collected
+ *    with sigp stop-and-store-status. The kexec code or the boot-loader
+ *    stored the registers of the boot CPU in the memory of the old system.
+ * 4) kdump and the old kernel stored the CPU state
+ *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
+ *    The state of all CPUs is stored in ELF sections in the memory of the
+ *    old system. The ELF sections are picked up by the crash_dump code
+ *    via elfcorehdr_addr.
+ */
+static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
+{
+	unsigned int cpu, address, i, j;
+	int is_boot_cpu;
+
+	if (is_kdump_kernel())
+		/* Previous system stored the CPU states. Nothing to do. */
+		return;
+	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
+		/* No previous system present, normal boot. */
+		return;
+	/* Set multi-threading state to the previous system. */
+	pcpu_set_smt(sclp_get_mtid_prev());
+	/* Collect CPU states. */
+	cpu = 0;
+	for (i = 0; i < info->configured; i++) {
+		/* Skip CPUs with different CPU type. */
+		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
+			continue;
+		for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
+			address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
+			is_boot_cpu = (address == pcpu_devices[0].address);
+			if (is_boot_cpu && !OLDMEM_BASE)
+				/* Skip boot CPU for standard zfcp dump. */
+				continue;
+			/* Get state for this CPu. */
+			__smp_store_cpu_state(cpu, address, is_boot_cpu);
+		}
+	}
+}
+
 int smp_store_status(int cpu)
 {
 	unsigned long vx_sa;
@@ -565,10 +665,6 @@ int smp_store_status(int cpu)
 	return 0;
 }
 
-#else /* CONFIG_CRASH_DUMP */
-
-static inline void smp_get_save_area(int cpu, u16 address) { }
-
 #endif /* CONFIG_CRASH_DUMP */
 
 void smp_cpu_set_polarization(int cpu, int val)
@@ -590,11 +686,13 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
 		use_sigp_detection = 1;
-		for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
+		for (address = 0; address <= MAX_CPU_ADDRESS;
+		     address += (1U << smp_cpu_mt_shift)) {
 			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
 			    SIGP_CC_NOT_OPERATIONAL)
 				continue;
-			info->cpu[info->configured].address = address;
+			info->cpu[info->configured].core_id =
+				address >> smp_cpu_mt_shift;
 			info->configured++;
 		}
 		info->combined = info->configured;
@@ -608,7 +706,8 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
 {
 	struct pcpu *pcpu;
 	cpumask_t avail;
-	int cpu, nr, i;
+	int cpu, nr, i, j;
+	u16 address;
 
 	nr = 0;
 	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
@@ -616,51 +715,76 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
 	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
 		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
 			continue;
-		if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
-			continue;
-		pcpu = pcpu_devices + cpu;
-		pcpu->address = info->cpu[i].address;
-		pcpu->state = (i >= info->configured) ?
-			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
-		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
-		set_cpu_present(cpu, true);
-		if (sysfs_add && smp_add_present_cpu(cpu) != 0)
-			set_cpu_present(cpu, false);
-		else
-			nr++;
-		cpu = cpumask_next(cpu, &avail);
+		address = info->cpu[i].core_id << smp_cpu_mt_shift;
+		for (j = 0; j <= smp_cpu_mtid; j++) {
+			if (pcpu_find_address(cpu_present_mask, address + j))
+				continue;
+			pcpu = pcpu_devices + cpu;
+			pcpu->address = address + j;
+			pcpu->state =
+				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
+				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
+			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+			set_cpu_present(cpu, true);
+			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
+				set_cpu_present(cpu, false);
+			else
+				nr++;
+			cpu = cpumask_next(cpu, &avail);
+			if (cpu >= nr_cpu_ids)
+				break;
+		}
 	}
 	return nr;
 }
 
 static void __init smp_detect_cpus(void)
 {
-	unsigned int cpu, c_cpus, s_cpus;
+	unsigned int cpu, mtid, c_cpus, s_cpus;
 	struct sclp_cpu_info *info;
+	u16 address;
 
+	/* Get CPU information */
 	info = smp_get_cpu_info();
 	if (!info)
 		panic("smp_detect_cpus failed to allocate memory\n");
+
+	/* Find boot CPU type */
 	if (info->has_cpu_type) {
-		for (cpu = 0; cpu < info->combined; cpu++) {
-			if (info->cpu[cpu].address != boot_cpu_address)
-				continue;
-			/* The boot cpu dictates the cpu type. */
-			boot_cpu_type = info->cpu[cpu].type;
-			break;
-		}
+		address = stap();
+		for (cpu = 0; cpu < info->combined; cpu++)
+			if (info->cpu[cpu].core_id == address) {
+				/* The boot cpu dictates the cpu type. */
+				boot_cpu_type = info->cpu[cpu].type;
+				break;
+			}
+		if (cpu >= info->combined)
+			panic("Could not find boot CPU type");
 	}
+
+#ifdef CONFIG_CRASH_DUMP
+	/* Collect CPU state of previous system */
+	smp_store_cpu_states(info);
+#endif
+
+	/* Set multi-threading state for the current system */
+	mtid = sclp_get_mtid(boot_cpu_type);
+	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
+	pcpu_set_smt(mtid);
+
+	/* Print number of CPUs */
 	c_cpus = s_cpus = 0;
 	for (cpu = 0; cpu < info->combined; cpu++) {
 		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
 			continue;
-		if (cpu < info->configured) {
-			smp_get_save_area(c_cpus, info->cpu[cpu].address);
-			c_cpus++;
-		} else
-			s_cpus++;
+		if (cpu < info->configured)
+			c_cpus += smp_cpu_mtid + 1;
+		else
+			s_cpus += smp_cpu_mtid + 1;
 	}
 	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
+
+	/* Add CPUs present at boot */
 	get_online_cpus();
 	__smp_rescan_cpus(info, 0);
 	put_online_cpus();
@@ -696,12 +820,23 @@ static void smp_start_secondary(void *cpuvoid)
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	struct pcpu *pcpu;
-	int rc;
+	int base, i, rc;
 
 	pcpu = pcpu_devices + cpu;
 	if (pcpu->state != CPU_STATE_CONFIGURED)
 		return -EIO;
-	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
+	base = cpu - (cpu % (smp_cpu_mtid + 1));
+	for (i = 0; i <= smp_cpu_mtid; i++) {
+		if (base + i < nr_cpu_ids)
+			if (cpu_online(base + i))
+				break;
+	}
+	/*
+	 * If this is the first CPU of the core to get online
+	 * do an initial CPU reset.
+	 */
+	if (i > smp_cpu_mtid &&
+	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
 	    SIGP_CC_ORDER_CODE_ACCEPTED)
 		return -EIO;
 
@@ -774,7 +909,8 @@ void __init smp_fill_possible_mask(void)
 {
 	unsigned int possible, sclp, cpu;
 
-	sclp = sclp_get_max_cpu() ?: nr_cpu_ids;
+	sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
+	sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
 	possible = setup_possible_cpus ?: nr_cpu_ids;
 	possible = min(possible, sclp);
 	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
@@ -796,9 +932,8 @@ void __init smp_prepare_boot_cpu(void)
 {
 	struct pcpu *pcpu = pcpu_devices;
 
-	boot_cpu_address = stap();
 	pcpu->state = CPU_STATE_CONFIGURED;
-	pcpu->address = boot_cpu_address;
+	pcpu->address = stap();
 	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
 	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
 		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
@@ -848,7 +983,7 @@ static ssize_t cpu_configure_store(struct device *dev,
 				   const char *buf, size_t count)
 {
 	struct pcpu *pcpu;
-	int cpu, val, rc;
+	int cpu, val, rc, i;
 	char delim;
 
 	if (sscanf(buf, "%d %c", &val, &delim) != 1)
@@ -860,29 +995,43 @@ static ssize_t cpu_configure_store(struct device *dev,
860 rc = -EBUSY; 995 rc = -EBUSY;
861 /* disallow configuration changes of online cpus and cpu 0 */ 996 /* disallow configuration changes of online cpus and cpu 0 */
862 cpu = dev->id; 997 cpu = dev->id;
863 if (cpu_online(cpu) || cpu == 0) 998 cpu -= cpu % (smp_cpu_mtid + 1);
999 if (cpu == 0)
864 goto out; 1000 goto out;
1001 for (i = 0; i <= smp_cpu_mtid; i++)
1002 if (cpu_online(cpu + i))
1003 goto out;
865 pcpu = pcpu_devices + cpu; 1004 pcpu = pcpu_devices + cpu;
866 rc = 0; 1005 rc = 0;
867 switch (val) { 1006 switch (val) {
868 case 0: 1007 case 0:
869 if (pcpu->state != CPU_STATE_CONFIGURED) 1008 if (pcpu->state != CPU_STATE_CONFIGURED)
870 break; 1009 break;
871 rc = sclp_cpu_deconfigure(pcpu->address); 1010 rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
872 if (rc) 1011 if (rc)
873 break; 1012 break;
874 pcpu->state = CPU_STATE_STANDBY; 1013 for (i = 0; i <= smp_cpu_mtid; i++) {
875 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 1014 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
1015 continue;
1016 pcpu[i].state = CPU_STATE_STANDBY;
1017 smp_cpu_set_polarization(cpu + i,
1018 POLARIZATION_UNKNOWN);
1019 }
876 topology_expect_change(); 1020 topology_expect_change();
877 break; 1021 break;
878 case 1: 1022 case 1:
879 if (pcpu->state != CPU_STATE_STANDBY) 1023 if (pcpu->state != CPU_STATE_STANDBY)
880 break; 1024 break;
881 rc = sclp_cpu_configure(pcpu->address); 1025 rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
882 if (rc) 1026 if (rc)
883 break; 1027 break;
884 pcpu->state = CPU_STATE_CONFIGURED; 1028 for (i = 0; i <= smp_cpu_mtid; i++) {
885 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 1029 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
1030 continue;
1031 pcpu[i].state = CPU_STATE_CONFIGURED;
1032 smp_cpu_set_polarization(cpu + i,
1033 POLARIZATION_UNKNOWN);
1034 }
886 topology_expect_change(); 1035 topology_expect_change();
887 break; 1036 break;
888 default: 1037 default:
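The configure/deconfigure handler above now works per core: the sysfs CPU id is rounded down to the first thread, a core with any online thread (or the core containing CPU 0) is rejected, the SCLP call receives the core address (pcpu->address >> smp_cpu_mt_shift), and state and polarization are then applied to every present sibling thread. A minimal sketch of the address arithmetic, with hypothetical values (two threads per core, one address bit for the thread id):

    /* User-space sketch of the per-core arithmetic used by the handler
     * above; SMP_CPU_MTID, SMP_CPU_MT_SHIFT and the addresses are
     * illustrative assumptions. */
    #include <stdio.h>

    #define SMP_CPU_MTID     1
    #define SMP_CPU_MT_SHIFT 1

    int main(void)
    {
            int cpu = 3;                    /* sysfs write arrived for CPU 3 */
            unsigned int address = 0x11;    /* pcpu->address of that CPU */
            int base = cpu - (cpu % (SMP_CPU_MTID + 1));

            /* base == 0 would denote the core containing CPU 0, which the
             * handler rejects outright. */
            printf("first thread of the core: cpu %d\n", base);     /* 2 */
            printf("core address passed to SCLP: 0x%x\n",
                   address >> SMP_CPU_MT_SHIFT);                    /* 0x8 */
            return 0;
    }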
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 811f542b8ed4..85565f1ff474 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -194,6 +194,14 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
194 seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved); 194 seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
195 seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated); 195 seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
196 seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared); 196 seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
197 if (info->mt_installed & 0x80) {
198 seq_printf(m, "LPAR CPUs G-MTID: %d\n",
199 info->mt_general & 0x1f);
200 seq_printf(m, "LPAR CPUs S-MTID: %d\n",
201 info->mt_installed & 0x1f);
202 seq_printf(m, "LPAR CPUs PS-MTID: %d\n",
203 info->mt_psmtid & 0x1f);
204 }
197} 205}
198 206
199static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info) 207static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
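The SYSIB 2.2.2 fields printed above are bit fields: bit 0x80 of mt_installed appears to flag that multithreading is installed, while the low five bits of mt_general, mt_installed and mt_psmtid carry the respective maximum thread ids. A short decoding sketch with a hypothetical raw byte value:

    /* Decoding sketch for the MT byte of SYSIB 2.2.2; the value 0x81 is a
     * hypothetical example meaning "MT installed, maximum thread id 1". */
    #include <stdio.h>

    int main(void)
    {
            unsigned char mt_installed = 0x81;

            if (mt_installed & 0x80)
                    printf("MT installed, S-MTID %d\n", mt_installed & 0x1f);
            else
                    printf("MT not installed\n");
            return 0;
    }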
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index b93bed76ea94..24ee33f1af24 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -59,32 +59,50 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
59 return mask; 59 return mask;
60} 60}
61 61
62static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, 62static cpumask_t cpu_thread_map(unsigned int cpu)
63{
64 cpumask_t mask;
65 int i;
66
67 cpumask_copy(&mask, cpumask_of(cpu));
68 if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
69 return mask;
70 cpu -= cpu % (smp_cpu_mtid + 1);
71 for (i = 0; i <= smp_cpu_mtid; i++)
72 if (cpu_present(cpu + i))
73 cpumask_set_cpu(cpu + i, &mask);
74 return mask;
75}
76
77static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
63 struct mask_info *book, 78 struct mask_info *book,
64 struct mask_info *socket, 79 struct mask_info *socket,
65 int one_socket_per_cpu) 80 int one_socket_per_cpu)
66{ 81{
67 unsigned int cpu; 82 unsigned int core;
68 83
69 for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) { 84 for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
70 unsigned int rcpu; 85 unsigned int rcore;
71 int lcpu; 86 int lcpu, i;
72 87
73 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; 88 rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
74 lcpu = smp_find_processor_id(rcpu); 89 lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
75 if (lcpu < 0) 90 if (lcpu < 0)
76 continue; 91 continue;
77 cpumask_set_cpu(lcpu, &book->mask); 92 for (i = 0; i <= smp_cpu_mtid; i++) {
78 cpu_topology[lcpu].book_id = book->id; 93 cpu_topology[lcpu + i].book_id = book->id;
79 cpumask_set_cpu(lcpu, &socket->mask); 94 cpu_topology[lcpu + i].core_id = rcore;
80 cpu_topology[lcpu].core_id = rcpu; 95 cpu_topology[lcpu + i].thread_id = lcpu + i;
81 if (one_socket_per_cpu) { 96 cpumask_set_cpu(lcpu + i, &book->mask);
82 cpu_topology[lcpu].socket_id = rcpu; 97 cpumask_set_cpu(lcpu + i, &socket->mask);
83 socket = socket->next; 98 if (one_socket_per_cpu)
84 } else { 99 cpu_topology[lcpu + i].socket_id = rcore;
85 cpu_topology[lcpu].socket_id = socket->id; 100 else
101 cpu_topology[lcpu + i].socket_id = socket->id;
102 smp_cpu_set_polarization(lcpu + i, tl_core->pp);
86 } 103 }
87 smp_cpu_set_polarization(lcpu, tl_cpu->pp); 104 if (one_socket_per_cpu)
105 socket = socket->next;
88 } 106 }
89 return socket; 107 return socket;
90} 108}
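cpu_thread_map() above collects the present sibling threads of a CPU's core, and add_cpus_to_mask() now walks cores instead of CPUs: the core number taken from the topology block is shifted by smp_cpu_mt_shift to obtain the address of its first thread, smp_find_processor_id() translates that into a logical CPU, and book, socket, core and thread ids plus polarization are filled in for every sibling. A small mapping sketch with hypothetical numbers (smp_cpu_mt_shift == 1, so core 5 spans CPU addresses 0xa and 0xb):

    /* Illustration of the core-to-address mapping used above; the shift,
     * thread count and core number are assumed example values. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int smp_cpu_mt_shift = 1;
            unsigned int smp_cpu_mtid = 1;
            unsigned int rcore = 5;          /* core number from the SYSIB */
            unsigned int address, i;

            address = rcore << smp_cpu_mt_shift;
            for (i = 0; i <= smp_cpu_mtid; i++)
                    printf("thread %u of core %u has CPU address 0x%x\n",
                           i, rcore, address + i);
            return 0;
    }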
@@ -108,7 +126,7 @@ static void clear_masks(void)
108static union topology_entry *next_tle(union topology_entry *tle) 126static union topology_entry *next_tle(union topology_entry *tle)
109{ 127{
110 if (!tle->nl) 128 if (!tle->nl)
111 return (union topology_entry *)((struct topology_cpu *)tle + 1); 129 return (union topology_entry *)((struct topology_core *)tle + 1);
112 return (union topology_entry *)((struct topology_container *)tle + 1); 130 return (union topology_entry *)((struct topology_container *)tle + 1);
113} 131}
114 132
@@ -231,9 +249,11 @@ static void update_cpu_masks(void)
231 249
232 spin_lock_irqsave(&topology_lock, flags); 250 spin_lock_irqsave(&topology_lock, flags);
233 for_each_possible_cpu(cpu) { 251 for_each_possible_cpu(cpu) {
252 cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
234 cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu); 253 cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
235 cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu); 254 cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
236 if (!MACHINE_HAS_TOPOLOGY) { 255 if (!MACHINE_HAS_TOPOLOGY) {
256 cpu_topology[cpu].thread_id = cpu;
237 cpu_topology[cpu].core_id = cpu; 257 cpu_topology[cpu].core_id = cpu;
238 cpu_topology[cpu].socket_id = cpu; 258 cpu_topology[cpu].socket_id = cpu;
239 cpu_topology[cpu].book_id = cpu; 259 cpu_topology[cpu].book_id = cpu;
@@ -445,6 +465,12 @@ int topology_cpu_init(struct cpu *cpu)
445 return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); 465 return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
446} 466}
447 467
468const struct cpumask *cpu_thread_mask(int cpu)
469{
470 return &cpu_topology[cpu].thread_mask;
471}
472
473
448const struct cpumask *cpu_coregroup_mask(int cpu) 474const struct cpumask *cpu_coregroup_mask(int cpu)
449{ 475{
450 return &cpu_topology[cpu].core_mask; 476 return &cpu_topology[cpu].core_mask;
@@ -456,6 +482,7 @@ static const struct cpumask *cpu_book_mask(int cpu)
456} 482}
457 483
458static struct sched_domain_topology_level s390_topology[] = { 484static struct sched_domain_topology_level s390_topology[] = {
485 { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
459 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 486 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
460 { cpu_book_mask, SD_INIT_NAME(BOOK) }, 487 { cpu_book_mask, SD_INIT_NAME(BOOK) },
461 { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 488 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
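With cpu_thread_mask() exported, the scheduler topology gains an SMT level below MC, BOOK and DIE, so the load balancer can spread tasks across cores before stacking them on sibling threads. The table has to be registered with the scheduler via the generic set_sched_topology() API; where s390 makes that call is not visible in this hunk, so the snippet below is only an assumed illustration of the registration step, not part of the patch.

    /* Assumed illustration: handing the topology table above to the
     * scheduler core; the init hook name is hypothetical. */
    static int __init s390_sched_topology_sketch(void)
    {
            set_sched_topology(s390_topology);
            return 0;
    }
    early_initcall(s390_sched_topology_sketch);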
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e34122e539a1..e53d3595a7c8 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -15,6 +15,8 @@
15#include <asm/cputime.h> 15#include <asm/cputime.h>
16#include <asm/vtimer.h> 16#include <asm/vtimer.h>
17#include <asm/vtime.h> 17#include <asm/vtime.h>
18#include <asm/cpu_mf.h>
19#include <asm/smp.h>
18 20
19static void virt_timer_expire(void); 21static void virt_timer_expire(void);
20 22
@@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock);
23static atomic64_t virt_timer_current; 25static atomic64_t virt_timer_current;
24static atomic64_t virt_timer_elapsed; 26static atomic64_t virt_timer_elapsed;
25 27
28static DEFINE_PER_CPU(u64, mt_cycles[32]);
29static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
30static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
31
26static inline u64 get_vtimer(void) 32static inline u64 get_vtimer(void)
27{ 33{
28 u64 timer; 34 u64 timer;
@@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
61{ 67{
62 struct thread_info *ti = task_thread_info(tsk); 68 struct thread_info *ti = task_thread_info(tsk);
63 u64 timer, clock, user, system, steal; 69 u64 timer, clock, user, system, steal;
70 u64 user_scaled, system_scaled;
71 int i;
64 72
65 timer = S390_lowcore.last_update_timer; 73 timer = S390_lowcore.last_update_timer;
66 clock = S390_lowcore.last_update_clock; 74 clock = S390_lowcore.last_update_clock;
@@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
76 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 84 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
77 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; 85 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
78 86
87 /* Do MT utilization calculation */
88 if (smp_cpu_mtid) {
89 u64 cycles_new[32], *cycles_old;
90 u64 delta, mult, div;
91
92 cycles_old = this_cpu_ptr(mt_cycles);
93 if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
94 mult = div = 0;
95 for (i = 0; i <= smp_cpu_mtid; i++) {
96 delta = cycles_new[i] - cycles_old[i];
97 mult += delta;
98 div += (i + 1) * delta;
99 }
100 if (mult > 0) {
101 /* Update scaling factor */
102 __this_cpu_write(mt_scaling_mult, mult);
103 __this_cpu_write(mt_scaling_div, div);
104 memcpy(cycles_old, cycles_new,
105 sizeof(u64) * (smp_cpu_mtid + 1));
106 }
107 }
108 }
109
79 user = S390_lowcore.user_timer - ti->user_timer; 110 user = S390_lowcore.user_timer - ti->user_timer;
80 S390_lowcore.steal_timer -= user; 111 S390_lowcore.steal_timer -= user;
81 ti->user_timer = S390_lowcore.user_timer; 112 ti->user_timer = S390_lowcore.user_timer;
82 account_user_time(tsk, user, user);
83 113
84 system = S390_lowcore.system_timer - ti->system_timer; 114 system = S390_lowcore.system_timer - ti->system_timer;
85 S390_lowcore.steal_timer -= system; 115 S390_lowcore.steal_timer -= system;
86 ti->system_timer = S390_lowcore.system_timer; 116 ti->system_timer = S390_lowcore.system_timer;
87 account_system_time(tsk, hardirq_offset, system, system); 117
118 user_scaled = user;
119 system_scaled = system;
120 /* Do MT utilization scaling */
121 if (smp_cpu_mtid) {
122 u64 mult = __this_cpu_read(mt_scaling_mult);
123 u64 div = __this_cpu_read(mt_scaling_div);
124
125 user_scaled = (user_scaled * mult) / div;
126 system_scaled = (system_scaled * mult) / div;
127 }
128 account_user_time(tsk, user, user_scaled);
129 account_system_time(tsk, hardirq_offset, system, system_scaled);
88 130
89 steal = S390_lowcore.steal_timer; 131 steal = S390_lowcore.steal_timer;
90 if ((s64) steal > 0) { 132 if ((s64) steal > 0) {
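The MT utilization code above derives a scaling factor from the per-core cycle counters read with stcctm5(): for each thread index i the delta of counter i since the last update is added to mult, and (i + 1) times that delta to div, so mult/div is at most 1 and shrinks when the higher counters advance, presumably reflecting that sibling threads share the core's execution resources. User and system time are then accounted both raw and multiplied by this factor. A worked example with hypothetical counter deltas for a two-thread core:

    /* Worked example of the scaling factor; the deltas and the raw time
     * are made-up numbers for a core with smp_cpu_mtid == 1. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long delta[2] = { 1000, 600 };   /* counter deltas */
            unsigned long long mult = 0, div = 0, raw = 900;
            int i;

            for (i = 0; i < 2; i++) {
                    mult += delta[i];
                    div += (i + 1ULL) * delta[i];
            }
            /* mult = 1600, div = 1000 + 2*600 = 2200, factor ~0.73 */
            printf("scaled = %llu (raw %llu)\n", raw * mult / div, raw);
            return 0;
    }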
@@ -126,7 +168,7 @@ void vtime_account_user(struct task_struct *tsk)
126void vtime_account_irq_enter(struct task_struct *tsk) 168void vtime_account_irq_enter(struct task_struct *tsk)
127{ 169{
128 struct thread_info *ti = task_thread_info(tsk); 170 struct thread_info *ti = task_thread_info(tsk);
129 u64 timer, system; 171 u64 timer, system, system_scaled;
130 172
131 timer = S390_lowcore.last_update_timer; 173 timer = S390_lowcore.last_update_timer;
132 S390_lowcore.last_update_timer = get_vtimer(); 174 S390_lowcore.last_update_timer = get_vtimer();
@@ -135,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk)
135 system = S390_lowcore.system_timer - ti->system_timer; 177 system = S390_lowcore.system_timer - ti->system_timer;
136 S390_lowcore.steal_timer -= system; 178 S390_lowcore.steal_timer -= system;
137 ti->system_timer = S390_lowcore.system_timer; 179 ti->system_timer = S390_lowcore.system_timer;
138 account_system_time(tsk, 0, system, system); 180 system_scaled = system;
181 /* Do MT utilization scaling */
182 if (smp_cpu_mtid) {
183 u64 mult = __this_cpu_read(mt_scaling_mult);
184 u64 div = __this_cpu_read(mt_scaling_div);
185
186 system_scaled = (system_scaled * mult) / div;
187 }
188 account_system_time(tsk, 0, system, system_scaled);
139 189
140 virt_timer_forward(system); 190 virt_timer_forward(system);
141} 191}