Diffstat (limited to 'arch/s390')
 arch/s390/Kconfig                  |   8
 arch/s390/include/asm/hardirq.h    |   4
 arch/s390/include/asm/irqflags.h   |  51
 arch/s390/include/asm/perf_event.h |   3
 arch/s390/include/asm/system.h     |   3
 arch/s390/include/asm/topology.h   |  27
 arch/s390/kernel/mem_detect.c      |   4
 arch/s390/kernel/topology.c        | 150
 arch/s390/mm/init.c                |   3
 arch/s390/mm/maccess.c             |   4
 10 files changed, 159 insertions(+), 98 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f0777a47e3a5..75976a141947 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -95,6 +95,7 @@ config S390
 	select HAVE_KVM if 64BIT
 	select HAVE_ARCH_TRACEHOOK
 	select INIT_ALL_POSSIBLE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
@@ -198,6 +199,13 @@ config HOTPLUG_CPU
 	  can be controlled through /sys/devices/system/cpu/cpu#.
 	  Say N if you want to disable CPU hotplug.
 
+config SCHED_BOOK
+	bool "Book scheduler support"
+	depends on SMP
+	help
+	  Book scheduler support improves the CPU scheduler's decision making
+	  when dealing with machines that have several books.
+
 config MATHEMU
 	bool "IEEE FPU emulation"
 	depends on MARCH_G5
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 498bc3892385..881d94590aeb 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -12,10 +12,6 @@
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/sched.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
 #include <asm/lowcore.h>
 
 #define local_softirq_pending() (S390_lowcore.softirq_pending)
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 15b3ac253898..865d6d891ace 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -8,8 +8,8 @@
 
 #include <linux/types.h>
 
-/* store then or system mask. */
-#define __raw_local_irq_stosm(__or) \
+/* store then OR system mask. */
+#define __arch_local_irq_stosm(__or) \
 ({ \
 	unsigned long __mask; \
 	asm volatile( \
@@ -18,8 +18,8 @@
 	__mask; \
 })
 
-/* store then and system mask. */
-#define __raw_local_irq_stnsm(__and) \
+/* store then AND system mask. */
+#define __arch_local_irq_stnsm(__and) \
 ({ \
 	unsigned long __mask; \
 	asm volatile( \
@@ -29,39 +29,44 @@
 })
 
 /* set system mask. */
-#define __raw_local_irq_ssm(__mask) \
-({ \
-	asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \
-})
+static inline void __arch_local_irq_ssm(unsigned long flags)
+{
+	asm volatile("ssm %0" : : "Q" (flags) : "memory");
+}
 
-/* interrupt control.. */
-static inline unsigned long raw_local_irq_enable(void)
+static inline unsigned long arch_local_save_flags(void)
 {
-	return __raw_local_irq_stosm(0x03);
+	return __arch_local_irq_stosm(0x00);
 }
 
-static inline unsigned long raw_local_irq_disable(void)
+static inline unsigned long arch_local_irq_save(void)
 {
-	return __raw_local_irq_stnsm(0xfc);
+	return __arch_local_irq_stnsm(0xfc);
 }
 
-#define raw_local_save_flags(x) \
-do { \
-	typecheck(unsigned long, x); \
-	(x) = __raw_local_irq_stosm(0x00); \
-} while (0)
+static inline void arch_local_irq_disable(void)
+{
+	arch_local_irq_save();
+}
 
-static inline void raw_local_irq_restore(unsigned long flags)
+static inline void arch_local_irq_enable(void)
 {
-	__raw_local_irq_ssm(flags);
+	__arch_local_irq_stosm(0x03);
 }
 
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	__arch_local_irq_ssm(flags);
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
 	return !(flags & (3UL << (BITS_PER_LONG - 8)));
 }
 
-/* For spinlocks etc */
-#define raw_local_irq_save(x)	((x) = raw_local_irq_disable())
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
 
 #endif /* __ASM_IRQFLAGS_H */
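
Note: this hunk is the s390 side of the tree-wide irqflags consolidation, renaming the raw_local_* primitives to the arch_local_* entry points that the generic <linux/irqflags.h> wrappers call. The mask arithmetic is easy to sanity-check outside the kernel; below is a minimal userspace sketch (not kernel code, the names are reused purely for illustration). stosm/stnsm return the PSW system mask in the most significant byte of an unsigned long, and bits 6 and 7 of that byte are the I/O and external interrupt enable bits, hence the 0x03/0xfc masks and the 3UL shift test:

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

static bool arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & (3UL << (BITS_PER_LONG - 8)));
}

int main(void)
{
	/* state after "stosm 0x03": both interrupt bits set */
	unsigned long enabled = 0x03UL << (BITS_PER_LONG - 8);
	/* state after "stnsm 0xfc": both interrupt bits cleared */
	unsigned long disabled = enabled & (0xfcUL << (BITS_PER_LONG - 8));

	printf("%d %d\n", arch_irqs_disabled_flags(enabled),
	       arch_irqs_disabled_flags(disabled)); /* prints: 0 1 */
	return 0;
}

Compiled with any C compiler this prints "0 1", confirming that the AND-mask used for disabling and the flag test agree.
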
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 3840cbe77637..a75f168d2718 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -4,7 +4,6 @@
  * Copyright 2009 Martin Schwidefsky, IBM Corporation.
  */
 
-static inline void set_perf_event_pending(void) {}
-static inline void clear_perf_event_pending(void) {}
+/* Empty, just to avoid compiling error */
 
 #define PERF_EVENT_INDEX_OFFSET 0
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index cef66210c846..1f2ebc4afd82 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -97,7 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs)
 
 extern void account_vtime(struct task_struct *, struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
-extern void account_system_vtime(struct task_struct *);
 
 #ifdef CONFIG_PFAULT
 extern void pfault_irq_init(void);
@@ -399,7 +398,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 static inline void
 __set_psw_mask(unsigned long mask)
 {
-	__load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
+	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
 }
 
 #define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
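
Note: __set_psw_mask() still masks the saved flags with ~(-1UL >> 8) before merging them into the new PSW mask; that constant keeps only the most significant byte of a long, which is exactly where arch_local_save_flags() returns the PSW system mask. A quick standalone check of the bit trick:

#include <stdio.h>

int main(void)
{
	/* ~(-1UL >> 8): all bits of the top byte set, everything else clear */
	printf("%#lx\n", ~(-1UL >> 8)); /* 0xff00000000000000 with 64-bit longs */
	return 0;
}
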
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 831bd033ea77..051107a2c5e2 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -3,15 +3,32 @@
 
 #include <linux/cpumask.h>
 
-#define mc_capable()	(1)
-
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
-
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
+static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+	return &cpu_core_map[cpu];
+}
+
 #define topology_core_id(cpu)		(cpu_core_id[cpu])
 #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
+#define mc_capable()			(1)
+
+#ifdef CONFIG_SCHED_BOOK
+
+extern unsigned char cpu_book_id[NR_CPUS];
+extern cpumask_t cpu_book_map[NR_CPUS];
+
+static inline const struct cpumask *cpu_book_mask(unsigned int cpu)
+{
+	return &cpu_book_map[cpu];
+}
+
+#define topology_book_id(cpu)		(cpu_book_id[cpu])
+#define topology_book_cpumask(cpu)	(&cpu_book_map[cpu])
+
+#endif /* CONFIG_SCHED_BOOK */
 
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
@@ -30,6 +47,8 @@ static inline void s390_init_cpu_topology(void)
 };
 #endif
 
+#define SD_BOOK_INIT	SD_CPU_INIT
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
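
Note: the header now describes two nested levels per CPU, a core group (cpu_core_map) contained in a book (cpu_book_map), and the scheduler's domain code relies on each level nesting inside the next. An illustrative userspace model of that invariant with a made-up 8-CPU layout, two books of two 2-CPU core groups each (none of this is kernel API, it only demonstrates the nesting property):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpumask;	/* stand-in for cpumask_t */

int main(void)
{
	cpumask cpu_core_map[8], cpu_book_map[8];

	for (int cpu = 0; cpu < 8; cpu++) {
		cpu_core_map[cpu] = 0x3ULL << (cpu & ~1); /* pairs {0,1},{2,3},... */
		cpu_book_map[cpu] = 0xfULL << (cpu & ~3); /* quads {0..3},{4..7} */
	}
	/* every core group must be a subset of its book */
	for (int cpu = 0; cpu < 8; cpu++)
		assert((cpu_core_map[cpu] & cpu_book_map[cpu]) == cpu_core_map[cpu]);
	printf("core groups nest inside books: OK\n");
	return 0;
}
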
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 559af0d07878..0fbe4e32f7ba 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -54,11 +54,11 @@ void detect_memory_layout(struct mem_chunk chunk[])
 	 * right thing and we don't get scheduled away with low address
 	 * protection disabled.
 	 */
-	flags = __raw_local_irq_stnsm(0xf8);
+	flags = __arch_local_irq_stnsm(0xf8);
 	__ctl_store(cr0, 0, 0);
 	__ctl_clear_bit(0, 28);
 	find_memory_chunks(chunk);
 	__ctl_load(cr0, 0, 0);
-	__raw_local_irq_ssm(flags);
+	arch_local_irq_restore(flags);
 }
 EXPORT_SYMBOL(detect_memory_layout);
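
Note: only the spellings change here, the semantics do not. "stnsm 0xf8" ANDs 0xf8 into the PSW system mask, clearing bits 5, 6 and 7 (DAT, I/O interrupts, external interrupts) so the code cannot be interrupted while low address protection is off; arch_local_irq_restore() then puts the saved byte back via ssm. A small userspace demonstration of which PSW bits the 0xf8 mask switches off:

#include <stdio.h>

int main(void)
{
	unsigned char mask = 0xf8;	/* AND-mask given to stnsm */

	for (int bit = 0; bit < 8; bit++)	/* PSW bits count from the MSB */
		if (!(mask & (0x80 >> bit)))
			printf("bit %d cleared\n", bit); /* bits 5, 6, 7 */
	return 0;
}
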
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bcef00766a64..13559c993847 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -57,8 +57,8 @@ struct tl_info {
 	union tl_entry tle[0];
 };
 
-struct core_info {
-	struct core_info *next;
+struct mask_info {
+	struct mask_info *next;
 	unsigned char id;
 	cpumask_t mask;
 };
@@ -66,7 +66,6 @@ struct core_info {
 static int topology_enabled;
 static void topology_work_fn(struct work_struct *work);
 static struct tl_info *tl_info;
-static struct core_info core_info;
 static int machine_has_topology;
 static struct timer_list topology_timer;
 static void set_topology_timer(void);
@@ -74,38 +73,37 @@ static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
 static DEFINE_SPINLOCK(topology_lock);
 
+static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
 
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+#ifdef CONFIG_SCHED_BOOK
+static struct mask_info book_info;
+cpumask_t cpu_book_map[NR_CPUS];
+unsigned char cpu_book_id[NR_CPUS];
+#endif
+
+static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
-	struct core_info *core = &core_info;
-	unsigned long flags;
 	cpumask_t mask;
 
 	cpus_clear(mask);
 	if (!topology_enabled || !machine_has_topology)
 		return cpu_possible_map;
-	spin_lock_irqsave(&topology_lock, flags);
-	while (core) {
-		if (cpu_isset(cpu, core->mask)) {
-			mask = core->mask;
+	while (info) {
+		if (cpu_isset(cpu, info->mask)) {
+			mask = info->mask;
 			break;
 		}
-		core = core->next;
+		info = info->next;
 	}
-	spin_unlock_irqrestore(&topology_lock, flags);
 	if (cpus_empty(mask))
 		mask = cpumask_of_cpu(cpu);
 	return mask;
 }
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
-{
-	return &cpu_core_map[cpu];
-}
-
-static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
+static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
+			     struct mask_info *core)
 {
 	unsigned int cpu;
 
@@ -117,23 +115,35 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
 
 		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
 		for_each_present_cpu(lcpu) {
-			if (cpu_logical_map(lcpu) == rcpu) {
-				cpu_set(lcpu, core->mask);
-				cpu_core_id[lcpu] = core->id;
-				smp_cpu_polarization[lcpu] = tl_cpu->pp;
-			}
+			if (cpu_logical_map(lcpu) != rcpu)
+				continue;
+#ifdef CONFIG_SCHED_BOOK
+			cpu_set(lcpu, book->mask);
+			cpu_book_id[lcpu] = book->id;
+#endif
+			cpu_set(lcpu, core->mask);
+			cpu_core_id[lcpu] = core->id;
+			smp_cpu_polarization[lcpu] = tl_cpu->pp;
 		}
 	}
 }
 
-static void clear_cores(void)
+static void clear_masks(void)
 {
-	struct core_info *core = &core_info;
+	struct mask_info *info;
 
-	while (core) {
-		cpus_clear(core->mask);
-		core = core->next;
+	info = &core_info;
+	while (info) {
+		cpus_clear(info->mask);
+		info = info->next;
+	}
+#ifdef CONFIG_SCHED_BOOK
+	info = &book_info;
+	while (info) {
+		cpus_clear(info->mask);
+		info = info->next;
 	}
+#endif
 }
 
 static union tl_entry *next_tle(union tl_entry *tle)
@@ -146,29 +156,36 @@ static union tl_entry *next_tle(union tl_entry *tle)
 
 static void tl_to_cores(struct tl_info *info)
 {
+#ifdef CONFIG_SCHED_BOOK
+	struct mask_info *book = &book_info;
+#else
+	struct mask_info *book = NULL;
+#endif
+	struct mask_info *core = &core_info;
 	union tl_entry *tle, *end;
-	struct core_info *core = &core_info;
+
 
 	spin_lock_irq(&topology_lock);
-	clear_cores();
+	clear_masks();
 	tle = info->tle;
 	end = (union tl_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
 		switch (tle->nl) {
-		case 5:
-		case 4:
-		case 3:
+#ifdef CONFIG_SCHED_BOOK
 		case 2:
+			book = book->next;
+			book->id = tle->container.id;
 			break;
+#endif
 		case 1:
 			core = core->next;
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_core(&tle->cpu, core);
+			add_cpus_to_mask(&tle->cpu, book, core);
 			break;
 		default:
-			clear_cores();
+			clear_masks();
 			machine_has_topology = 0;
 			goto out;
 		}
@@ -221,10 +238,29 @@ int topology_set_cpu_management(int fc)
 
 static void update_cpu_core_map(void)
 {
+	unsigned long flags;
 	int cpu;
 
-	for_each_possible_cpu(cpu)
-		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+	spin_lock_irqsave(&topology_lock, flags);
+	for_each_possible_cpu(cpu) {
+		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
+#ifdef CONFIG_SCHED_BOOK
+		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+#endif
+	}
+	spin_unlock_irqrestore(&topology_lock, flags);
+}
+
+static void store_topology(struct tl_info *info)
+{
+#ifdef CONFIG_SCHED_BOOK
+	int rc;
+
+	rc = stsi(info, 15, 1, 3);
+	if (rc != -ENOSYS)
+		return;
+#endif
+	stsi(info, 15, 1, 2);
 }
 
 int arch_update_cpu_topology(void)
@@ -238,7 +274,7 @@ int arch_update_cpu_topology(void)
 		topology_update_polarization_simple();
 		return 0;
 	}
-	stsi(info, 15, 1, 2);
+	store_topology(info);
 	tl_to_cores(info);
 	update_cpu_core_map();
 	for_each_online_cpu(cpu) {
@@ -299,12 +335,24 @@ out:
 }
 __initcall(init_topology_update);
 
+static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset)
+{
+	int i, nr_masks;
+
+	nr_masks = info->mag[NR_MAG - offset];
+	for (i = 0; i < info->mnest - offset; i++)
+		nr_masks *= info->mag[NR_MAG - offset - 1 - i];
+	nr_masks = max(nr_masks, 1);
+	for (i = 0; i < nr_masks; i++) {
+		mask->next = alloc_bootmem(sizeof(struct mask_info));
+		mask = mask->next;
+	}
+}
+
 void __init s390_init_cpu_topology(void)
 {
 	unsigned long long facility_bits;
 	struct tl_info *info;
-	struct core_info *core;
-	int nr_cores;
 	int i;
 
 	if (stfle(&facility_bits, 1) <= 0)
@@ -315,25 +363,13 @@ void __init s390_init_cpu_topology(void)
 
 	tl_info = alloc_bootmem_pages(PAGE_SIZE);
 	info = tl_info;
-	stsi(info, 15, 1, 2);
-
-	nr_cores = info->mag[NR_MAG - 2];
-	for (i = 0; i < info->mnest - 2; i++)
-		nr_cores *= info->mag[NR_MAG - 3 - i];
-
+	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
 	for (i = 0; i < NR_MAG; i++)
 		printk(" %d", info->mag[i]);
 	printk(" / %d\n", info->mnest);
-
-	core = &core_info;
-	for (i = 0; i < nr_cores; i++) {
-		core->next = alloc_bootmem(sizeof(struct core_info));
-		core = core->next;
-		if (!core)
-			goto error;
-	}
-	return;
-error:
-	machine_has_topology = 0;
+	alloc_masks(info, &core_info, 2);
+#ifdef CONFIG_SCHED_BOOK
+	alloc_masks(info, &book_info, 3);
+#endif
 }
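
Note: alloc_masks() replaces the old open-coded core count. The number of containers at a given level is the magnitude reported at that level multiplied by the magnitudes of every level above it, up to the reported nesting depth. A worked standalone example with hypothetical SYSIB 15.1.x magnitudes, 2 books, 6 cores per book, 4 CPUs per core (not real machine data):

#include <stdio.h>

#define NR_MAG 6

/* same arithmetic as alloc_masks(), minus the bootmem allocation */
static int nr_masks(const int *mag, int mnest, int offset)
{
	int n = mag[NR_MAG - offset];

	for (int i = 0; i < mnest - offset; i++)
		n *= mag[NR_MAG - offset - 1 - i];
	return n > 1 ? n : 1;
}

int main(void)
{
	int mag[NR_MAG] = { 0, 0, 0, 2, 6, 4 }; /* hypothetical magnitudes */
	int mnest = 3;

	printf("core containers: %d\n", nr_masks(mag, mnest, 2)); /* 6 * 2 = 12 */
	printf("book containers: %d\n", nr_masks(mag, mnest, 3)); /* 2 */
	return 0;
}
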
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 30eb6d02ddb8..94b8ba2ec857 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -50,7 +50,6 @@ EXPORT_SYMBOL(empty_zero_page);
  */
 void __init paging_init(void)
 {
-	static const int ssm_mask = 0x04000000L;
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	unsigned long pgd_type;
 
@@ -72,7 +71,7 @@ void __init paging_init(void)
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
 	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
-	__raw_local_irq_ssm(ssm_mask);
+	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
 
 	atomic_set(&init_mm.context.attach_count, 1);
 
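
Note: the open-coded constant is the old ssm_mask in disguise. 4UL << (BITS_PER_LONG - 8) puts 0x04 into the most significant byte, i.e. PSW bit 5 (DAT), and with 32-bit longs evaluates to exactly the deleted 0x04000000 value. A quick check:

#include <stdio.h>

int main(void)
{
	printf("%#llx\n", 4ULL << (32 - 8)); /* 0x4000000, the old ssm_mask */
	printf("%#llx\n", 4ULL << (64 - 8)); /* 0x400000000000000 on 64 bit */
	return 0;
}
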
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index a8c2af8c650f..71a4b0d34be0 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -71,7 +71,7 @@ int memcpy_real(void *dest, void *src, size_t count)
 
 	if (!count)
 		return 0;
-	flags = __raw_local_irq_stnsm(0xf8UL);
+	flags = __arch_local_irq_stnsm(0xf8UL);
 	asm volatile (
 		"0: mvcle %1,%2,0x0\n"
 		"1: jo 0b\n"
@@ -82,6 +82,6 @@ int memcpy_real(void *dest, void *src, size_t count)
 		  "+d" (_len2), "=m" (*((long *) dest))
 		: "m" (*((long *) src))
 		: "cc", "memory");
-	__raw_local_irq_ssm(flags);
+	arch_local_irq_restore(flags);
 	return rc;
 }