about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r--arch/s390/appldata/appldata_base.c12
-rw-r--r--arch/s390/kernel/compat_wrapper.S10
-rw-r--r--arch/s390/kernel/entry.S6
-rw-r--r--arch/s390/kernel/entry64.S6
-rw-r--r--arch/s390/kernel/head.S1
-rw-r--r--arch/s390/kernel/smp.c30
-rw-r--r--arch/s390/kernel/sys_s390.c20
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S1
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/mm/vmem.c6
11 files changed, 64 insertions(+), 32 deletions(-)
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 6ffbab77ae4d..62391fb1f61f 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -173,7 +173,7 @@ int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 /*
  * appldata_mod_vtimer_wrap()
  *
- * wrapper function for mod_virt_timer(), because smp_call_function_on()
+ * wrapper function for mod_virt_timer(), because smp_call_function_single()
  * accepts only one parameter.
  */
 static void __appldata_mod_vtimer_wrap(void *p) {
@@ -208,9 +208,9 @@ __appldata_vtimer_setup(int cmd)
 					  num_online_cpus()) * TOD_MICRO;
 		for_each_online_cpu(i) {
 			per_cpu(appldata_timer, i).expires = per_cpu_interval;
-			smp_call_function_on(add_virt_timer_periodic,
-					     &per_cpu(appldata_timer, i),
-					     0, 1, i);
+			smp_call_function_single(i, add_virt_timer_periodic,
+						 &per_cpu(appldata_timer, i),
+						 0, 1);
 		}
 		appldata_timer_active = 1;
 		P_INFO("Monitoring timer started.\n");
@@ -236,8 +236,8 @@ __appldata_vtimer_setup(int cmd)
 			} args;
 			args.timer = &per_cpu(appldata_timer, i);
 			args.expires = per_cpu_interval;
-			smp_call_function_on(__appldata_mod_vtimer_wrap,
-					     &args, 0, 1, i);
+			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
+						 &args, 0, 1);
 		}
 	}
 }
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index acc415457b45..6ee1bedbd1bf 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1710,3 +1710,13 @@ compat_sys_timerfd_wrapper:
 sys_eventfd_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_eventfd
+
+	.globl	sys_fallocate_wrapper
+sys_fallocate_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	sllg	%r4,%r4,32		# get high word of 64bit loff_t
+	lr	%r4,%r5			# get low word of 64bit loff_t
+	sllg	%r5,%r6,32		# get high word of 64bit loff_t
+	l	%r5,164(%r15)		# get low word of 64bit loff_t
+	jg	sys_fallocate
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index bc7ff3658c3d..f3bceb165321 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -624,9 +624,11 @@ io_work_loop:
 # _TIF_MCCK_PENDING is set, call handler
 #
 io_mcck_pending:
+	TRACE_IRQS_OFF
 	l	%r1,BASED(.Ls390_handle_mcck)
-	la	%r14,BASED(io_work_loop)
-	br	%r1			# TIF bit will be cleared by handler
+	basr	%r14,%r1		# TIF bit will be cleared by handler
+	TRACE_IRQS_ON
+	b	BASED(io_work_loop)
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2a7b1304418b..9c0d5cc8269d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -611,8 +611,10 @@ io_work_loop:
 # _TIF_MCCK_PENDING is set, call handler
 #
 io_mcck_pending:
-	larl	%r14,io_work_loop
-	jg	s390_handle_mcck	# TIF bit will be cleared by handler
+	TRACE_IRQS_OFF
+	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
+	TRACE_IRQS_ON
+	j	io_work_loop
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 8f8c802f1bcf..83477c7dc743 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -35,6 +35,7 @@
 #define ARCH_OFFSET	0
 #endif
 
+.section ".text.head","ax"
 #ifndef CONFIG_IPL
 	.org	0
 	.long	0x00080000,0x80000000+startup	# Just a restart PSW
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 182c085ae4dd..03674fbe598f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -120,7 +120,7 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
 	if (wait)
 		data.finished = CPU_MASK_NONE;
 
-	spin_lock_bh(&call_lock);
+	spin_lock(&call_lock);
 	call_data = &data;
 
 	for_each_cpu_mask(cpu, map)
@@ -129,18 +129,16 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
 	/* Wait for response */
 	while (!cpus_equal(map, data.started))
 		cpu_relax();
-
 	if (wait)
 		while (!cpus_equal(map, data.finished))
 			cpu_relax();
-
-	spin_unlock_bh(&call_lock);
-
+	spin_unlock(&call_lock);
 out:
-	local_irq_disable();
-	if (local)
+	if (local) {
+		local_irq_disable();
 		func(info);
-	local_irq_enable();
+		local_irq_enable();
+	}
 }
 
 /*
@@ -170,30 +168,28 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
 EXPORT_SYMBOL(smp_call_function);
 
 /*
- * smp_call_function_on:
+ * smp_call_function_single:
+ * @cpu: the CPU where func should run
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
  * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
- * @cpu: the CPU where func should run
  *
  * Run a function on one processor.
  *
  * You must not call this function with disabled interrupts, from a
  * hardware interrupt handler or from a bottom half.
  */
-int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
-			 int wait, int cpu)
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			     int nonatomic, int wait)
 {
-	cpumask_t map = CPU_MASK_NONE;
-
 	preempt_disable();
-	cpu_set(cpu, map);
-	__smp_call_function_map(func, info, nonatomic, wait, map);
+	__smp_call_function_map(func, info, nonatomic, wait,
+				cpumask_of_cpu(cpu));
 	preempt_enable();
 	return 0;
 }
-EXPORT_SYMBOL(smp_call_function_on);
+EXPORT_SYMBOL(smp_call_function_single);
 
 static void do_send_stop(void)
 {
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 1c90c7e99978..13e27bdb96e2 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -265,3 +265,23 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args)
 		return -EFAULT;
 	return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
 }
+
+#ifndef CONFIG_64BIT
+/*
+ * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
+ * 64 bit argument "len" is split into the upper and lower 32 bits. The
+ * system call wrapper in the user space loads the value to %r6/%r7.
+ * The code in entry.S keeps the values in %r2 - %r6 where they are and
+ * stores %r7 to 96(%r15). But the standard C linkage requires that
+ * the whole 64 bit value for len is stored on the stack and doesn't
+ * use %r6 at all. So s390_fallocate has to convert the arguments from
+ *   %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len
+ * to
+ *   %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
+ */
+asmlinkage long s390_fallocate(int fd, int mode, loff_t offset,
+			       u32 len_high, u32 len_low)
+{
+	return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
+}
+#endif
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 738feb4a0aad..9e26ed9fe4e7 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -322,7 +322,7 @@ NI_SYSCALL /* 310 sys_move_pages */
 SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
 SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait_wrapper)
 SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
-NI_SYSCALL							/* 314 sys_fallocate */
+SYSCALL(s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
 SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper)	/* 315 */
 SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper)
 SYSCALL(sys_timerfd,sys_timerfd,compat_sys_timerfd_wrapper)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 6ab7d4ee13a4..b4622a3889b0 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -21,6 +21,7 @@ SECTIONS
 	. = 0x00000000;
 	_text = .;			/* Text and read-only data */
 	.text : {
+		*(.text.head)
 	TEXT_TEXT
 	SCHED_TEXT
 	LOCK_TEXT
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b6ed143e8597..84ff78de6bac 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -415,7 +415,7 @@ EXPORT_SYMBOL(add_virt_timer_periodic);
 
 /*
  * If we change a pending timer the function must be called on the CPU
- * where the timer is running on, e.g. by smp_call_function_on()
+ * where the timer is running on, e.g. by smp_call_function_single()
  *
  * The original mod_timer adds the timer if it is not pending. For compatibility
  * we do the same. The timer will be added on the current CPU as a oneshot timer.
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 92a565190028..fd594d5fe142 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -29,8 +29,8 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-void memmap_init(unsigned long size, int nid, unsigned long zone,
-		 unsigned long start_pfn)
+void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
+			   unsigned long start_pfn)
 {
 	struct page *start, *end;
 	struct page *map_start, *map_end;
@@ -66,7 +66,7 @@ void memmap_init(unsigned long size, int nid, unsigned long zone,
 	}
 }
 
-static inline void *vmem_alloc_pages(unsigned int order)
+static void *__init_refok vmem_alloc_pages(unsigned int order)
 {
 	if (slab_is_available())
 		return (void *)__get_free_pages(GFP_KERNEL, order);