-rw-r--r--  Documentation/s390/s390dbf.txt | 21
-rw-r--r--  arch/s390/Kconfig | 33
-rw-r--r--  arch/s390/crypto/aes_s390.c | 8
-rw-r--r--  arch/s390/crypto/des_s390.c | 8
-rw-r--r--  arch/s390/crypto/sha1_s390.c | 8
-rw-r--r--  arch/s390/crypto/sha256_s390.c | 8
-rw-r--r--  arch/s390/defconfig | 1
-rw-r--r--  arch/s390/kernel/Makefile | 2
-rw-r--r--  arch/s390/kernel/compat_linux.h | 73
-rw-r--r--  arch/s390/kernel/compat_signal.c | 11
-rw-r--r--  arch/s390/kernel/debug.c | 53
-rw-r--r--  arch/s390/kernel/early.c | 1
-rw-r--r--  arch/s390/kernel/entry.h | 60
-rw-r--r--  arch/s390/kernel/entry64.S | 2
-rw-r--r--  arch/s390/kernel/ipl.c | 2
-rw-r--r--  arch/s390/kernel/kprobes.c | 2
-rw-r--r--  arch/s390/kernel/process.c | 77
-rw-r--r--  arch/s390/kernel/ptrace.c | 1
-rw-r--r--  arch/s390/kernel/s390_ext.c | 14
-rw-r--r--  arch/s390/kernel/setup.c | 15
-rw-r--r--  arch/s390/kernel/signal.c | 16
-rw-r--r--  arch/s390/kernel/smp.c | 91
-rw-r--r--  arch/s390/kernel/sys_s390.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 259
-rw-r--r--  arch/s390/kernel/topology.c | 314
-rw-r--r--  arch/s390/kernel/traps.c | 17
-rw-r--r--  arch/s390/lib/delay.c | 14
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 59
-rw-r--r--  arch/s390/mm/extmem.c | 67
-rw-r--r--  arch/s390/mm/fault.c | 21
-rw-r--r--  arch/s390/mm/init.c | 1
-rw-r--r--  drivers/char/hw_random/core.c | 4
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/s390/block/Kconfig | 1
-rw-r--r--  drivers/s390/block/dasd.c | 10
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 34
-rw-r--r--  drivers/s390/block/dasd_alias.c | 49
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 7
-rw-r--r--  drivers/s390/block/dasd_fba.c | 3
-rw-r--r--  drivers/s390/block/dasd_int.h | 6
-rw-r--r--  drivers/s390/block/dcssblk.c | 53
-rw-r--r--  drivers/s390/char/monreader.c | 54
-rw-r--r--  drivers/s390/char/sclp_tty.c | 2
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 13
-rw-r--r--  drivers/s390/char/tape_34xx.c | 2
-rw-r--r--  drivers/s390/char/tape_char.c | 4
-rw-r--r--  drivers/s390/char/tape_class.c | 5
-rw-r--r--  drivers/s390/char/tape_class.h | 2
-rw-r--r--  drivers/s390/char/vmur.c | 24
-rw-r--r--  drivers/s390/char/vmur.h | 4
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 4
-rw-r--r--  drivers/s390/char/zcore.c | 2
-rw-r--r--  drivers/s390/cio/chsc.c | 15
-rw-r--r--  drivers/s390/cio/cio.c | 30
-rw-r--r--  drivers/s390/cio/cio.h | 1
-rw-r--r--  drivers/s390/cio/css.c | 16
-rw-r--r--  drivers/s390/cio/css.h | 1
-rw-r--r--  drivers/s390/cio/device.c | 1
-rw-r--r--  drivers/s390/cio/device.h | 1
-rw-r--r--  drivers/s390/cio/device_ops.c | 9
-rw-r--r--  drivers/s390/cio/device_status.c | 8
-rw-r--r--  drivers/s390/cio/qdio.c | 180
-rw-r--r--  drivers/s390/cio/qdio.h | 28
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 189
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 15
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 181
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 16
-rw-r--r--  drivers/s390/crypto/zcrypt_cca_key.h | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 201
-rw-r--r--  drivers/s390/net/claw.c | 344
-rw-r--r--  drivers/s390/net/netiucv.c | 97
-rw-r--r--  drivers/s390/s390mach.c | 8
-rw-r--r--  drivers/s390/s390mach.h | 4
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 2
-rw-r--r--  drivers/s390/sysinfo.c | 116
-rw-r--r--  include/asm-s390/cio.h | 4
-rw-r--r--  include/asm-s390/cpu.h | 8
-rw-r--r--  include/asm-s390/debug.h | 5
-rw-r--r--  include/asm-s390/extmem.h | 11
-rw-r--r--  include/asm-s390/hardirq.h | 2
-rw-r--r--  include/asm-s390/lowcore.h | 11
-rw-r--r--  include/asm-s390/processor.h | 7
-rw-r--r--  include/asm-s390/smp.h | 3
-rw-r--r--  include/asm-s390/sysinfo.h | 116
-rw-r--r--  include/asm-s390/system.h | 2
-rw-r--r--  include/asm-s390/timex.h | 13
-rw-r--r--  include/asm-s390/tlbflush.h | 36
-rw-r--r--  include/asm-s390/topology.h | 23
-rw-r--r--  include/linux/interrupt.h | 19
-rw-r--r--  include/linux/irq.h | 10
-rw-r--r--  kernel/time/tick-broadcast.c | 2
-rw-r--r--  kernel/time/tick-common.c | 4
-rw-r--r--  kernel/time/tick-oneshot.c | 2
95 files changed, 2180 insertions, 1116 deletions
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt
index 0eb7c58916de..e05420973698 100644
--- a/Documentation/s390/s390dbf.txt
+++ b/Documentation/s390/s390dbf.txt
@@ -115,6 +115,27 @@ Return Value: Handle for generated debug area
 Description:  Allocates memory for a debug log
               Must not be called within an interrupt handler
 
+----------------------------------------------------------------------------
+debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
+                                  int buf_size, mode_t mode, uid_t uid,
+                                  gid_t gid);
+
+Parameter:    name:      Name of debug log (e.g. used for debugfs entry)
+              pages:     Number of pages, which will be allocated per area
+              nr_areas:  Number of debug areas
+              buf_size:  Size of data area in each debug entry
+              mode:      File mode for debugfs files. E.g. S_IRWXUGO
+              uid:       User ID for debugfs files. Currently only 0 is
+                         supported.
+              gid:       Group ID for debugfs files. Currently only 0 is
+                         supported.
+
+Return Value: Handle for generated debug area
+              NULL if register failed
+
+Description:  Allocates memory for a debug log
+              Must not be called within an interrupt handler
+
 ---------------------------------------------------------------------------
 void debug_unregister (debug_info_t * id);
 
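For reference, a minimal usage sketch of the interface documented above; the driver name "mydrv", the event text and the chosen mode are illustrative only, while debug_register_view(), debug_hex_ascii_view and debug_text_event() are part of the existing s390dbf API described elsewhere in this file:

static debug_info_t *mydrv_dbf;

static int __init mydrv_init(void)
{
        /* World-readable, root-writable debugfs files; uid/gid must be 0. */
        mydrv_dbf = debug_register_mode("mydrv", 4, 1, 16,
                                        S_IRUGO | S_IWUSR, 0, 0);
        if (!mydrv_dbf)
                return -ENOMEM;
        debug_register_view(mydrv_dbf, &debug_hex_ascii_view);
        debug_text_event(mydrv_dbf, 3, "mydrv initialized");
        return 0;
}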
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1831833c430e..f6a68e178fc5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -3,6 +3,10 @@
3# see Documentation/kbuild/kconfig-language.txt. 3# see Documentation/kbuild/kconfig-language.txt.
4# 4#
5 5
6config SCHED_MC
7 def_bool y
8 depends on SMP
9
6config MMU 10config MMU
7 def_bool y 11 def_bool y
8 12
@@ -39,6 +43,9 @@ config GENERIC_HWEIGHT
39config GENERIC_TIME 43config GENERIC_TIME
40 def_bool y 44 def_bool y
41 45
46config GENERIC_CLOCKEVENTS
47 def_bool y
48
42config GENERIC_BUG 49config GENERIC_BUG
43 bool 50 bool
44 depends on BUG 51 depends on BUG
@@ -69,6 +76,8 @@ menu "Base setup"
69 76
70comment "Processor type and features" 77comment "Processor type and features"
71 78
79source "kernel/time/Kconfig"
80
72config 64BIT 81config 64BIT
73 bool "64 bit kernel" 82 bool "64 bit kernel"
74 help 83 help
@@ -301,10 +310,7 @@ config QDIO
301 tristate "QDIO support" 310 tristate "QDIO support"
302 ---help--- 311 ---help---
303 This driver provides the Queued Direct I/O base support for 312 This driver provides the Queued Direct I/O base support for
304 IBM mainframes. 313 IBM System z.
305
306 For details please refer to the documentation provided by IBM at
307 <http://www10.software.ibm.com/developerworks/opensource/linux390>
308 314
309 To compile this driver as a module, choose M here: the 315 To compile this driver as a module, choose M here: the
310 module will be called qdio. 316 module will be called qdio.
@@ -486,25 +492,6 @@ config APPLDATA_NET_SUM
486 492
487source kernel/Kconfig.hz 493source kernel/Kconfig.hz
488 494
489config NO_IDLE_HZ
490 bool "No HZ timer ticks in idle"
491 help
492 Switches the regular HZ timer off when the system is going idle.
493 This helps z/VM to detect that the Linux system is idle. VM can
494 then "swap-out" this guest which reduces memory usage. It also
495 reduces the overhead of idle systems.
496
497 The HZ timer can be switched on/off via /proc/sys/kernel/hz_timer.
498 hz_timer=0 means HZ timer is disabled. hz_timer=1 means HZ
499 timer is active.
500
501config NO_IDLE_HZ_INIT
502 bool "HZ timer in idle off by default"
503 depends on NO_IDLE_HZ
504 help
505 The HZ timer is switched off in idle by default. That means the
506 HZ timer is already disabled at boot time.
507
508config S390_HYPFS_FS 495config S390_HYPFS_FS
509 bool "s390 hypervisor file system support" 496 bool "s390 hypervisor file system support"
510 select SYS_HYPERVISOR 497 select SYS_HYPERVISOR
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index a3f67f8b5427..e33f32b54c08 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -499,7 +499,7 @@ static struct crypto_alg cbc_aes_alg = {
499 } 499 }
500}; 500};
501 501
502static int __init aes_init(void) 502static int __init aes_s390_init(void)
503{ 503{
504 int ret; 504 int ret;
505 505
@@ -542,15 +542,15 @@ aes_err:
542 goto out; 542 goto out;
543} 543}
544 544
545static void __exit aes_fini(void) 545static void __exit aes_s390_fini(void)
546{ 546{
547 crypto_unregister_alg(&cbc_aes_alg); 547 crypto_unregister_alg(&cbc_aes_alg);
548 crypto_unregister_alg(&ecb_aes_alg); 548 crypto_unregister_alg(&ecb_aes_alg);
549 crypto_unregister_alg(&aes_alg); 549 crypto_unregister_alg(&aes_alg);
550} 550}
551 551
552module_init(aes_init); 552module_init(aes_s390_init);
553module_exit(aes_fini); 553module_exit(aes_s390_fini);
554 554
555MODULE_ALIAS("aes"); 555MODULE_ALIAS("aes");
556 556
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index ea22707f435f..4aba83b31596 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -550,7 +550,7 @@ static struct crypto_alg cbc_des3_192_alg = {
550 } 550 }
551}; 551};
552 552
553static int init(void) 553static int des_s390_init(void)
554{ 554{
555 int ret = 0; 555 int ret = 0;
556 556
@@ -612,7 +612,7 @@ des_err:
612 goto out; 612 goto out;
613} 613}
614 614
615static void __exit fini(void) 615static void __exit des_s390_fini(void)
616{ 616{
617 crypto_unregister_alg(&cbc_des3_192_alg); 617 crypto_unregister_alg(&cbc_des3_192_alg);
618 crypto_unregister_alg(&ecb_des3_192_alg); 618 crypto_unregister_alg(&ecb_des3_192_alg);
@@ -625,8 +625,8 @@ static void __exit fini(void)
625 crypto_unregister_alg(&des_alg); 625 crypto_unregister_alg(&des_alg);
626} 626}
627 627
628module_init(init); 628module_init(des_s390_init);
629module_exit(fini); 629module_exit(des_s390_fini);
630 630
631MODULE_ALIAS("des"); 631MODULE_ALIAS("des");
632MODULE_ALIAS("des3_ede"); 632MODULE_ALIAS("des3_ede");
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 5a834f6578ab..9cf9eca22747 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -137,7 +137,7 @@ static struct crypto_alg alg = {
137 .dia_final = sha1_final } } 137 .dia_final = sha1_final } }
138}; 138};
139 139
140static int __init init(void) 140static int __init sha1_s390_init(void)
141{ 141{
142 if (!crypt_s390_func_available(KIMD_SHA_1)) 142 if (!crypt_s390_func_available(KIMD_SHA_1))
143 return -EOPNOTSUPP; 143 return -EOPNOTSUPP;
@@ -145,13 +145,13 @@ static int __init init(void)
145 return crypto_register_alg(&alg); 145 return crypto_register_alg(&alg);
146} 146}
147 147
148static void __exit fini(void) 148static void __exit sha1_s390_fini(void)
149{ 149{
150 crypto_unregister_alg(&alg); 150 crypto_unregister_alg(&alg);
151} 151}
152 152
153module_init(init); 153module_init(sha1_s390_init);
154module_exit(fini); 154module_exit(sha1_s390_fini);
155 155
156MODULE_ALIAS("sha1"); 156MODULE_ALIAS("sha1");
157 157
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index ccf8633c4f65..2a3d756b35d4 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -133,7 +133,7 @@ static struct crypto_alg alg = {
133 .dia_final = sha256_final } } 133 .dia_final = sha256_final } }
134}; 134};
135 135
136static int init(void) 136static int sha256_s390_init(void)
137{ 137{
138 if (!crypt_s390_func_available(KIMD_SHA_256)) 138 if (!crypt_s390_func_available(KIMD_SHA_256))
139 return -EOPNOTSUPP; 139 return -EOPNOTSUPP;
@@ -141,13 +141,13 @@ static int init(void)
141 return crypto_register_alg(&alg); 141 return crypto_register_alg(&alg);
142} 142}
143 143
144static void __exit fini(void) 144static void __exit sha256_s390_fini(void)
145{ 145{
146 crypto_unregister_alg(&alg); 146 crypto_unregister_alg(&alg);
147} 147}
148 148
149module_init(init); 149module_init(sha256_s390_init);
150module_exit(fini); 150module_exit(sha256_s390_fini);
151 151
152MODULE_ALIAS("sha256"); 152MODULE_ALIAS("sha256");
153 153
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 62f6b5a606dd..dcc3ec2ef643 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -3,6 +3,7 @@
3# Linux kernel version: 2.6.25-rc4 3# Linux kernel version: 2.6.25-rc4
4# Wed Mar 5 11:22:59 2008 4# Wed Mar 5 11:22:59 2008
5# 5#
6CONFIG_SCHED_MC=y
6CONFIG_MMU=y 7CONFIG_MMU=y
7CONFIG_ZONE_DMA=y 8CONFIG_ZONE_DMA=y
8CONFIG_LOCKDEP_SUPPORT=y 9CONFIG_LOCKDEP_SUPPORT=y
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4d3e38392cb1..ef2b2470c25c 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -19,7 +19,7 @@ obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
 extra-y += head.o init_task.o vmlinux.lds
 
 obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
-obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP) += smp.o topology.o
 
 obj-$(CONFIG_AUDIT) += audit.o
 compat-obj-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index e89f8c0c42a0..20723a062017 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -162,4 +162,77 @@ struct ucontext32 {
         compat_sigset_t uc_sigmask;     /* mask last for extensibility */
 };
 
+struct __sysctl_args32;
+struct stat64_emu31;
+struct mmap_arg_struct_emu31;
+struct fadvise64_64_args;
+struct old_sigaction32;
+struct old_sigaction32;
+
+long sys32_chown16(const char __user * filename, u16 user, u16 group);
+long sys32_lchown16(const char __user * filename, u16 user, u16 group);
+long sys32_fchown16(unsigned int fd, u16 user, u16 group);
+long sys32_setregid16(u16 rgid, u16 egid);
+long sys32_setgid16(u16 gid);
+long sys32_setreuid16(u16 ruid, u16 euid);
+long sys32_setuid16(u16 uid);
+long sys32_setresuid16(u16 ruid, u16 euid, u16 suid);
+long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid);
+long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid);
+long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid);
+long sys32_setfsuid16(u16 uid);
+long sys32_setfsgid16(u16 gid);
+long sys32_getgroups16(int gidsetsize, u16 __user *grouplist);
+long sys32_setgroups16(int gidsetsize, u16 __user *grouplist);
+long sys32_getuid16(void);
+long sys32_geteuid16(void);
+long sys32_getgid16(void);
+long sys32_getegid16(void);
+long sys32_ipc(u32 call, int first, int second, int third, u32 ptr);
+long sys32_truncate64(const char __user * path, unsigned long high,
+                      unsigned long low);
+long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low);
+long sys32_sched_rr_get_interval(compat_pid_t pid,
+                                 struct compat_timespec __user *interval);
+long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+                          compat_sigset_t __user *oset, size_t sigsetsize);
+long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize);
+long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo);
+long sys32_execve(void);
+long sys32_init_module(void __user *umod, unsigned long len,
+                       const char __user *uargs);
+long sys32_delete_module(const char __user *name_user, unsigned int flags);
+long sys32_gettimeofday(struct compat_timeval __user *tv,
+                        struct timezone __user *tz);
+long sys32_settimeofday(struct compat_timeval __user *tv,
+                        struct timezone __user *tz);
+long sys32_pause(void);
+long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count,
+                   u32 poshi, u32 poslo);
+long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
+                    size_t count, u32 poshi, u32 poslo);
+compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count);
+long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
+                    size_t count);
+long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset,
+                      s32 count);
+long sys32_sysctl(struct __sysctl_args32 __user *args);
+long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf);
+long sys32_lstat64(char __user * filename,
+                   struct stat64_emu31 __user * statbuf);
+long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf);
+long sys32_fstatat64(unsigned int dfd, char __user *filename,
+                     struct stat64_emu31 __user* statbuf, int flag);
+unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg);
+long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg);
+long sys32_read(unsigned int fd, char __user * buf, size_t count);
+long sys32_write(unsigned int fd, char __user * buf, size_t count);
+long sys32_clone(void);
+long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise);
+long sys32_fadvise64_64(struct fadvise64_64_args __user *args);
+long sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
+                     struct old_sigaction32 __user *oact);
+long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
+                        struct sigaction32 __user *oact, size_t sigsetsize);
+long sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss);
 #endif /* _ASM_S390X_S390_H */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index a5692c460bad..c7f02e777af2 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -29,6 +29,7 @@
 #include <asm/lowcore.h>
 #include "compat_linux.h"
 #include "compat_ptrace.h"
+#include "entry.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
@@ -428,6 +429,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
         /* Default to using normal stack */
         sp = (unsigned long) A(regs->gprs[15]);
 
+        /* Overflow on alternate signal stack gives SIGSEGV. */
+        if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
+                return (void __user *) -1UL;
+
         /* This is the X/Open sanctioned signal stack switching. */
         if (ka->sa.sa_flags & SA_ONSTACK) {
                 if (! sas_ss_flags(sp))
@@ -461,6 +466,9 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
         if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
                 goto give_sigsegv;
 
+        if (frame == (void __user *) -1UL)
+                goto give_sigsegv;
+
         if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
                 goto give_sigsegv;
 
@@ -514,6 +522,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
         if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
                 goto give_sigsegv;
 
+        if (frame == (void __user *) -1UL)
+                goto give_sigsegv;
+
         if (copy_siginfo_to_user32(&frame->info, info))
                 goto give_sigsegv;
 
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 1b2f5ce45320..1e7d4ac7068b 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -73,7 +73,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf,
 static int debug_open(struct inode *inode, struct file *file);
 static int debug_close(struct inode *inode, struct file *file);
 static debug_info_t* debug_info_create(char *name, int pages_per_area,
-                        int nr_areas, int buf_size);
+                        int nr_areas, int buf_size, mode_t mode);
 static void debug_info_get(debug_info_t *);
 static void debug_info_put(debug_info_t *);
 static int debug_prolog_level_fn(debug_info_t * id,
@@ -157,7 +157,7 @@ struct debug_view debug_sprintf_view = {
 };
 
 /* used by dump analysis tools to determine version of debug feature */
-unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
+static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
 
 /* static globals */
 
@@ -327,7 +327,8 @@ debug_info_free(debug_info_t* db_info){
  */
 
 static debug_info_t*
-debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size)
+debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size,
+                  mode_t mode)
 {
         debug_info_t* rc;
 
@@ -336,6 +337,8 @@ debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size)
         if(!rc)
                 goto out;
 
+        rc->mode = mode & ~S_IFMT;
+
         /* create root directory */
         rc->debugfs_root_entry = debugfs_create_dir(rc->name,
                                                     debug_debugfs_root_entry);
@@ -676,23 +679,30 @@ debug_close(struct inode *inode, struct file *file)
 }
 
 /*
- * debug_register:
- * - creates and initializes debug area for the caller
- * - returns handle for debug area
+ * debug_register_mode:
+ * - Creates and initializes debug area for the caller
+ *   The mode parameter allows to specify access rights for the s390dbf files
+ * - Returns handle for debug area
  */
 
-debug_info_t*
-debug_register (char *name, int pages_per_area, int nr_areas, int buf_size)
+debug_info_t *debug_register_mode(char *name, int pages_per_area, int nr_areas,
+                                  int buf_size, mode_t mode, uid_t uid,
+                                  gid_t gid)
 {
         debug_info_t *rc = NULL;
 
+        /* Since debugfs currently does not support uid/gid other than root, */
+        /* we do not allow gid/uid != 0 until we get support for that. */
+        if ((uid != 0) || (gid != 0))
+                printk(KERN_WARNING "debug: Warning - Currently only uid/gid "
+                       "= 0 are supported. Using root as owner now!");
         if (!initialized)
                 BUG();
         mutex_lock(&debug_mutex);
 
         /* create new debug_info */
 
-        rc = debug_info_create(name, pages_per_area, nr_areas, buf_size);
+        rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
         if(!rc)
                 goto out;
         debug_register_view(rc, &debug_level_view);
@@ -705,6 +715,20 @@ out:
         mutex_unlock(&debug_mutex);
         return rc;
 }
+EXPORT_SYMBOL(debug_register_mode);
+
+/*
+ * debug_register:
+ * - creates and initializes debug area for the caller
+ * - returns handle for debug area
+ */
+
+debug_info_t *debug_register(char *name, int pages_per_area, int nr_areas,
+                             int buf_size)
+{
+        return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
+                                   S_IRUSR | S_IWUSR, 0, 0);
+}
 
 /*
  * debug_unregister:
@@ -1073,15 +1097,16 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
         int rc = 0;
         int i;
         unsigned long flags;
-        mode_t mode = S_IFREG;
+        mode_t mode;
         struct dentry *pde;
 
         if (!id)
                 goto out;
-        if (view->prolog_proc || view->format_proc || view->header_proc)
-                mode |= S_IRUSR;
-        if (view->input_proc)
-                mode |= S_IWUSR;
+        mode = (id->mode | S_IFREG) & ~S_IXUGO;
+        if (!(view->prolog_proc || view->format_proc || view->header_proc))
+                mode &= ~(S_IRUSR | S_IRGRP | S_IROTH);
+        if (!view->input_proc)
+                mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
         pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
                                   id , &debug_file_ops);
         if (!pde){
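As the hunks above show, the existing debug_register() entry point is now a thin wrapper around the new interface; the two calls in this sketch (the name "mydrv" is illustrative) are equivalent:

dbf = debug_register("mydrv", 4, 1, 16);
/* ... now behaves exactly like ... */
dbf = debug_register_mode("mydrv", 4, 1, 16, S_IRUSR | S_IWUSR, 0, 0);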
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 01832c440636..540a67f979b6 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -21,6 +21,7 @@
 #include <asm/setup.h>
 #include <asm/cpcmd.h>
 #include <asm/sclp.h>
+#include "entry.h"
 
 /*
  * Create a Kernel NSS if the SAVESYS= parameter is defined
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
new file mode 100644
index 000000000000..6b1896345eda
--- /dev/null
+++ b/arch/s390/kernel/entry.h
@@ -0,0 +1,60 @@
+#ifndef _ENTRY_H
+#define _ENTRY_H
+
+#include <linux/types.h>
+#include <linux/signal.h>
+#include <asm/ptrace.h>
+
+typedef void pgm_check_handler_t(struct pt_regs *, long);
+extern pgm_check_handler_t *pgm_check_table[128];
+pgm_check_handler_t do_protection_exception;
+pgm_check_handler_t do_dat_exception;
+
+extern int sysctl_userprocess_debug;
+
+void do_single_step(struct pt_regs *regs);
+void syscall_trace(struct pt_regs *regs, int entryexit);
+void kernel_stack_overflow(struct pt_regs * regs);
+void do_signal(struct pt_regs *regs);
+int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+                    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
+
+void do_extint(struct pt_regs *regs, unsigned short code);
+int __cpuinit start_secondary(void *cpuvoid);
+void __init startup_init(void);
+void die(const char * str, struct pt_regs * regs, long err);
+
+struct new_utsname;
+struct mmap_arg_struct;
+struct fadvise64_64_args;
+struct old_sigaction;
+struct sel_arg_struct;
+
+long sys_pipe(unsigned long __user *fildes);
+long sys_mmap2(struct mmap_arg_struct __user *arg);
+long old_mmap(struct mmap_arg_struct __user *arg);
+long sys_ipc(uint call, int first, unsigned long second,
+             unsigned long third, void __user *ptr);
+long s390x_newuname(struct new_utsname __user *name);
+long s390x_personality(unsigned long personality);
+long s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
+                    size_t len, int advice);
+long s390_fadvise64_64(struct fadvise64_64_args __user *args);
+long s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low);
+long sys_fork(void);
+long sys_clone(void);
+long sys_vfork(void);
+void execve_tail(void);
+long sys_execve(void);
+int sys_sigsuspend(int history0, int history1, old_sigset_t mask);
+long sys_sigaction(int sig, const struct old_sigaction __user *act,
+                   struct old_sigaction __user *oact);
+long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss);
+long sys_sigreturn(void);
+long sys_rt_sigreturn(void);
+long sys32_sigreturn(void);
+long sys32_rt_sigreturn(void);
+long old_select(struct sel_arg_struct __user *arg);
+long sys_ptrace(long request, long pid, long addr, long data);
+
+#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index efde6e178f6c..cd959c0b2e16 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -475,6 +475,7 @@ pgm_check_handler:
 pgm_no_vtime:
 #endif
         lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
+        mvc     SP_ARGS(8,%r15),__LC_LAST_BREAK
         TRACE_IRQS_OFF
         lgf     %r3,__LC_PGM_ILC        # load program interruption code
         lghi    %r8,0x7f
@@ -847,6 +848,7 @@ stack_overflow:
         je      0f
         la      %r1,__LC_SAVE_AREA+32
 0:      mvc     SP_R12(32,%r15),0(%r1)  # move %r12-%r15 to stack
+        mvc     SP_ARGS(8,%r15),__LC_LAST_BREAK
         xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
         la      %r2,SP_PTREGS(%r15)     # load pt_regs
         jg      kernel_stack_overflow
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 375232c46c7a..532542447d66 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -655,7 +655,7 @@ static struct kobj_attribute reipl_type_attr =
655 655
656static struct kset *reipl_kset; 656static struct kset *reipl_kset;
657 657
658void reipl_run(struct shutdown_trigger *trigger) 658static void reipl_run(struct shutdown_trigger *trigger)
659{ 659{
660 struct ccw_dev_id devid; 660 struct ccw_dev_id devid;
661 static char buf[100]; 661 static char buf[100];
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index c5549a206284..ed04d1372d5d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -360,7 +360,7 @@ no_kprobe:
360 * - When the probed function returns, this probe 360 * - When the probed function returns, this probe
361 * causes the handlers to fire 361 * causes the handlers to fire
362 */ 362 */
363void kretprobe_trampoline_holder(void) 363static void __used kretprobe_trampoline_holder(void)
364{ 364{
365 asm volatile(".global kretprobe_trampoline\n" 365 asm volatile(".global kretprobe_trampoline\n"
366 "kretprobe_trampoline: bcr 0,0\n"); 366 "kretprobe_trampoline: bcr 0,0\n");
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ce203154d8ce..c1aff194141d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -36,6 +36,8 @@
 #include <linux/module.h>
 #include <linux/notifier.h>
 #include <linux/utsname.h>
+#include <linux/tick.h>
+#include <linux/elfcore.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -44,6 +46,7 @@
 #include <asm/irq.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
+#include "entry.h"
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
@@ -76,6 +79,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
  * Need to know about CPUs going idle?
  */
 static ATOMIC_NOTIFIER_HEAD(idle_chain);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 int register_idle_notifier(struct notifier_block *nb)
 {
@@ -89,9 +93,33 @@ int unregister_idle_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_idle_notifier);
 
-void do_monitor_call(struct pt_regs *regs, long interruption_code)
+static int s390_idle_enter(void)
+{
+        struct s390_idle_data *idle;
+        int nr_calls = 0;
+        void *hcpu;
+        int rc;
+
+        hcpu = (void *)(long)smp_processor_id();
+        rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
+                                          &nr_calls);
+        if (rc == NOTIFY_BAD) {
+                nr_calls--;
+                __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
+                                             hcpu, nr_calls, NULL);
+                return rc;
+        }
+        idle = &__get_cpu_var(s390_idle);
+        spin_lock(&idle->lock);
+        idle->idle_count++;
+        idle->in_idle = 1;
+        idle->idle_enter = get_clock();
+        spin_unlock(&idle->lock);
+        return NOTIFY_OK;
+}
+
+void s390_idle_leave(void)
 {
-#ifdef CONFIG_SMP
         struct s390_idle_data *idle;
 
         idle = &__get_cpu_var(s390_idle);
@@ -99,10 +127,6 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code)
         idle->idle_time += get_clock() - idle->idle_enter;
         idle->in_idle = 0;
         spin_unlock(&idle->lock);
-#endif
-        /* disable monitor call class 0 */
-        __ctl_clear_bit(8, 15);
-
         atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
                                    (void *)(long) smp_processor_id());
 }
@@ -113,61 +137,30 @@ extern void s390_handle_mcck(void);
  */
 static void default_idle(void)
 {
-        int cpu, rc;
-        int nr_calls = 0;
-        void *hcpu;
-#ifdef CONFIG_SMP
-        struct s390_idle_data *idle;
-#endif
-
         /* CPU is going idle. */
-        cpu = smp_processor_id();
-        hcpu = (void *)(long)cpu;
         local_irq_disable();
         if (need_resched()) {
                 local_irq_enable();
                 return;
         }
-
-        rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
-                                          &nr_calls);
-        if (rc == NOTIFY_BAD) {
-                nr_calls--;
-                __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-                                             hcpu, nr_calls, NULL);
+        if (s390_idle_enter() == NOTIFY_BAD) {
                 local_irq_enable();
                 return;
         }
-
-        /* enable monitor call class 0 */
-        __ctl_set_bit(8, 15);
-
 #ifdef CONFIG_HOTPLUG_CPU
-        if (cpu_is_offline(cpu)) {
+        if (cpu_is_offline(smp_processor_id())) {
                 preempt_enable_no_resched();
                 cpu_die();
         }
 #endif
-
         local_mcck_disable();
         if (test_thread_flag(TIF_MCCK_PENDING)) {
                 local_mcck_enable();
-                /* disable monitor call class 0 */
-                __ctl_clear_bit(8, 15);
-                atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-                                           hcpu);
+                s390_idle_leave();
                 local_irq_enable();
                 s390_handle_mcck();
                 return;
         }
-#ifdef CONFIG_SMP
-        idle = &__get_cpu_var(s390_idle);
-        spin_lock(&idle->lock);
-        idle->idle_count++;
-        idle->in_idle = 1;
-        idle->idle_enter = get_clock();
-        spin_unlock(&idle->lock);
-#endif
         trace_hardirqs_on();
         /* Wait for external, I/O or machine check interrupt. */
         __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
@@ -177,9 +170,10 @@ static void default_idle(void)
 void cpu_idle(void)
 {
         for (;;) {
+                tick_nohz_stop_sched_tick();
                 while (!need_resched())
                         default_idle();
-
+                tick_nohz_restart_sched_tick();
                 preempt_enable_no_resched();
                 schedule();
                 preempt_disable();
@@ -201,6 +195,7 @@ void show_regs(struct pt_regs *regs)
         /* Show stack backtrace if pt_regs is from kernel mode */
         if (!(regs->psw.mask & PSW_MASK_PSTATE))
                 show_trace(NULL, (unsigned long *) regs->gprs[15]);
+        show_last_breaking_event(regs);
 }
 
 extern void kernel_thread_starter(void);
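The reworked default_idle() above still drives the existing idle notifier chain through s390_idle_enter()/s390_idle_leave(); returning NOTIFY_BAD from the S390_CPU_IDLE case vetoes the idle transition, as the hunk shows. A hypothetical consumer would look roughly like this (handler and variable names are illustrative only):

static int mydrv_idle_notify(struct notifier_block *self,
                             unsigned long action, void *hcpu)
{
        switch (action) {
        case S390_CPU_IDLE:
                /* CPU (long)hcpu is about to load an enabled wait PSW. */
                break;
        case S390_CPU_NOT_IDLE:
                /* CPU (long)hcpu is running again. */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block mydrv_idle_nb = {
        .notifier_call = mydrv_idle_notify,
};

/* register_idle_notifier(&mydrv_idle_nb) from module init,
 * unregister_idle_notifier(&mydrv_idle_nb) on exit. */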
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 6e036bae9875..58a064296987 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -41,6 +41,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
+#include "entry.h"
 
 #ifdef CONFIG_COMPAT
 #include "compat_ptrace.h"
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index acf93dba7727..e019b419efc6 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -13,11 +13,12 @@
 #include <linux/errno.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-
+#include <asm/cpu.h>
 #include <asm/lowcore.h>
 #include <asm/s390_ext.h>
 #include <asm/irq_regs.h>
 #include <asm/irq.h>
+#include "entry.h"
 
 /*
  * ext_int_hash[index] is the start of the list for all external interrupts
@@ -119,13 +120,10 @@ void do_extint(struct pt_regs *regs, unsigned short code)
 
         old_regs = set_irq_regs(regs);
         irq_enter();
-        asm volatile ("mc 0,0");
-        if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
-                /**
-                 * Make sure that the i/o interrupt did not "overtake"
-                 * the last HZ timer interrupt.
-                 */
-                account_ticks(S390_lowcore.int_clock);
+        s390_idle_check();
+        if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+                /* Serve timer interrupts first. */
+                clock_comparator_work();
         kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
         index = ext_hash(code);
         for (p = ext_int_hash[index]; p; p = p->next) {
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 290e504061a3..7141147e6b63 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -39,6 +39,7 @@
 #include <linux/pfn.h>
 #include <linux/ctype.h>
 #include <linux/reboot.h>
+#include <linux/topology.h>
 
 #include <asm/ipl.h>
 #include <asm/uaccess.h>
@@ -427,7 +428,7 @@ setup_lowcore(void)
         lc->io_new_psw.mask = psw_kernel_bits;
         lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
         lc->ipl_device = S390_lowcore.ipl_device;
-        lc->jiffy_timer = -1LL;
+        lc->clock_comparator = -1ULL;
         lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
         lc->async_stack = (unsigned long)
                 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
@@ -687,7 +688,7 @@ static __init unsigned int stfl(void)
         return S390_lowcore.stfl_fac_list;
 }
 
-static __init int stfle(unsigned long long *list, int doublewords)
+static int __init __stfle(unsigned long long *list, int doublewords)
 {
         typedef struct { unsigned long long _[doublewords]; } addrtype;
         register unsigned long __nr asm("0") = doublewords - 1;
@@ -697,6 +698,13 @@ static __init int stfle(unsigned long long *list, int doublewords)
         return __nr + 1;
 }
 
+int __init stfle(unsigned long long *list, int doublewords)
+{
+        if (!(stfl() & (1UL << 24)))
+                return -EOPNOTSUPP;
+        return __stfle(list, doublewords);
+}
+
 /*
  * Setup hardware capabilities.
  */
@@ -741,7 +749,7 @@ static void __init setup_hwcaps(void)
          * HWCAP_S390_DFP bit 6.
          */
         if ((elf_hwcap & (1UL << 2)) &&
-            stfle(&facility_list_extended, 1) > 0) {
+            __stfle(&facility_list_extended, 1) > 0) {
                 if (facility_list_extended & (1ULL << (64 - 43)))
                         elf_hwcap |= 1UL << 6;
         }
@@ -823,6 +831,7 @@ setup_arch(char **cmdline_p)
 
         cpu_init();
         __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
+        s390_init_cpu_topology();
 
         /*
          * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
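The new stfle() wrapper above returns -EOPNOTSUPP when the store-facility-list-extended facility (STFL bit 24) is absent, and otherwise the number of doublewords stored. A hypothetical caller checking a single facility bit could look like this sketch; facility bit 42 is only an example, while the (64 - n) bit-numbering idiom comes from the hunk above:

static int __init example_has_facility42(void)
{
        unsigned long long list[1];

        if (stfle(list, 1) < 1)
                return 0;       /* no extended facility list available */
        return (list[0] & (1ULL << (64 - 42))) != 0;
}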
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4449bf32cbf1..b97682040215 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -27,6 +27,7 @@
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/lowcore.h>
+#include "entry.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
@@ -235,6 +236,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
         /* Default to using normal stack */
         sp = regs->gprs[15];
 
+        /* Overflow on alternate signal stack gives SIGSEGV. */
+        if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
+                return (void __user *) -1UL;
+
         /* This is the X/Open sanctioned signal stack switching. */
         if (ka->sa.sa_flags & SA_ONSTACK) {
                 if (! sas_ss_flags(sp))
@@ -270,6 +275,9 @@ static int setup_frame(int sig, struct k_sigaction *ka,
         if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe)))
                 goto give_sigsegv;
 
+        if (frame == (void __user *) -1UL)
+                goto give_sigsegv;
+
         if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
                 goto give_sigsegv;
 
@@ -327,6 +335,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
         if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe)))
                 goto give_sigsegv;
 
+        if (frame == (void __user *) -1UL)
+                goto give_sigsegv;
+
         if (copy_siginfo_to_user(&frame->info, info))
                 goto give_sigsegv;
 
@@ -474,11 +485,6 @@ void do_signal(struct pt_regs *regs)
         int ret;
 #ifdef CONFIG_COMPAT
         if (test_thread_flag(TIF_31BIT)) {
-                extern int handle_signal32(unsigned long sig,
-                                           struct k_sigaction *ka,
-                                           siginfo_t *info,
-                                           sigset_t *oldset,
-                                           struct pt_regs *regs);
                 ret = handle_signal32(signr, &ka, &info, oldset, regs);
         }
         else
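A note on the overflow test added to get_sigframe() above and to its compat counterpart: (sp - frame_size) & -8UL is the 8-byte-aligned start of the signal frame that is about to be written. If sp is still on the alternate signal stack but that rounded-down address is not, the frame would run off the bottom of the sigaltstack area, so the function returns (void __user *) -1UL and the setup_*frame() callers force SIGSEGV instead of silently corrupting memory. A worked example, with made-up addresses and sizes and ignoring the exact boundary convention of on_sig_stack():

/* sigaltstack: ss_sp = 0x80000000, ss_size = 0x2000
 *   => on_sig_stack(x) holds for 0x80000000 <= x < 0x80002000.
 *
 * sp = 0x80000100 (on the stack), frame_size = 0x260:
 *   (0x80000100 - 0x260) & -8UL = 0x7ffffea0, which is below ss_sp,
 *   so get_sigframe() returns (void __user *) -1UL and the caller
 *   jumps to give_sigsegv.
 */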
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8f894d380a62..0dfa988c1b26 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -44,6 +44,7 @@
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
 #include <asm/cpu.h>
+#include "entry.h"
 
 /*
  * An array with a pointer the lowcore of every CPU.
@@ -67,13 +68,12 @@ enum s390_cpu_state {
         CPU_STATE_CONFIGURED,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-static DEFINE_MUTEX(smp_cpu_state_mutex);
-#endif
+DEFINE_MUTEX(smp_cpu_state_mutex);
+int smp_cpu_polarization[NR_CPUS];
 static int smp_cpu_state[NR_CPUS];
+static int cpu_management;
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 static void smp_ext_bitcall(int, ec_bit_sig);
 
@@ -298,7 +298,7 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 /*
  * this function sends a 'purge tlb' signal to another CPU.
  */
-void smp_ptlb_callback(void *info)
+static void smp_ptlb_callback(void *info)
 {
         __tlb_flush_local();
 }
@@ -456,6 +456,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
                 if (cpu_known(cpu_id))
                         continue;
                 __cpu_logical_map[logical_cpu] = cpu_id;
+                smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
                 if (!cpu_stopped(logical_cpu))
                         continue;
                 cpu_set(logical_cpu, cpu_present_map);
@@ -489,6 +490,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
                 if (cpu_known(cpu_id))
                         continue;
                 __cpu_logical_map[logical_cpu] = cpu_id;
+                smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
                 cpu_set(logical_cpu, cpu_present_map);
                 if (cpu >= info->configured)
                         smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
@@ -846,6 +848,7 @@ void __init smp_prepare_boot_cpu(void)
         S390_lowcore.percpu_offset = __per_cpu_offset[0];
         current_set[0] = current;
         smp_cpu_state[0] = CPU_STATE_CONFIGURED;
+        smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
         spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
 }
 
@@ -897,15 +900,19 @@ static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
         case 0:
                 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
                         rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
-                        if (!rc)
+                        if (!rc) {
                                 smp_cpu_state[cpu] = CPU_STATE_STANDBY;
+                                smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+                        }
                 }
                 break;
         case 1:
                 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
                         rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
-                        if (!rc)
+                        if (!rc) {
                                 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
+                                smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+                        }
                 }
                 break;
         default:
@@ -919,6 +926,34 @@ out:
 static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
+static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf)
+{
+        int cpu = dev->id;
+        ssize_t count;
+
+        mutex_lock(&smp_cpu_state_mutex);
+        switch (smp_cpu_polarization[cpu]) {
+        case POLARIZATION_HRZ:
+                count = sprintf(buf, "horizontal\n");
+                break;
+        case POLARIZATION_VL:
+                count = sprintf(buf, "vertical:low\n");
+                break;
+        case POLARIZATION_VM:
+                count = sprintf(buf, "vertical:medium\n");
+                break;
+        case POLARIZATION_VH:
+                count = sprintf(buf, "vertical:high\n");
+                break;
+        default:
+                count = sprintf(buf, "unknown\n");
+                break;
+        }
+        mutex_unlock(&smp_cpu_state_mutex);
+        return count;
+}
+static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
+
 static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
 {
         return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
@@ -931,6 +966,7 @@ static struct attribute *cpu_common_attrs[] = {
         &attr_configure.attr,
 #endif
         &attr_address.attr,
+        &attr_polarization.attr,
         NULL,
 };
 
@@ -1075,11 +1111,48 @@ static ssize_t __ref rescan_store(struct sys_device *dev,
 out:
         put_online_cpus();
         mutex_unlock(&smp_cpu_state_mutex);
+        if (!cpus_empty(newcpus))
+                topology_schedule_update();
         return rc ? rc : count;
 }
 static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
+static ssize_t dispatching_show(struct sys_device *dev, char *buf)
+{
+        ssize_t count;
+
+        mutex_lock(&smp_cpu_state_mutex);
+        count = sprintf(buf, "%d\n", cpu_management);
+        mutex_unlock(&smp_cpu_state_mutex);
+        return count;
+}
+
+static ssize_t dispatching_store(struct sys_device *dev, const char *buf,
+                                 size_t count)
+{
+        int val, rc;
+        char delim;
+
+        if (sscanf(buf, "%d %c", &val, &delim) != 1)
+                return -EINVAL;
+        if (val != 0 && val != 1)
+                return -EINVAL;
+        rc = 0;
+        mutex_lock(&smp_cpu_state_mutex);
+        get_online_cpus();
+        if (cpu_management == val)
+                goto out;
+        rc = topology_set_cpu_management(val);
+        if (!rc)
+                cpu_management = val;
+out:
+        put_online_cpus();
+        mutex_unlock(&smp_cpu_state_mutex);
+        return rc ? rc : count;
+}
+static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);
+
 static int __init topology_init(void)
 {
         int cpu;
@@ -1093,6 +1166,10 @@ static int __init topology_init(void)
                 if (rc)
                         return rc;
 #endif
+        rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+                               &attr_dispatching.attr);
+        if (rc)
+                return rc;
         for_each_present_cpu(cpu) {
                 rc = smp_add_present_cpu(cpu);
                 if (rc)
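The two attributes added above surface through sysfs; assuming the usual sysdev-class mapping, they should appear as /sys/devices/system/cpu/cpuN/polarization (read-only, per CPU) and /sys/devices/system/cpu/dispatching (write 0 for horizontal, 1 for vertical CPU management). A minimal user-space reader, for illustration only:

#include <stdio.h>

int main(void)
{
        char buf[32];
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/polarization", "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("cpu0 polarization: %s", buf);
        fclose(f);
        return 0;
}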
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index fefee99f28aa..988d0d64c2c8 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -29,8 +29,8 @@
 #include <linux/personality.h>
 #include <linux/unistd.h>
 #include <linux/ipc.h>
-
 #include <asm/uaccess.h>
+#include "entry.h"
 
 /*
  * sys_pipe() is the normal C calling standard for creating
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index cb232c155360..7aec676fefd5 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -30,7 +30,7 @@
 #include <linux/timex.h>
 #include <linux/notifier.h>
 #include <linux/clocksource.h>
-
+#include <linux/clockchips.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
 #include <asm/s390_ext.h>
@@ -39,6 +39,7 @@
 #include <asm/irq_regs.h>
 #include <asm/timer.h>
 #include <asm/etr.h>
+#include <asm/cio.h>
 
 /* change this if you have some constant time drift */
 #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
@@ -57,16 +58,16 @@
 
 static ext_int_info_t ext_int_info_cc;
 static ext_int_info_t ext_int_etr_cc;
-static u64 init_timer_cc;
 static u64 jiffies_timer_cc;
-static u64 xtime_cc;
+
+static DEFINE_PER_CPU(struct clock_event_device, comparators);
 
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
 unsigned long long sched_clock(void)
 {
-        return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
+        return ((get_clock_xt() - jiffies_timer_cc) * 125) >> 9;
 }
 
 /*
@@ -95,162 +96,40 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
 #define s390_do_profile()       do { ; } while(0)
 #endif /* CONFIG_PROFILING */
 
-/*
- * Advance the per cpu tick counter up to the time given with the
- * "time" argument. The per cpu update consists of accounting
- * the virtual cpu time, calling update_process_times and calling
- * the profiling hook. If xtime is before time it is advanced as well.
- */
-void account_ticks(u64 time)
+void clock_comparator_work(void)
 {
-        __u32 ticks;
-        __u64 tmp;
-
-        /* Calculate how many ticks have passed. */
-        if (time < S390_lowcore.jiffy_timer)
-                return;
-        tmp = time - S390_lowcore.jiffy_timer;
-        if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
-                ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
-                S390_lowcore.jiffy_timer +=
-                        CLK_TICKS_PER_JIFFY * (__u64) ticks;
-        } else if (tmp >= CLK_TICKS_PER_JIFFY) {
-                ticks = 2;
-                S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
-        } else {
-                ticks = 1;
-                S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
-        }
-
-#ifdef CONFIG_SMP
-        /*
-         * Do not rely on the boot cpu to do the calls to do_timer.
-         * Spread it over all cpus instead.
-         */
-        write_seqlock(&xtime_lock);
-        if (S390_lowcore.jiffy_timer > xtime_cc) {
-                __u32 xticks;
-                tmp = S390_lowcore.jiffy_timer - xtime_cc;
-                if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
-                        xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
-                        xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
-                } else {
-                        xticks = 1;
-                        xtime_cc += CLK_TICKS_PER_JIFFY;
-                }
-                do_timer(xticks);
-        }
-        write_sequnlock(&xtime_lock);
-#else
-        do_timer(ticks);
-#endif
-
-        while (ticks--)
-                update_process_times(user_mode(get_irq_regs()));
+        struct clock_event_device *cd;
 
+        S390_lowcore.clock_comparator = -1ULL;
+        set_clock_comparator(S390_lowcore.clock_comparator);
+        cd = &__get_cpu_var(comparators);
+        cd->event_handler(cd);
         s390_do_profile();
 }
 
-#ifdef CONFIG_NO_IDLE_HZ
-
-#ifdef CONFIG_NO_IDLE_HZ_INIT
-int sysctl_hz_timer = 0;
-#else
-int sysctl_hz_timer = 1;
-#endif
-
-/*
- * Stop the HZ tick on the current CPU.
- * Only cpu_idle may call this function.
- */
-static void stop_hz_timer(void)
-{
-        unsigned long flags;
-        unsigned long seq, next;
-        __u64 timer, todval;
-        int cpu = smp_processor_id();
-
-        if (sysctl_hz_timer != 0)
-                return;
-
-        cpu_set(cpu, nohz_cpu_mask);
-
-        /*
-         * Leave the clock comparator set up for the next timer
-         * tick if either rcu or a softirq is pending.
-         */
-        if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
-                cpu_clear(cpu, nohz_cpu_mask);
-                return;
-        }
-
-        /*
-         * This cpu is going really idle. Set up the clock comparator
-         * for the next event.
-         */
-        next = next_timer_interrupt();
-        do {
-                seq = read_seqbegin_irqsave(&xtime_lock, flags);
-                timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
-        } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-        todval = -1ULL;
-        /* Be careful about overflows. */
-        if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
-                timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
-                if (timer >= jiffies_timer_cc)
-                        todval = timer;
-        }
-        set_clock_comparator(todval);
-}
-
 /*
- * Start the HZ tick on the current CPU.
- * Only cpu_idle may call this function.
+ * Fixup the clock comparator.
  */
-static void start_hz_timer(void)
+static void fixup_clock_comparator(unsigned long long delta)
 {
-        if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
+        /* If nobody is waiting there's nothing to fix. */
+        if (S390_lowcore.clock_comparator == -1ULL)
                 return;
-        account_ticks(get_clock());
-        set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
-        cpu_clear(smp_processor_id(), nohz_cpu_mask);
-}
-
-static int nohz_idle_notify(struct notifier_block *self,
-                            unsigned long action, void *hcpu)
-{
-        switch (action) {
-        case S390_CPU_IDLE:
-                stop_hz_timer();
-                break;
-        case S390_CPU_NOT_IDLE:
-                start_hz_timer();
-                break;
-        }
-        return NOTIFY_OK;
+        S390_lowcore.clock_comparator += delta;
+        set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-static struct notifier_block nohz_idle_nb = {
-        .notifier_call = nohz_idle_notify,
-};
-
-static void __init nohz_init(void)
+static int s390_next_event(unsigned long delta,
+                           struct clock_event_device *evt)
 {
-        if (register_idle_notifier(&nohz_idle_nb))
-                panic("Couldn't register idle notifier");
+        S390_lowcore.clock_comparator = get_clock() + delta;
+        set_clock_comparator(S390_lowcore.clock_comparator);
+        return 0;
 }
 
-#endif
-
-/*
- * Set up per cpu jiffy timer and set the clock comparator.
- */
-static void setup_jiffy_timer(void)
+static void s390_set_mode(enum clock_event_mode mode,
+                          struct clock_event_device *evt)
 {
-        /* Set up clock comparator to next jiffy. */
-        S390_lowcore.jiffy_timer =
-                jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
-        set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
 }
 
 /*
@@ -259,7 +138,26 @@ static void setup_jiffy_timer(void)
  */
 void init_cpu_timer(void)
 {
-        setup_jiffy_timer();
+        struct clock_event_device *cd;
+        int cpu;
+
+        S390_lowcore.clock_comparator = -1ULL;
+        set_clock_comparator(S390_lowcore.clock_comparator);
+
+        cpu = smp_processor_id();
+        cd = &per_cpu(comparators, cpu);
+        cd->name                = "comparator";
+        cd->features            = CLOCK_EVT_FEAT_ONESHOT;
+        cd->mult                = 16777;
+        cd->shift               = 12;
+        cd->min_delta_ns        = 1;
+        cd->max_delta_ns        = LONG_MAX;
155 cd->rating = 400;
156 cd->cpumask = cpumask_of_cpu(cpu);
157 cd->set_next_event = s390_next_event;
158 cd->set_mode = s390_set_mode;
159
160 clockevents_register_device(cd);
263 161
264 /* Enable clock comparator timer interrupt. */ 162 /* Enable clock comparator timer interrupt. */
265 __ctl_set_bit(0,11); 163 __ctl_set_bit(0,11);
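The mult/shift pair encodes the inverse conversion for the clockevents core, which in this kernel generation programs an event as clc = (delta_ns * mult) >> shift: 16777 / 2^12 is about 4.096, i.e. roughly 4096 TOD units per microsecond again. A sketch of that arithmetic with these constants:

    /* nanoseconds -> TOD clock units with mult = 16777, shift = 12 */
    static unsigned long long ns_to_tod(unsigned long long delta_ns)
    {
            return (delta_ns * 16777) >> 12;
    }
    /* ns_to_tod(1000) == 4095, i.e. one microsecond rounds down by one TOD unit */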
@@ -270,8 +168,6 @@ void init_cpu_timer(void)
270 168
271static void clock_comparator_interrupt(__u16 code) 169static void clock_comparator_interrupt(__u16 code)
272{ 170{
273 /* set clock comparator for next tick */
274 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
275} 171}
276 172
277static void etr_reset(void); 173static void etr_reset(void);
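Taken together this is a plain oneshot clock_event_device: s390_next_event() arms the comparator, the comparator interrupt above is deliberately empty (the wakeup itself is enough), and clock_comparator_work() parks the comparator at -1ULL and calls the registered event handler. How the pieces are expected to meet at run time, with the handler name purely illustrative and the caller of clock_comparator_work() assumed to live in the interrupt code outside this hunk:

    static void tick_handler(struct clock_event_device *cd)
    {
            /* generic tick/timer processing supplied by the clockevents user */
    }

    static void oneshot_round_trip(struct clock_event_device *cd)
    {
            cd->event_handler = tick_handler;
            s390_next_event(4096 * 1000, cd);  /* arm: fire in ~1 ms of TOD units */
            /* ...comparator interrupt wakes the CPU, interrupt code calls... */
            clock_comparator_work();           /* disarm, then run tick_handler(cd) */
    }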
@@ -316,8 +212,9 @@ static struct clocksource clocksource_tod = {
316 */ 212 */
317void __init time_init(void) 213void __init time_init(void)
318{ 214{
215 u64 init_timer_cc;
216
319 init_timer_cc = reset_tod_clock(); 217 init_timer_cc = reset_tod_clock();
320 xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
321 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; 218 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
322 219
323 /* set xtime */ 220 /* set xtime */
@@ -342,10 +239,6 @@ void __init time_init(void)
342 /* Enable TOD clock interrupts on the boot cpu. */ 239 /* Enable TOD clock interrupts on the boot cpu. */
343 init_cpu_timer(); 240 init_cpu_timer();
344 241
345#ifdef CONFIG_NO_IDLE_HZ
346 nohz_init();
347#endif
348
349#ifdef CONFIG_VIRT_TIMER 242#ifdef CONFIG_VIRT_TIMER
350 vtime_init(); 243 vtime_init();
351#endif 244#endif
@@ -699,53 +592,49 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
699} 592}
700 593
701/* 594/*
702 * The time is "clock". xtime is what we think the time is. 595 * The time is "clock". old is what we think the time is.
703 * Adjust the value by a multiple of jiffies and add the delta to ntp. 596 * Adjust the value by a multiple of jiffies and add the delta to ntp.
704 * "delay" is an approximation how long the synchronization took. If 597 * "delay" is an approximation how long the synchronization took. If
705 * the time correction is positive, then "delay" is subtracted from 598 * the time correction is positive, then "delay" is subtracted from
706 * the time difference and only the remaining part is passed to ntp. 599 * the time difference and only the remaining part is passed to ntp.
707 */ 600 */
708static void etr_adjust_time(unsigned long long clock, unsigned long long delay) 601static unsigned long long etr_adjust_time(unsigned long long old,
602 unsigned long long clock,
603 unsigned long long delay)
709{ 604{
710 unsigned long long delta, ticks; 605 unsigned long long delta, ticks;
711 struct timex adjust; 606 struct timex adjust;
712 607
713 /* 608 if (clock > old) {
714 * We don't have to take the xtime lock because the cpu
715 * executing etr_adjust_time is running disabled in
716 * tasklet context and all other cpus are looping in
717 * etr_sync_cpu_start.
718 */
719 if (clock > xtime_cc) {
720 /* It is later than we thought. */ 609 /* It is later than we thought. */
721 delta = ticks = clock - xtime_cc; 610 delta = ticks = clock - old;
722 delta = ticks = (delta < delay) ? 0 : delta - delay; 611 delta = ticks = (delta < delay) ? 0 : delta - delay;
723 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); 612 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
724 init_timer_cc = init_timer_cc + delta;
725 jiffies_timer_cc = jiffies_timer_cc + delta;
726 xtime_cc = xtime_cc + delta;
727 adjust.offset = ticks * (1000000 / HZ); 613 adjust.offset = ticks * (1000000 / HZ);
728 } else { 614 } else {
729 /* It is earlier than we thought. */ 615 /* It is earlier than we thought. */
730 delta = ticks = xtime_cc - clock; 616 delta = ticks = old - clock;
731 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); 617 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
732 init_timer_cc = init_timer_cc - delta; 618 delta = -delta;
733 jiffies_timer_cc = jiffies_timer_cc - delta;
734 xtime_cc = xtime_cc - delta;
735 adjust.offset = -ticks * (1000000 / HZ); 619 adjust.offset = -ticks * (1000000 / HZ);
736 } 620 }
621 jiffies_timer_cc += delta;
737 if (adjust.offset != 0) { 622 if (adjust.offset != 0) {
738 printk(KERN_NOTICE "etr: time adjusted by %li microseconds\n", 623 printk(KERN_NOTICE "etr: time adjusted by %li microseconds\n",
739 adjust.offset); 624 adjust.offset);
740 adjust.modes = ADJ_OFFSET_SINGLESHOT; 625 adjust.modes = ADJ_OFFSET_SINGLESHOT;
741 do_adjtimex(&adjust); 626 do_adjtimex(&adjust);
742 } 627 }
628 return delta;
743} 629}
744 630
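The arithmetic above splits the measured offset into whole jiffies plus a remainder: do_div() leaves the whole-jiffy count in ticks and returns the sub-jiffy rest, so delta keeps only whole jiffies worth of TOD units, which is what moves jiffies_timer_cc (and is returned for the clock comparator fixup), while the same whole-jiffy amount goes to ntp in microseconds. A worked example with made-up numbers, assuming HZ = 100 and CLK_TICKS_PER_JIFFY = USECS_PER_JIFFY << 12 = 40960000:

    unsigned long long ticks = 102400000ULL;            /* offset: 2.5 jiffies        */
    unsigned long long rem   = do_div(ticks, 40960000); /* ticks = 2, rem = 20480000  */
    /* delta         = 102400000 - rem = 81920000 TOD units (2 whole jiffies)         */
    /* adjust.offset = 2 * (1000000 / 100) = 20000 microseconds handed to adjtimex    */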
631static struct {
632 int in_sync;
633 unsigned long long fixup_cc;
634} etr_sync;
635
745static void etr_sync_cpu_start(void *dummy) 636static void etr_sync_cpu_start(void *dummy)
746{ 637{
747 int *in_sync = dummy;
748
749 etr_enable_sync_clock(); 638 etr_enable_sync_clock();
750 /* 639 /*
751 * This looks like a busy wait loop but it isn't. etr_sync_cpus 640 * This looks like a busy wait loop but it isn't. etr_sync_cpus
@@ -753,7 +642,7 @@ static void etr_sync_cpu_start(void *dummy)
753 * __udelay will stop the cpu on an enabled wait psw until the 642 * __udelay will stop the cpu on an enabled wait psw until the
754 * TOD is running again. 643 * TOD is running again.
755 */ 644 */
756 while (*in_sync == 0) { 645 while (etr_sync.in_sync == 0) {
757 __udelay(1); 646 __udelay(1);
758 /* 647 /*
759 * A different cpu changes *in_sync. Therefore use 648 * A different cpu changes *in_sync. Therefore use
@@ -761,14 +650,14 @@ static void etr_sync_cpu_start(void *dummy)
761 */ 650 */
762 barrier(); 651 barrier();
763 } 652 }
764 if (*in_sync != 1) 653 if (etr_sync.in_sync != 1)
765 /* Didn't work. Clear per-cpu in sync bit again. */ 654 /* Didn't work. Clear per-cpu in sync bit again. */
766 etr_disable_sync_clock(NULL); 655 etr_disable_sync_clock(NULL);
767 /* 656 /*
768 * This round of TOD syncing is done. Set the clock comparator 657 * This round of TOD syncing is done. Set the clock comparator
769 * to the next tick and let the processor continue. 658 * to the next tick and let the processor continue.
770 */ 659 */
771 setup_jiffy_timer(); 660 fixup_clock_comparator(etr_sync.fixup_cc);
772} 661}
773 662
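The etr_sync struct replaces the old on-stack in_sync flag so the comparator fixup can travel with the go/abort signal: the CPU that steps the TOD clock stores the measured delta in fixup_cc before flipping in_sync, and every other CPU spins on in_sync (with barrier(), since another CPU writes it), then applies that delta to its own clock comparator. The protocol, reduced to its essentials:

    /* syncing CPU (etr_sync_clock):
     *      etr_sync.fixup_cc = delta;
     *      ...
     *      etr_sync.in_sync  = 1;             (or -EAGAIN on failure)
     *
     * every other CPU (etr_sync_cpu_start):
     *      while (etr_sync.in_sync == 0)
     *              barrier();                 (flag is written by another CPU)
     *      if (etr_sync.in_sync != 1)
     *              etr_disable_sync_clock(NULL);
     *      fixup_clock_comparator(etr_sync.fixup_cc);   (0 on the failure path)
     */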
774static void etr_sync_cpu_end(void *dummy) 663static void etr_sync_cpu_end(void *dummy)
@@ -783,8 +672,8 @@ static void etr_sync_cpu_end(void *dummy)
783static int etr_sync_clock(struct etr_aib *aib, int port) 672static int etr_sync_clock(struct etr_aib *aib, int port)
784{ 673{
785 struct etr_aib *sync_port; 674 struct etr_aib *sync_port;
786 unsigned long long clock, delay; 675 unsigned long long clock, old_clock, delay, delta;
787 int in_sync, follows; 676 int follows;
788 int rc; 677 int rc;
789 678
790 /* Check if the current aib is adjacent to the sync port aib. */ 679 /* Check if the current aib is adjacent to the sync port aib. */
@@ -799,9 +688,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
799 * successfully synced the clock. smp_call_function will 688 * successfully synced the clock. smp_call_function will
800 * return after all other cpus are in etr_sync_cpu_start. 689 * return after all other cpus are in etr_sync_cpu_start.
801 */ 690 */
802 in_sync = 0; 691 memset(&etr_sync, 0, sizeof(etr_sync));
803 preempt_disable(); 692 preempt_disable();
804 smp_call_function(etr_sync_cpu_start,&in_sync,0,0); 693 smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
805 local_irq_disable(); 694 local_irq_disable();
806 etr_enable_sync_clock(); 695 etr_enable_sync_clock();
807 696
@@ -809,6 +698,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
809 __ctl_set_bit(14, 21); 698 __ctl_set_bit(14, 21);
810 __ctl_set_bit(0, 29); 699 __ctl_set_bit(0, 29);
811 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; 700 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
701 old_clock = get_clock();
812 if (set_clock(clock) == 0) { 702 if (set_clock(clock) == 0) {
813 __udelay(1); /* Wait for the clock to start. */ 703 __udelay(1); /* Wait for the clock to start. */
814 __ctl_clear_bit(0, 29); 704 __ctl_clear_bit(0, 29);
@@ -817,16 +707,17 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
817 /* Adjust Linux timing variables. */ 707 /* Adjust Linux timing variables. */
818 delay = (unsigned long long) 708 delay = (unsigned long long)
819 (aib->edf2.etv - sync_port->edf2.etv) << 32; 709 (aib->edf2.etv - sync_port->edf2.etv) << 32;
820 etr_adjust_time(clock, delay); 710 delta = etr_adjust_time(old_clock, clock, delay);
821 setup_jiffy_timer(); 711 etr_sync.fixup_cc = delta;
712 fixup_clock_comparator(delta);
822 /* Verify that the clock is properly set. */ 713 /* Verify that the clock is properly set. */
823 if (!etr_aib_follows(sync_port, aib, port)) { 714 if (!etr_aib_follows(sync_port, aib, port)) {
824 /* Didn't work. */ 715 /* Didn't work. */
825 etr_disable_sync_clock(NULL); 716 etr_disable_sync_clock(NULL);
826 in_sync = -EAGAIN; 717 etr_sync.in_sync = -EAGAIN;
827 rc = -EAGAIN; 718 rc = -EAGAIN;
828 } else { 719 } else {
829 in_sync = 1; 720 etr_sync.in_sync = 1;
830 rc = 0; 721 rc = 0;
831 } 722 }
832 } else { 723 } else {
@@ -834,7 +725,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
834 __ctl_clear_bit(0, 29); 725 __ctl_clear_bit(0, 29);
835 __ctl_clear_bit(14, 21); 726 __ctl_clear_bit(14, 21);
836 etr_disable_sync_clock(NULL); 727 etr_disable_sync_clock(NULL);
837 in_sync = -EAGAIN; 728 etr_sync.in_sync = -EAGAIN;
838 rc = -EAGAIN; 729 rc = -EAGAIN;
839 } 730 }
840 local_irq_enable(); 731 local_irq_enable();
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
new file mode 100644
index 000000000000..12b39b3d9c38
--- /dev/null
+++ b/arch/s390/kernel/topology.c
@@ -0,0 +1,314 @@
1/*
2 * Copyright IBM Corp. 2007
3 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
4 */
5
6#include <linux/kernel.h>
7#include <linux/mm.h>
8#include <linux/init.h>
9#include <linux/device.h>
10#include <linux/bootmem.h>
11#include <linux/sched.h>
12#include <linux/workqueue.h>
13#include <linux/cpu.h>
14#include <linux/smp.h>
15#include <asm/delay.h>
16#include <asm/s390_ext.h>
17#include <asm/sysinfo.h>
18
19#define CPU_BITS 64
20#define NR_MAG 6
21
22#define PTF_HORIZONTAL (0UL)
23#define PTF_VERTICAL (1UL)
24#define PTF_CHECK (2UL)
25
26struct tl_cpu {
27 unsigned char reserved0[4];
28 unsigned char :6;
29 unsigned char pp:2;
30 unsigned char reserved1;
31 unsigned short origin;
32 unsigned long mask[CPU_BITS / BITS_PER_LONG];
33};
34
35struct tl_container {
36 unsigned char reserved[8];
37};
38
39union tl_entry {
40 unsigned char nl;
41 struct tl_cpu cpu;
42 struct tl_container container;
43};
44
45struct tl_info {
46 unsigned char reserved0[2];
47 unsigned short length;
48 unsigned char mag[NR_MAG];
49 unsigned char reserved1;
50 unsigned char mnest;
51 unsigned char reserved2[4];
52 union tl_entry tle[0];
53};
54
55struct core_info {
56 struct core_info *next;
57 cpumask_t mask;
58};
59
60static void topology_work_fn(struct work_struct *work);
61static struct tl_info *tl_info;
62static struct core_info core_info;
63static int machine_has_topology;
64static int machine_has_topology_irq;
65static struct timer_list topology_timer;
66static void set_topology_timer(void);
67static DECLARE_WORK(topology_work, topology_work_fn);
68
69cpumask_t cpu_coregroup_map(unsigned int cpu)
70{
71 struct core_info *core = &core_info;
72 cpumask_t mask;
73
74 cpus_clear(mask);
75 if (!machine_has_topology)
76 return cpu_present_map;
77 mutex_lock(&smp_cpu_state_mutex);
78 while (core) {
79 if (cpu_isset(cpu, core->mask)) {
80 mask = core->mask;
81 break;
82 }
83 core = core->next;
84 }
85 mutex_unlock(&smp_cpu_state_mutex);
86 if (cpus_empty(mask))
87 mask = cpumask_of_cpu(cpu);
88 return mask;
89}
90
91static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
92{
93 unsigned int cpu;
94
95 for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
96 cpu < CPU_BITS;
97 cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
98 {
99 unsigned int rcpu, lcpu;
100
101 rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
102 for_each_present_cpu(lcpu) {
103 if (__cpu_logical_map[lcpu] == rcpu) {
104 cpu_set(lcpu, core->mask);
105 smp_cpu_polarization[lcpu] = tl_cpu->pp;
106 }
107 }
108 }
109}
110
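The index arithmetic above bridges two bit numbering conventions: the hardware mask counts CPUs from the leftmost (most significant) bit starting at origin, while find_first_bit()/find_next_bit() count from the least significant end of each unsigned long, so rcpu = CPU_BITS - 1 - cpu + origin flips the position back. Worked through with hypothetical values:

    /* origin = 16 and only the leftmost mask bit set:
     * find_first_bit() reports position 63, so
     *     rcpu = 64 - 1 - 63 + 16 = 16, the first CPU address of this entry;
     * the bit one place to the right (62) maps to rcpu 17, and so on. */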
111static void clear_cores(void)
112{
113 struct core_info *core = &core_info;
114
115 while (core) {
116 cpus_clear(core->mask);
117 core = core->next;
118 }
119}
120
121static union tl_entry *next_tle(union tl_entry *tle)
122{
123 if (tle->nl)
124 return (union tl_entry *)((struct tl_container *)tle + 1);
125 else
126 return (union tl_entry *)((struct tl_cpu *)tle + 1);
127}
128
129static void tl_to_cores(struct tl_info *info)
130{
131 union tl_entry *tle, *end;
132 struct core_info *core = &core_info;
133
134 mutex_lock(&smp_cpu_state_mutex);
135 clear_cores();
136 tle = info->tle;
137 end = (union tl_entry *)((unsigned long)info + info->length);
138 while (tle < end) {
139 switch (tle->nl) {
140 case 5:
141 case 4:
142 case 3:
143 case 2:
144 break;
145 case 1:
146 core = core->next;
147 break;
148 case 0:
149 add_cpus_to_core(&tle->cpu, core);
150 break;
151 default:
152 clear_cores();
153 machine_has_topology = 0;
154 return;
155 }
156 tle = next_tle(tle);
157 }
158 mutex_unlock(&smp_cpu_state_mutex);
159}
160
161static void topology_update_polarization_simple(void)
162{
163 int cpu;
164
165 mutex_lock(&smp_cpu_state_mutex);
166 for_each_present_cpu(cpu)
167 smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
168 mutex_unlock(&smp_cpu_state_mutex);
169}
170
171static int ptf(unsigned long fc)
172{
173 int rc;
174
175 asm volatile(
176 " .insn rre,0xb9a20000,%1,%1\n"
177 " ipm %0\n"
178 " srl %0,28\n"
179 : "=d" (rc)
180 : "d" (fc) : "cc");
181 return rc;
182}
183
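ptf() uses the standard s390 idiom for instructions that answer via the condition code: .insn rre emits the PTF opcode (0xb9a2) with the function code in a register, ipm copies the condition code into bits 2-3 of the result register, and srl ...,28 shifts it down so the C variable holds cc as 0..3. The callers below only care whether that value is non-zero:

    /* sketch of how the condition code comes back to C code:
     *      ipm %0          condition code lands in bits 2-3 of %0
     *      srl %0,28       now %0 == cc (0..3)
     * so ptf(PTF_CHECK) != 0 simply means "the condition code was not 0",
     * which topology_timer_fn() treats as "topology may have changed". */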
184int topology_set_cpu_management(int fc)
185{
186 int cpu;
187 int rc;
188
189 if (!machine_has_topology)
190 return -EOPNOTSUPP;
191 if (fc)
192 rc = ptf(PTF_VERTICAL);
193 else
194 rc = ptf(PTF_HORIZONTAL);
195 if (rc)
196 return -EBUSY;
197 for_each_present_cpu(cpu)
198 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
199 return rc;
200}
201
202void arch_update_cpu_topology(void)
203{
204 struct tl_info *info = tl_info;
205 struct sys_device *sysdev;
206 int cpu;
207
208 if (!machine_has_topology) {
209 topology_update_polarization_simple();
210 return;
211 }
212 stsi(info, 15, 1, 2);
213 tl_to_cores(info);
214 for_each_online_cpu(cpu) {
215 sysdev = get_cpu_sysdev(cpu);
216 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
217 }
218}
219
220static void topology_work_fn(struct work_struct *work)
221{
222 arch_reinit_sched_domains();
223}
224
225void topology_schedule_update(void)
226{
227 schedule_work(&topology_work);
228}
229
230static void topology_timer_fn(unsigned long ignored)
231{
232 if (ptf(PTF_CHECK))
233 topology_schedule_update();
234 set_topology_timer();
235}
236
237static void set_topology_timer(void)
238{
239 topology_timer.function = topology_timer_fn;
240 topology_timer.data = 0;
241 topology_timer.expires = jiffies + 60 * HZ;
242 add_timer(&topology_timer);
243}
244
245static void topology_interrupt(__u16 code)
246{
247 schedule_work(&topology_work);
248}
249
250static int __init init_topology_update(void)
251{
252 int rc;
253
254 if (!machine_has_topology) {
255 topology_update_polarization_simple();
256 return 0;
257 }
258 init_timer_deferrable(&topology_timer);
259 if (machine_has_topology_irq) {
260 rc = register_external_interrupt(0x2005, topology_interrupt);
261 if (rc)
262 return rc;
263 ctl_set_bit(0, 8);
264 }
265 else
266 set_topology_timer();
267 return 0;
268}
269__initcall(init_topology_update);
270
271void __init s390_init_cpu_topology(void)
272{
273 unsigned long long facility_bits;
274 struct tl_info *info;
275 struct core_info *core;
276 int nr_cores;
277 int i;
278
279 if (stfle(&facility_bits, 1) <= 0)
280 return;
281 if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
282 return;
283 machine_has_topology = 1;
284
285 if (facility_bits & (1ULL << 51))
286 machine_has_topology_irq = 1;
287
288 tl_info = alloc_bootmem_pages(PAGE_SIZE);
289 if (!tl_info)
290 goto error;
291 info = tl_info;
292 stsi(info, 15, 1, 2);
293
294 nr_cores = info->mag[NR_MAG - 2];
295 for (i = 0; i < info->mnest - 2; i++)
296 nr_cores *= info->mag[NR_MAG - 3 - i];
297
298 printk(KERN_INFO "CPU topology:");
299 for (i = 0; i < NR_MAG; i++)
300 printk(" %d", info->mag[i]);
301 printk(" / %d\n", info->mnest);
302
303 core = &core_info;
304 for (i = 0; i < nr_cores; i++) {
305 core->next = alloc_bootmem(sizeof(struct core_info));
306 core = core->next;
307 if (!core)
308 goto error;
309 }
310 return;
311error:
312 machine_has_topology = 0;
313 machine_has_topology_irq = 0;
314}
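The mag[]/mnest readout sizes the list of core_info entries allocated in the loop above: mag[NR_MAG - 1] is the number of CPUs per core, mag[NR_MAG - 2] the number of cores per next-level container, and each further nesting level up to mnest multiplies in another mag entry, so nr_cores ends up as the machine-wide core count. Worked example with invented values:

    /* hypothetical SYSIB 15.1.2: mnest = 3, mag = { 0, 0, 0, 4, 2, 8 },
     * i.e. 8 CPUs per core, 2 cores per higher-level container, 4 containers */
    int nr_cores = 2;   /* mag[NR_MAG - 2]                         */
    nr_cores *= 4;      /* one loop pass (mnest - 2 == 1): mag[3]  */
    /* 8 core_info entries are chained onto core_info for the 2 * 4 cores */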
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 60f728aeaf12..57b607b61100 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -42,11 +42,8 @@
42#include <asm/s390_ext.h> 42#include <asm/s390_ext.h>
43#include <asm/lowcore.h> 43#include <asm/lowcore.h>
44#include <asm/debug.h> 44#include <asm/debug.h>
45#include "entry.h"
45 46
46/* Called from entry.S only */
47extern void handle_per_exception(struct pt_regs *regs);
48
49typedef void pgm_check_handler_t(struct pt_regs *, long);
50pgm_check_handler_t *pgm_check_table[128]; 47pgm_check_handler_t *pgm_check_table[128];
51 48
52#ifdef CONFIG_SYSCTL 49#ifdef CONFIG_SYSCTL
@@ -59,7 +56,6 @@ int sysctl_userprocess_debug = 0;
59 56
60extern pgm_check_handler_t do_protection_exception; 57extern pgm_check_handler_t do_protection_exception;
61extern pgm_check_handler_t do_dat_exception; 58extern pgm_check_handler_t do_dat_exception;
62extern pgm_check_handler_t do_monitor_call;
63extern pgm_check_handler_t do_asce_exception; 59extern pgm_check_handler_t do_asce_exception;
64 60
65#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 61#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -138,7 +134,6 @@ void show_trace(struct task_struct *task, unsigned long *stack)
138 else 134 else
139 __show_trace(sp, S390_lowcore.thread_info, 135 __show_trace(sp, S390_lowcore.thread_info,
140 S390_lowcore.thread_info + THREAD_SIZE); 136 S390_lowcore.thread_info + THREAD_SIZE);
141 printk("\n");
142 if (!task) 137 if (!task)
143 task = current; 138 task = current;
144 debug_show_held_locks(task); 139 debug_show_held_locks(task);
@@ -166,6 +161,15 @@ void show_stack(struct task_struct *task, unsigned long *sp)
166 show_trace(task, sp); 161 show_trace(task, sp);
167} 162}
168 163
164#ifdef CONFIG_64BIT
165void show_last_breaking_event(struct pt_regs *regs)
166{
167 printk("Last Breaking-Event-Address:\n");
168 printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
169 print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
170}
171#endif
172
169/* 173/*
170 * The architecture-independent dump_stack generator 174 * The architecture-independent dump_stack generator
171 */ 175 */
@@ -739,6 +743,5 @@ void __init trap_init(void)
739 pgm_check_table[0x15] = &operand_exception; 743 pgm_check_table[0x15] = &operand_exception;
740 pgm_check_table[0x1C] = &space_switch_exception; 744 pgm_check_table[0x1C] = &space_switch_exception;
741 pgm_check_table[0x1D] = &hfp_sqrt_exception; 745 pgm_check_table[0x1D] = &hfp_sqrt_exception;
742 pgm_check_table[0x40] = &do_monitor_call;
743 pfault_irq_init(); 746 pfault_irq_init();
744} 747}
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 70f2a862b670..eae21a8ac72d 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -34,7 +34,7 @@ void __delay(unsigned long loops)
34 */ 34 */
35void __udelay(unsigned long usecs) 35void __udelay(unsigned long usecs)
36{ 36{
37 u64 end, time, jiffy_timer = 0; 37 u64 end, time, old_cc = 0;
38 unsigned long flags, cr0, mask, dummy; 38 unsigned long flags, cr0, mask, dummy;
39 int irq_context; 39 int irq_context;
40 40
@@ -43,8 +43,8 @@ void __udelay(unsigned long usecs)
43 local_bh_disable(); 43 local_bh_disable();
44 local_irq_save(flags); 44 local_irq_save(flags);
45 if (raw_irqs_disabled_flags(flags)) { 45 if (raw_irqs_disabled_flags(flags)) {
46 jiffy_timer = S390_lowcore.jiffy_timer; 46 old_cc = S390_lowcore.clock_comparator;
47 S390_lowcore.jiffy_timer = -1ULL - (4096 << 12); 47 S390_lowcore.clock_comparator = -1ULL;
48 __ctl_store(cr0, 0, 0); 48 __ctl_store(cr0, 0, 0);
49 dummy = (cr0 & 0xffff00e0) | 0x00000800; 49 dummy = (cr0 & 0xffff00e0) | 0x00000800;
50 __ctl_load(dummy , 0, 0); 50 __ctl_load(dummy , 0, 0);
@@ -55,8 +55,8 @@ void __udelay(unsigned long usecs)
55 55
56 end = get_clock() + ((u64) usecs << 12); 56 end = get_clock() + ((u64) usecs << 12);
57 do { 57 do {
58 time = end < S390_lowcore.jiffy_timer ? 58 time = end < S390_lowcore.clock_comparator ?
59 end : S390_lowcore.jiffy_timer; 59 end : S390_lowcore.clock_comparator;
60 set_clock_comparator(time); 60 set_clock_comparator(time);
61 trace_hardirqs_on(); 61 trace_hardirqs_on();
62 __load_psw_mask(mask); 62 __load_psw_mask(mask);
@@ -65,10 +65,10 @@ void __udelay(unsigned long usecs)
65 65
66 if (raw_irqs_disabled_flags(flags)) { 66 if (raw_irqs_disabled_flags(flags)) {
67 __ctl_load(cr0, 0, 0); 67 __ctl_load(cr0, 0, 0);
68 S390_lowcore.jiffy_timer = jiffy_timer; 68 S390_lowcore.clock_comparator = old_cc;
69 } 69 }
70 if (!irq_context) 70 if (!irq_context)
71 _local_bh_enable(); 71 _local_bh_enable();
72 set_clock_comparator(S390_lowcore.jiffy_timer); 72 set_clock_comparator(S390_lowcore.clock_comparator);
73 local_irq_restore(flags); 73 local_irq_restore(flags);
74} 74}
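__udelay() can now borrow the comparator cleanly because the pending value lives in lowcore: if the caller already had interrupts disabled, the pending value is saved and parked at -1ULL so only the delay can fire; otherwise the loop simply programs whichever expires first, the end of the delay or the already pending comparator value; either way the pending value is restored and re-armed on exit. The shape of the loop, trimmed down to a sketch:

    /* sketch only: delay by arming the clock comparator, 4096 TOD units per us */
    u64 end = get_clock() + ((u64) usecs << 12);
    do {
            u64 next = (end < S390_lowcore.clock_comparator) ?
                       end : S390_lowcore.clock_comparator;
            set_clock_comparator(next);
            /* load an enabled wait PSW; the comparator interrupt wakes us */
    } while (get_clock() < end);
    set_clock_comparator(S390_lowcore.clock_comparator);   /* re-arm the real value */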
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 5efdfe9f5e76..d66215b0fde9 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -302,6 +302,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
302 pte_t *pte_from, *pte_to; 302 pte_t *pte_from, *pte_to;
303 int write_user; 303 int write_user;
304 304
305 if (segment_eq(get_fs(), KERNEL_DS)) {
306 memcpy((void __force *) to, (void __force *) from, n);
307 return 0;
308 }
305 done = 0; 309 done = 0;
306retry: 310retry:
307 spin_lock(&mm->page_table_lock); 311 spin_lock(&mm->page_table_lock);
@@ -361,18 +365,10 @@ fault:
361 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ 365 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
362 "m" (*uaddr) : "cc" ); 366 "m" (*uaddr) : "cc" );
363 367
364int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) 368static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
365{ 369{
366 int oldval = 0, newval, ret; 370 int oldval = 0, newval, ret;
367 371
368 spin_lock(&current->mm->page_table_lock);
369 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
370 if (!uaddr) {
371 spin_unlock(&current->mm->page_table_lock);
372 return -EFAULT;
373 }
374 get_page(virt_to_page(uaddr));
375 spin_unlock(&current->mm->page_table_lock);
376 switch (op) { 372 switch (op) {
377 case FUTEX_OP_SET: 373 case FUTEX_OP_SET:
378 __futex_atomic_op("lr %2,%5\n", 374 __futex_atomic_op("lr %2,%5\n",
@@ -397,17 +393,17 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
397 default: 393 default:
398 ret = -ENOSYS; 394 ret = -ENOSYS;
399 } 395 }
400 put_page(virt_to_page(uaddr)); 396 if (ret == 0)
401 *old = oldval; 397 *old = oldval;
402 return ret; 398 return ret;
403} 399}
404 400
405int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) 401int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
406{ 402{
407 int ret; 403 int ret;
408 404
409 if (!current->mm) 405 if (segment_eq(get_fs(), KERNEL_DS))
410 return -EFAULT; 406 return __futex_atomic_op_pt(op, uaddr, oparg, old);
411 spin_lock(&current->mm->page_table_lock); 407 spin_lock(&current->mm->page_table_lock);
412 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); 408 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
413 if (!uaddr) { 409 if (!uaddr) {
@@ -416,13 +412,40 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
416 } 412 }
417 get_page(virt_to_page(uaddr)); 413 get_page(virt_to_page(uaddr));
418 spin_unlock(&current->mm->page_table_lock); 414 spin_unlock(&current->mm->page_table_lock);
419 asm volatile(" cs %1,%4,0(%5)\n" 415 ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
420 "0: lr %0,%1\n" 416 put_page(virt_to_page(uaddr));
421 "1:\n" 417 return ret;
422 EX_TABLE(0b,1b) 418}
419
420static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
421{
422 int ret;
423
424 asm volatile("0: cs %1,%4,0(%5)\n"
425 "1: lr %0,%1\n"
426 "2:\n"
427 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
423 : "=d" (ret), "+d" (oldval), "=m" (*uaddr) 428 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
424 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) 429 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
425 : "cc", "memory" ); 430 : "cc", "memory" );
431 return ret;
432}
433
434int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
435{
436 int ret;
437
438 if (segment_eq(get_fs(), KERNEL_DS))
439 return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
440 spin_lock(&current->mm->page_table_lock);
441 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
442 if (!uaddr) {
443 spin_unlock(&current->mm->page_table_lock);
444 return -EFAULT;
445 }
446 get_page(virt_to_page(uaddr));
447 spin_unlock(&current->mm->page_table_lock);
448 ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
426 put_page(virt_to_page(uaddr)); 449 put_page(virt_to_page(uaddr));
427 return ret; 450 return ret;
428} 451}
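Both futex entry points now follow the same two-layer pattern: the double-underscore helper only issues the atomic instruction under an EX_TABLE fixup, and the public wrapper decides how to reach the operand, going straight through for KERNEL_DS accesses and otherwise translating and pinning the user page around the call. The common wrapper shape, with do_the_op as a placeholder for either helper:

    extern int do_the_op(int __user *uaddr);          /* stand-in, not a real symbol */

    int futex_wrapper_sketch(int __user *uaddr)
    {
            int ret;

            if (segment_eq(get_fs(), KERNEL_DS))      /* kernel address space */
                    return do_the_op(uaddr);          /* no translation needed */

            spin_lock(&current->mm->page_table_lock);
            uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
            if (!uaddr) {
                    spin_unlock(&current->mm->page_table_lock);
                    return -EFAULT;
            }
            get_page(virt_to_page(uaddr));            /* keep the page resident */
            spin_unlock(&current->mm->page_table_lock);
            ret = do_the_op(uaddr);
            put_page(virt_to_page(uaddr));
            return ret;
    }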
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 880b0ebf894b..ed2af0a3303b 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -289,22 +289,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
289 289
290 rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); 290 rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
291 291
292 switch (rc) { 292 if (rc)
293 case 0:
294 break;
295 case -ENOSPC:
296 PRINT_WARN("segment_load: not loading segment %s - overlaps "
297 "storage/segment\n", name);
298 goto out_free;
299 case -ERANGE:
300 PRINT_WARN("segment_load: not loading segment %s - exceeds "
301 "kernel mapping range\n", name);
302 goto out_free;
303 default:
304 PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
305 name, rc);
306 goto out_free; 293 goto out_free;
307 }
308 294
309 seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); 295 seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
310 if (seg->res == NULL) { 296 if (seg->res == NULL) {
@@ -582,8 +568,59 @@ out:
582 mutex_unlock(&dcss_lock); 568 mutex_unlock(&dcss_lock);
583} 569}
584 570
571/*
572 * print appropriate error message for segment_load()/segment_type()
573 * return code
574 */
575void segment_warning(int rc, char *seg_name)
576{
577 switch (rc) {
578 case -ENOENT:
579 PRINT_WARN("cannot load/query segment %s, "
580 "does not exist\n", seg_name);
581 break;
582 case -ENOSYS:
583 PRINT_WARN("cannot load/query segment %s, "
584 "not running on VM\n", seg_name);
585 break;
586 case -EIO:
587 PRINT_WARN("cannot load/query segment %s, "
588 "hardware error\n", seg_name);
589 break;
590 case -ENOTSUPP:
591 PRINT_WARN("cannot load/query segment %s, "
592 "is a multi-part segment\n", seg_name);
593 break;
594 case -ENOSPC:
595 PRINT_WARN("cannot load/query segment %s, "
596 "overlaps with storage\n", seg_name);
597 break;
598 case -EBUSY:
599 PRINT_WARN("cannot load/query segment %s, "
600 "overlaps with already loaded dcss\n", seg_name);
601 break;
602 case -EPERM:
603 PRINT_WARN("cannot load/query segment %s, "
604 "already loaded in incompatible mode\n", seg_name);
605 break;
606 case -ENOMEM:
607 PRINT_WARN("cannot load/query segment %s, "
608 "out of memory\n", seg_name);
609 break;
610 case -ERANGE:
611 PRINT_WARN("cannot load/query segment %s, "
612 "exceeds kernel mapping range\n", seg_name);
613 break;
614 default:
615 PRINT_WARN("cannot load/query segment %s, "
616 "return value %i\n", seg_name, rc);
617 break;
618 }
619}
620
585EXPORT_SYMBOL(segment_load); 621EXPORT_SYMBOL(segment_load);
586EXPORT_SYMBOL(segment_unload); 622EXPORT_SYMBOL(segment_unload);
587EXPORT_SYMBOL(segment_save); 623EXPORT_SYMBOL(segment_save);
588EXPORT_SYMBOL(segment_type); 624EXPORT_SYMBOL(segment_type);
589EXPORT_SYMBOL(segment_modify_shared); 625EXPORT_SYMBOL(segment_modify_shared);
626EXPORT_SYMBOL(segment_warning);
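With the message table centralized and exported, every DCSS user can reduce its error path to the same few lines; the dcssblk and monreader hunks later in this patch do exactly that:

    rc = segment_load(name, SEGMENT_SHARED, &start, &end);
    if (rc < 0) {
            segment_warning(rc, name);      /* one shared warning table */
            goto out_error;                 /* label is caller-specific  */
    }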
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ed13d429a487..2650f46001d0 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -28,11 +28,11 @@
28#include <linux/hardirq.h> 28#include <linux/hardirq.h>
29#include <linux/kprobes.h> 29#include <linux/kprobes.h>
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31
32#include <asm/system.h> 31#include <asm/system.h>
33#include <asm/pgtable.h> 32#include <asm/pgtable.h>
34#include <asm/s390_ext.h> 33#include <asm/s390_ext.h>
35#include <asm/mmu_context.h> 34#include <asm/mmu_context.h>
35#include "../kernel/entry.h"
36 36
37#ifndef CONFIG_64BIT 37#ifndef CONFIG_64BIT
38#define __FAIL_ADDR_MASK 0x7ffff000 38#define __FAIL_ADDR_MASK 0x7ffff000
@@ -50,8 +50,6 @@
50extern int sysctl_userprocess_debug; 50extern int sysctl_userprocess_debug;
51#endif 51#endif
52 52
53extern void die(const char *,struct pt_regs *,long);
54
55#ifdef CONFIG_KPROBES 53#ifdef CONFIG_KPROBES
56static inline int notify_page_fault(struct pt_regs *regs, long err) 54static inline int notify_page_fault(struct pt_regs *regs, long err)
57{ 55{
@@ -245,11 +243,6 @@ static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
245} 243}
246 244
247#ifdef CONFIG_S390_EXEC_PROTECT 245#ifdef CONFIG_S390_EXEC_PROTECT
248extern long sys_sigreturn(struct pt_regs *regs);
249extern long sys_rt_sigreturn(struct pt_regs *regs);
250extern long sys32_sigreturn(struct pt_regs *regs);
251extern long sys32_rt_sigreturn(struct pt_regs *regs);
252
253static int signal_return(struct mm_struct *mm, struct pt_regs *regs, 246static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
254 unsigned long address, unsigned long error_code) 247 unsigned long address, unsigned long error_code)
255{ 248{
@@ -270,15 +263,15 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
270#ifdef CONFIG_COMPAT 263#ifdef CONFIG_COMPAT
271 compat = test_tsk_thread_flag(current, TIF_31BIT); 264 compat = test_tsk_thread_flag(current, TIF_31BIT);
272 if (compat && instruction == 0x0a77) 265 if (compat && instruction == 0x0a77)
273 sys32_sigreturn(regs); 266 sys32_sigreturn();
274 else if (compat && instruction == 0x0aad) 267 else if (compat && instruction == 0x0aad)
275 sys32_rt_sigreturn(regs); 268 sys32_rt_sigreturn();
276 else 269 else
277#endif 270#endif
278 if (instruction == 0x0a77) 271 if (instruction == 0x0a77)
279 sys_sigreturn(regs); 272 sys_sigreturn();
280 else if (instruction == 0x0aad) 273 else if (instruction == 0x0aad)
281 sys_rt_sigreturn(regs); 274 sys_rt_sigreturn();
282 else { 275 else {
283 current->thread.prot_addr = address; 276 current->thread.prot_addr = address;
284 current->thread.trap_no = error_code; 277 current->thread.trap_no = error_code;
@@ -424,7 +417,7 @@ no_context:
424} 417}
425 418
426void __kprobes do_protection_exception(struct pt_regs *regs, 419void __kprobes do_protection_exception(struct pt_regs *regs,
427 unsigned long error_code) 420 long error_code)
428{ 421{
429 /* Protection exception is suppressing, decrement psw address. */ 422 /* Protection exception is suppressing, decrement psw address. */
430 regs->psw.addr -= (error_code >> 16); 423 regs->psw.addr -= (error_code >> 16);
@@ -440,7 +433,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs,
440 do_exception(regs, 4, 1); 433 do_exception(regs, 4, 1);
441} 434}
442 435
443void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code) 436void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
444{ 437{
445 do_exception(regs, error_code & 0xff, 0); 438 do_exception(regs, error_code & 0xff, 0);
446} 439}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 8053245fe259..202c952a29b4 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -50,7 +50,6 @@ void show_mem(void)
50 50
51 printk("Mem-info:\n"); 51 printk("Mem-info:\n");
52 show_free_areas(); 52 show_free_areas();
53 printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
54 i = max_mapnr; 53 i = max_mapnr;
55 while (i-- > 0) { 54 while (i-- > 0) {
56 if (!pfn_valid(i)) 55 if (!pfn_valid(i))
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 84cdf9025737..349b6edc5794 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -116,6 +116,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
116 err = -EAGAIN; 116 err = -EAGAIN;
117 if (!bytes_read && (filp->f_flags & O_NONBLOCK)) 117 if (!bytes_read && (filp->f_flags & O_NONBLOCK))
118 goto out; 118 goto out;
119 if (bytes_read < 0) {
120 err = bytes_read;
121 goto out;
122 }
119 123
120 err = -EFAULT; 124 err = -EFAULT;
121 while (bytes_read && size) { 125 while (bytes_read && size) {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 6b658d84d521..6d2f0c8d419a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -64,6 +64,7 @@ config ZCRYPT
64 tristate "Support for PCI-attached cryptographic adapters" 64 tristate "Support for PCI-attached cryptographic adapters"
65 depends on S390 65 depends on S390
66 select ZCRYPT_MONOLITHIC if ZCRYPT="y" 66 select ZCRYPT_MONOLITHIC if ZCRYPT="y"
67 select HW_RANDOM
67 help 68 help
68 Select this option if you want to use a PCI-attached cryptographic 69 Select this option if you want to use a PCI-attached cryptographic
69 adapter like: 70 adapter like:
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index e879b212cf43..07883197f474 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -20,6 +20,7 @@ config DCSSBLK
20config DASD 20config DASD
21 tristate "Support for DASD devices" 21 tristate "Support for DASD devices"
22 depends on CCW && BLOCK 22 depends on CCW && BLOCK
23 select IOSCHED_DEADLINE
23 help 24 help
24 Enable this option if you want to access DASDs directly utilizing 25 Enable this option if you want to access DASDs directly utilizing
25 S/390s channel subsystem commands. This is necessary for running 26 S/390s channel subsystem commands. This is necessary for running
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ccf46c96adb4..ac6d4d3218b3 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -980,12 +980,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
980 break; 980 break;
981 case -ETIMEDOUT: 981 case -ETIMEDOUT:
982 printk(KERN_WARNING"%s(%s): request timed out\n", 982 printk(KERN_WARNING"%s(%s): request timed out\n",
983 __FUNCTION__, cdev->dev.bus_id); 983 __func__, cdev->dev.bus_id);
984 //FIXME - dasd uses own timeout interface... 984 //FIXME - dasd uses own timeout interface...
985 break; 985 break;
986 default: 986 default:
987 printk(KERN_WARNING"%s(%s): unknown error %ld\n", 987 printk(KERN_WARNING"%s(%s): unknown error %ld\n",
988 __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb)); 988 __func__, cdev->dev.bus_id, PTR_ERR(irb));
989 } 989 }
990 return; 990 return;
991 } 991 }
@@ -1956,6 +1956,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
1956 block->request_queue->queuedata = block; 1956 block->request_queue->queuedata = block;
1957 1957
1958 elevator_exit(block->request_queue->elevator); 1958 elevator_exit(block->request_queue->elevator);
1959 block->request_queue->elevator = NULL;
1959 rc = elevator_init(block->request_queue, "deadline"); 1960 rc = elevator_init(block->request_queue, "deadline");
1960 if (rc) { 1961 if (rc) {
1961 blk_cleanup_queue(block->request_queue); 1962 blk_cleanup_queue(block->request_queue);
@@ -2298,9 +2299,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
2298 * in the other openers. 2299 * in the other openers.
2299 */ 2300 */
2300 if (device->block) { 2301 if (device->block) {
2301 struct dasd_block *block = device->block; 2302 max_count = device->block->bdev ? 0 : -1;
2302 max_count = block->bdev ? 0 : -1; 2303 open_count = atomic_read(&device->block->open_count);
2303 open_count = (int) atomic_read(&block->open_count);
2304 if (open_count > max_count) { 2304 if (open_count > max_count) {
2305 if (open_count > 0) 2305 if (open_count > 0)
2306 printk(KERN_WARNING "Can't offline dasd " 2306 printk(KERN_WARNING "Can't offline dasd "
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index b19db20a0bef..e6700df52df4 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1996,6 +1996,36 @@ dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
1996} /* end dasd_3990_erp_compound */ 1996} /* end dasd_3990_erp_compound */
1997 1997
1998/* 1998/*
1999 *DASD_3990_ERP_HANDLE_SIM
2000 *
2001 *DESCRIPTION
2002 * inspects the SIM SENSE data and starts an appropriate action
2003 *
2004 * PARAMETER
2005 * sense sense data of the actual error
2006 *
2007 * RETURN VALUES
2008 * none
2009 */
2010void
2011dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
2012{
2013 /* print message according to log or message to operator mode */
2014 if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
2015
2016 /* print SIM SRC from RefCode */
2017 DEV_MESSAGE(KERN_ERR, device, "SIM - SRC: "
2018 "%02x%02x%02x%02x", sense[22],
2019 sense[23], sense[11], sense[12]);
2020 } else if (sense[24] & DASD_SIM_LOG) {
2021 /* print SIM SRC Refcode */
2022 DEV_MESSAGE(KERN_WARNING, device, "SIM - SRC: "
2023 "%02x%02x%02x%02x", sense[22],
2024 sense[23], sense[11], sense[12]);
2025 }
2026}
2027
2028/*
1999 * DASD_3990_ERP_INSPECT_32 2029 * DASD_3990_ERP_INSPECT_32
2000 * 2030 *
2001 * DESCRIPTION 2031 * DESCRIPTION
@@ -2018,6 +2048,10 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2018 2048
2019 erp->function = dasd_3990_erp_inspect_32; 2049 erp->function = dasd_3990_erp_inspect_32;
2020 2050
2051 /* check for SIM sense data */
2052 if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
2053 dasd_3990_erp_handle_sim(device, sense);
2054
2021 if (sense[25] & DASD_SENSE_BIT_0) { 2055 if (sense[25] & DASD_SENSE_BIT_0) {
2022 2056
2023 /* compound program action codes (byte25 bit 0 == '1') */ 2057 /* compound program action codes (byte25 bit 0 == '1') */
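The SIM path keys off two sense bytes using the masks added to dasd_int.h further down: byte 6 must carry the full DASD_SIM_SENSE pattern for the data to be treated as a service information message at all, and byte 24 (plus bit 0x10 of byte 1) decides whether the service reference code is urgent enough for the operator or only worth logging; the SRC itself is assembled from sense bytes 22, 23, 11 and 12. In short:

    /* decode order used by the new handler (masks from the dasd_int.h hunk):
     *   (sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE     -> it is a SIM
     *   sense[24] & DASD_SIM_MSG_TO_OP or sense[1] & 0x10 -> KERN_ERR to operator
     *   sense[24] & DASD_SIM_LOG                          -> KERN_WARNING to log
     *   SRC printed as %02x%02x%02x%02x from sense[22], [23], [11], [12]        */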
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 3a40bee9d358..2d8df0b30538 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -745,6 +745,19 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
745 spin_unlock_irqrestore(&lcu->lock, flags); 745 spin_unlock_irqrestore(&lcu->lock, flags);
746} 746}
747 747
748static void __stop_device_on_lcu(struct dasd_device *device,
749 struct dasd_device *pos)
750{
751 /* If pos == device then device is already locked! */
752 if (pos == device) {
753 pos->stopped |= DASD_STOPPED_SU;
754 return;
755 }
756 spin_lock(get_ccwdev_lock(pos->cdev));
757 pos->stopped |= DASD_STOPPED_SU;
758 spin_unlock(get_ccwdev_lock(pos->cdev));
759}
760
748/* 761/*
749 * This function is called in interrupt context, so the 762 * This function is called in interrupt context, so the
750 * cdev lock for device is already locked! 763 * cdev lock for device is already locked!
@@ -755,35 +768,15 @@ static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
755 struct alias_pav_group *pavgroup; 768 struct alias_pav_group *pavgroup;
756 struct dasd_device *pos; 769 struct dasd_device *pos;
757 770
758 list_for_each_entry(pos, &lcu->active_devices, alias_list) { 771 list_for_each_entry(pos, &lcu->active_devices, alias_list)
759 if (pos != device) 772 __stop_device_on_lcu(device, pos);
760 spin_lock(get_ccwdev_lock(pos->cdev)); 773 list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
761 pos->stopped |= DASD_STOPPED_SU; 774 __stop_device_on_lcu(device, pos);
762 if (pos != device)
763 spin_unlock(get_ccwdev_lock(pos->cdev));
764 }
765 list_for_each_entry(pos, &lcu->inactive_devices, alias_list) {
766 if (pos != device)
767 spin_lock(get_ccwdev_lock(pos->cdev));
768 pos->stopped |= DASD_STOPPED_SU;
769 if (pos != device)
770 spin_unlock(get_ccwdev_lock(pos->cdev));
771 }
772 list_for_each_entry(pavgroup, &lcu->grouplist, group) { 775 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
773 list_for_each_entry(pos, &pavgroup->baselist, alias_list) { 776 list_for_each_entry(pos, &pavgroup->baselist, alias_list)
774 if (pos != device) 777 __stop_device_on_lcu(device, pos);
775 spin_lock(get_ccwdev_lock(pos->cdev)); 778 list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
776 pos->stopped |= DASD_STOPPED_SU; 779 __stop_device_on_lcu(device, pos);
777 if (pos != device)
778 spin_unlock(get_ccwdev_lock(pos->cdev));
779 }
780 list_for_each_entry(pos, &pavgroup->aliaslist, alias_list) {
781 if (pos != device)
782 spin_lock(get_ccwdev_lock(pos->cdev));
783 pos->stopped |= DASD_STOPPED_SU;
784 if (pos != device)
785 spin_unlock(get_ccwdev_lock(pos->cdev));
786 }
787 } 780 }
788} 781}
789 782
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 61f16937c1e0..a0edae091b5e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1415,6 +1415,13 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1415 return; 1415 return;
1416 } 1416 }
1417 1417
1418
1419 /* service information message SIM */
1420 if ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE) {
1421 dasd_3990_erp_handle_sim(device, irb->ecw);
1422 return;
1423 }
1424
1418 /* just report other unsolicited interrupts */ 1425 /* just report other unsolicited interrupts */
1419 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1426 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1420 "unsolicited interrupt received"); 1427 "unsolicited interrupt received");
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index d13ea05089a7..116611583df8 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -125,7 +125,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
125 125
126 private = (struct dasd_fba_private *) device->private; 126 private = (struct dasd_fba_private *) device->private;
127 if (private == NULL) { 127 if (private == NULL) {
128 private = kzalloc(sizeof(struct dasd_fba_private), GFP_KERNEL); 128 private = kzalloc(sizeof(struct dasd_fba_private),
129 GFP_KERNEL | GFP_DMA);
129 if (private == NULL) { 130 if (private == NULL) {
130 DEV_MESSAGE(KERN_WARNING, device, "%s", 131 DEV_MESSAGE(KERN_WARNING, device, "%s",
131 "memory allocation failed for private " 132 "memory allocation failed for private "
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 44b2984dfbee..6c624bf44617 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -72,6 +72,11 @@ struct dasd_block;
72#define DASD_SENSE_BIT_2 0x20 72#define DASD_SENSE_BIT_2 0x20
73#define DASD_SENSE_BIT_3 0x10 73#define DASD_SENSE_BIT_3 0x10
74 74
75/* BIT DEFINITIONS FOR SIM SENSE */
76#define DASD_SIM_SENSE 0x0F
77#define DASD_SIM_MSG_TO_OP 0x03
78#define DASD_SIM_LOG 0x0C
79
75/* 80/*
76 * SECTION: MACROs for klogd and s390 debug feature (dbf) 81 * SECTION: MACROs for klogd and s390 debug feature (dbf)
77 */ 82 */
@@ -621,6 +626,7 @@ void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
621 626
622/* externals in dasd_3990_erp.c */ 627/* externals in dasd_3990_erp.c */
623struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *); 628struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
629void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
624 630
625/* externals in dasd_eer.c */ 631/* externals in dasd_eer.c */
626#ifdef CONFIG_DASD_EER 632#ifdef CONFIG_DASD_EER
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index e6c94dbfdeaa..04787eab1016 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -142,57 +142,6 @@ dcssblk_get_device_by_name(char *name)
142 return NULL; 142 return NULL;
143} 143}
144 144
145/*
146 * print appropriate error message for segment_load()/segment_type()
147 * return code
148 */
149static void
150dcssblk_segment_warn(int rc, char* seg_name)
151{
152 switch (rc) {
153 case -ENOENT:
154 PRINT_WARN("cannot load/query segment %s, does not exist\n",
155 seg_name);
156 break;
157 case -ENOSYS:
158 PRINT_WARN("cannot load/query segment %s, not running on VM\n",
159 seg_name);
160 break;
161 case -EIO:
162 PRINT_WARN("cannot load/query segment %s, hardware error\n",
163 seg_name);
164 break;
165 case -ENOTSUPP:
166 PRINT_WARN("cannot load/query segment %s, is a multi-part "
167 "segment\n", seg_name);
168 break;
169 case -ENOSPC:
170 PRINT_WARN("cannot load/query segment %s, overlaps with "
171 "storage\n", seg_name);
172 break;
173 case -EBUSY:
174 PRINT_WARN("cannot load/query segment %s, overlaps with "
175 "already loaded dcss\n", seg_name);
176 break;
177 case -EPERM:
178 PRINT_WARN("cannot load/query segment %s, already loaded in "
179 "incompatible mode\n", seg_name);
180 break;
181 case -ENOMEM:
182 PRINT_WARN("cannot load/query segment %s, out of memory\n",
183 seg_name);
184 break;
185 case -ERANGE:
186 PRINT_WARN("cannot load/query segment %s, exceeds kernel "
187 "mapping range\n", seg_name);
188 break;
189 default:
190 PRINT_WARN("cannot load/query segment %s, return value %i\n",
191 seg_name, rc);
192 break;
193 }
194}
195
196static void dcssblk_unregister_callback(struct device *dev) 145static void dcssblk_unregister_callback(struct device *dev)
197{ 146{
198 device_unregister(dev); 147 device_unregister(dev);
@@ -423,7 +372,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
423 rc = segment_load(local_buf, SEGMENT_SHARED, 372 rc = segment_load(local_buf, SEGMENT_SHARED,
424 &dev_info->start, &dev_info->end); 373 &dev_info->start, &dev_info->end);
425 if (rc < 0) { 374 if (rc < 0) {
426 dcssblk_segment_warn(rc, dev_info->segment_name); 375 segment_warning(rc, dev_info->segment_name);
427 goto dealloc_gendisk; 376 goto dealloc_gendisk;
428 } 377 }
429 seg_byte_size = (dev_info->end - dev_info->start + 1); 378 seg_byte_size = (dev_info->end - dev_info->start + 1);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 67009bfa093e..1e1f50655bbf 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -111,56 +111,6 @@ static void dcss_mkname(char *ascii_name, char *ebcdic_name)
111 ASCEBC(ebcdic_name, 8); 111 ASCEBC(ebcdic_name, 8);
112} 112}
113 113
114/*
115 * print appropriate error message for segment_load()/segment_type()
116 * return code
117 */
118static void mon_segment_warn(int rc, char* seg_name)
119{
120 switch (rc) {
121 case -ENOENT:
122 P_WARNING("cannot load/query segment %s, does not exist\n",
123 seg_name);
124 break;
125 case -ENOSYS:
126 P_WARNING("cannot load/query segment %s, not running on VM\n",
127 seg_name);
128 break;
129 case -EIO:
130 P_WARNING("cannot load/query segment %s, hardware error\n",
131 seg_name);
132 break;
133 case -ENOTSUPP:
134 P_WARNING("cannot load/query segment %s, is a multi-part "
135 "segment\n", seg_name);
136 break;
137 case -ENOSPC:
138 P_WARNING("cannot load/query segment %s, overlaps with "
139 "storage\n", seg_name);
140 break;
141 case -EBUSY:
142 P_WARNING("cannot load/query segment %s, overlaps with "
143 "already loaded dcss\n", seg_name);
144 break;
145 case -EPERM:
146 P_WARNING("cannot load/query segment %s, already loaded in "
147 "incompatible mode\n", seg_name);
148 break;
149 case -ENOMEM:
150 P_WARNING("cannot load/query segment %s, out of memory\n",
151 seg_name);
152 break;
153 case -ERANGE:
154 P_WARNING("cannot load/query segment %s, exceeds kernel "
155 "mapping range\n", seg_name);
156 break;
157 default:
158 P_WARNING("cannot load/query segment %s, return value %i\n",
159 seg_name, rc);
160 break;
161 }
162}
163
164static inline unsigned long mon_mca_start(struct mon_msg *monmsg) 114static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
165{ 115{
166 return *(u32 *) &monmsg->msg.rmmsg; 116 return *(u32 *) &monmsg->msg.rmmsg;
@@ -585,7 +535,7 @@ static int __init mon_init(void)
585 535
586 rc = segment_type(mon_dcss_name); 536 rc = segment_type(mon_dcss_name);
587 if (rc < 0) { 537 if (rc < 0) {
588 mon_segment_warn(rc, mon_dcss_name); 538 segment_warning(rc, mon_dcss_name);
589 goto out_iucv; 539 goto out_iucv;
590 } 540 }
591 if (rc != SEG_TYPE_SC) { 541 if (rc != SEG_TYPE_SC) {
@@ -598,7 +548,7 @@ static int __init mon_init(void)
598 rc = segment_load(mon_dcss_name, SEGMENT_SHARED, 548 rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
599 &mon_dcss_start, &mon_dcss_end); 549 &mon_dcss_start, &mon_dcss_end);
600 if (rc < 0) { 550 if (rc < 0) {
601 mon_segment_warn(rc, mon_dcss_name); 551 segment_warning(rc, mon_dcss_name);
602 rc = -EINVAL; 552 rc = -EINVAL;
603 goto out_iucv; 553 goto out_iucv;
604 } 554 }
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 2e616e33891d..e3b3d390b4a3 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -332,7 +332,7 @@ sclp_tty_write_string(const unsigned char *str, int count)
332 if (sclp_ttybuf == NULL) { 332 if (sclp_ttybuf == NULL) {
333 while (list_empty(&sclp_tty_pages)) { 333 while (list_empty(&sclp_tty_pages)) {
334 spin_unlock_irqrestore(&sclp_tty_lock, flags); 334 spin_unlock_irqrestore(&sclp_tty_lock, flags);
335 if (in_atomic()) 335 if (in_interrupt())
336 sclp_sync_wait(); 336 sclp_sync_wait();
337 else 337 else
338 wait_event(sclp_tty_waitq, 338 wait_event(sclp_tty_waitq,
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index f7b258dfd52c..ed507594e62b 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -383,7 +383,7 @@ sclp_vt220_timeout(unsigned long data)
383 */ 383 */
384static int 384static int
385__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, 385__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
386 int convertlf) 386 int convertlf, int may_schedule)
387{ 387{
388 unsigned long flags; 388 unsigned long flags;
389 void *page; 389 void *page;
@@ -398,9 +398,8 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
398 /* Create a sclp output buffer if none exists yet */ 398 /* Create a sclp output buffer if none exists yet */
399 if (sclp_vt220_current_request == NULL) { 399 if (sclp_vt220_current_request == NULL) {
400 while (list_empty(&sclp_vt220_empty)) { 400 while (list_empty(&sclp_vt220_empty)) {
401 spin_unlock_irqrestore(&sclp_vt220_lock, 401 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
402 flags); 402 if (in_interrupt() || !may_schedule)
403 if (in_atomic())
404 sclp_sync_wait(); 403 sclp_sync_wait();
405 else 404 else
406 wait_event(sclp_vt220_waitq, 405 wait_event(sclp_vt220_waitq,
@@ -450,7 +449,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
450static int 449static int
451sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count) 450sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
452{ 451{
453 return __sclp_vt220_write(buf, count, 1, 0); 452 return __sclp_vt220_write(buf, count, 1, 0, 1);
454} 453}
455 454
456#define SCLP_VT220_SESSION_ENDED 0x01 455#define SCLP_VT220_SESSION_ENDED 0x01
@@ -529,7 +528,7 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
529static void 528static void
530sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) 529sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
531{ 530{
532 __sclp_vt220_write(&ch, 1, 0, 0); 531 __sclp_vt220_write(&ch, 1, 0, 0, 1);
533} 532}
534 533
535/* 534/*
@@ -746,7 +745,7 @@ __initcall(sclp_vt220_tty_init);
746static void 745static void
747sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count) 746sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
748{ 747{
749 __sclp_vt220_write((const unsigned char *) buf, count, 1, 1); 748 __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
750} 749}
751 750
752static struct tty_driver * 751static struct tty_driver *
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5b47e9cce75f..874adf365e46 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -394,7 +394,7 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
394 return tape_34xx_erp_failed(request, -ENOSPC); 394 return tape_34xx_erp_failed(request, -ENOSPC);
395 default: 395 default:
396 PRINT_ERR("Invalid op in %s:%i\n", 396 PRINT_ERR("Invalid op in %s:%i\n",
397 __FUNCTION__, __LINE__); 397 __func__, __LINE__);
398 return tape_34xx_erp_failed(request, 0); 398 return tape_34xx_erp_failed(request, 0);
399 } 399 }
400 } 400 }
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index b830a8cbef78..ebe84067bae9 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -83,9 +83,9 @@ tapechar_setup_device(struct tape_device * device)
83void 83void
84tapechar_cleanup_device(struct tape_device *device) 84tapechar_cleanup_device(struct tape_device *device)
85{ 85{
86 unregister_tape_dev(device->rt); 86 unregister_tape_dev(&device->cdev->dev, device->rt);
87 device->rt = NULL; 87 device->rt = NULL;
88 unregister_tape_dev(device->nt); 88 unregister_tape_dev(&device->cdev->dev, device->nt);
89 device->nt = NULL; 89 device->nt = NULL;
90} 90}
91 91
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index aa7f166f4034..6dfdb7c17981 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -99,11 +99,10 @@ fail_with_tcd:
99} 99}
100EXPORT_SYMBOL(register_tape_dev); 100EXPORT_SYMBOL(register_tape_dev);
101 101
102void unregister_tape_dev(struct tape_class_device *tcd) 102void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
103{ 103{
104 if (tcd != NULL && !IS_ERR(tcd)) { 104 if (tcd != NULL && !IS_ERR(tcd)) {
105 sysfs_remove_link(&tcd->class_device->kobj, 105 sysfs_remove_link(&device->kobj, tcd->mode_name);
106 tcd->mode_name);
107 device_destroy(tape_class, tcd->char_device->dev); 106 device_destroy(tape_class, tcd->char_device->dev);
108 cdev_del(tcd->char_device); 107 cdev_del(tcd->char_device);
109 kfree(tcd); 108 kfree(tcd);
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index e2b5ac918acf..707b7f48c232 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -56,6 +56,6 @@ struct tape_class_device *register_tape_dev(
56 char * device_name, 56 char * device_name,
57 char * node_name 57 char * node_name
58); 58);
59void unregister_tape_dev(struct tape_class_device *tcd); 59void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
60 60
61#endif /* __TAPE_CLASS_H__ */ 61#endif /* __TAPE_CLASS_H__ */
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 7689b500a104..83ae9a852f00 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -100,7 +100,8 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
100 urd->reclen = cdev->id.driver_info; 100 urd->reclen = cdev->id.driver_info;
101 ccw_device_get_id(cdev, &urd->dev_id); 101 ccw_device_get_id(cdev, &urd->dev_id);
102 mutex_init(&urd->io_mutex); 102 mutex_init(&urd->io_mutex);
103 mutex_init(&urd->open_mutex); 103 init_waitqueue_head(&urd->wait);
104 spin_lock_init(&urd->open_lock);
104 atomic_set(&urd->ref_count, 1); 105 atomic_set(&urd->ref_count, 1);
105 urd->cdev = cdev; 106 urd->cdev = cdev;
106 get_device(&cdev->dev); 107 get_device(&cdev->dev);
@@ -678,17 +679,21 @@ static int ur_open(struct inode *inode, struct file *file)
678 if (!urd) 679 if (!urd)
679 return -ENXIO; 680 return -ENXIO;
680 681
681 if (file->f_flags & O_NONBLOCK) { 682 spin_lock(&urd->open_lock);
682 if (!mutex_trylock(&urd->open_mutex)) { 683 while (urd->open_flag) {
684 spin_unlock(&urd->open_lock);
685 if (file->f_flags & O_NONBLOCK) {
683 rc = -EBUSY; 686 rc = -EBUSY;
684 goto fail_put; 687 goto fail_put;
685 } 688 }
686 } else { 689 if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
687 if (mutex_lock_interruptible(&urd->open_mutex)) {
688 rc = -ERESTARTSYS; 690 rc = -ERESTARTSYS;
689 goto fail_put; 691 goto fail_put;
690 } 692 }
693 spin_lock(&urd->open_lock);
691 } 694 }
695 urd->open_flag++;
696 spin_unlock(&urd->open_lock);
692 697
693 TRACE("ur_open\n"); 698 TRACE("ur_open\n");
694 699
@@ -720,7 +725,9 @@ static int ur_open(struct inode *inode, struct file *file)
720fail_urfile_free: 725fail_urfile_free:
721 urfile_free(urf); 726 urfile_free(urf);
722fail_unlock: 727fail_unlock:
723 mutex_unlock(&urd->open_mutex); 728 spin_lock(&urd->open_lock);
729 urd->open_flag--;
730 spin_unlock(&urd->open_lock);
724fail_put: 731fail_put:
725 urdev_put(urd); 732 urdev_put(urd);
726 return rc; 733 return rc;
@@ -731,7 +738,10 @@ static int ur_release(struct inode *inode, struct file *file)
731 struct urfile *urf = file->private_data; 738 struct urfile *urf = file->private_data;
732 739
733 TRACE("ur_release\n"); 740 TRACE("ur_release\n");
734 mutex_unlock(&urf->urd->open_mutex); 741 spin_lock(&urf->urd->open_lock);
742 urf->urd->open_flag--;
743 spin_unlock(&urf->urd->open_lock);
744 wake_up_interruptible(&urf->urd->wait);
735 urdev_put(urf->urd); 745 urdev_put(urf->urd);
736 urfile_free(urf); 746 urfile_free(urf);
737 return 0; 747 return 0;
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index fa959644735a..fa320ad4593d 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -62,7 +62,6 @@ struct file_control_block {
62struct urdev { 62struct urdev {
63 struct ccw_device *cdev; /* Backpointer to ccw device */ 63 struct ccw_device *cdev; /* Backpointer to ccw device */
64 struct mutex io_mutex; /* Serialises device IO */ 64 struct mutex io_mutex; /* Serialises device IO */
65 struct mutex open_mutex; /* Serialises access to device */
66 struct completion *io_done; /* do_ur_io waits; irq completes */ 65 struct completion *io_done; /* do_ur_io waits; irq completes */
67 struct device *device; 66 struct device *device;
68 struct cdev *char_device; 67 struct cdev *char_device;
@@ -71,6 +70,9 @@ struct urdev {
71 int class; /* VM device class */ 70 int class; /* VM device class */
72 int io_request_rc; /* return code from I/O request */ 71 int io_request_rc; /* return code from I/O request */
73 atomic_t ref_count; /* reference counter */ 72 atomic_t ref_count; /* reference counter */
73 wait_queue_head_t wait; /* wait queue to serialize open */
74 int open_flag; /* "urdev is open" flag */
75 spinlock_t open_lock; /* serialize critical sections */
74}; 76};
75 77
76/* 78/*
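The driver used to hold open_mutex from open() until release(); a mutex has to be unlocked by the task that locked it, which release() cannot guarantee, so the patch replaces it with an open_flag guarded by open_lock plus a wait queue. The resulting pattern, condensed into hypothetical helpers (the real logic is inline in ur_open() and ur_release()):

        static int urdev_claim(struct urdev *urd, int nonblock)
        {
                spin_lock(&urd->open_lock);
                while (urd->open_flag) {
                        spin_unlock(&urd->open_lock);
                        if (nonblock)
                                return -EBUSY;
                        /* Sleep until the current opener releases the device. */
                        if (wait_event_interruptible(urd->wait,
                                                     urd->open_flag == 0))
                                return -ERESTARTSYS;
                        spin_lock(&urd->open_lock);
                }
                urd->open_flag++;
                spin_unlock(&urd->open_lock);
                return 0;
        }

        static void urdev_unclaim(struct urdev *urd)
        {
                spin_lock(&urd->open_lock);
                urd->open_flag--;
                spin_unlock(&urd->open_lock);
                /* Wake up anyone blocked in urdev_claim(). */
                wake_up_interruptible(&urd->wait);
        }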
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 6f40facb1c4d..19f8389291b6 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -96,7 +96,7 @@ static int vmwdt_keepalive(void)
96 96
97 if (ret) { 97 if (ret) {
98 printk(KERN_WARNING "%s: problem setting interval %d, " 98 printk(KERN_WARNING "%s: problem setting interval %d, "
99 "cmd %s\n", __FUNCTION__, vmwdt_interval, 99 "cmd %s\n", __func__, vmwdt_interval,
100 vmwdt_cmd); 100 vmwdt_cmd);
101 } 101 }
102 return ret; 102 return ret;
@@ -107,7 +107,7 @@ static int vmwdt_disable(void)
107 int ret = __diag288(wdt_cancel, 0, "", 0); 107 int ret = __diag288(wdt_cancel, 0, "", 0);
108 if (ret) { 108 if (ret) {
109 printk(KERN_WARNING "%s: problem disabling watchdog\n", 109 printk(KERN_WARNING "%s: problem disabling watchdog\n",
110 __FUNCTION__); 110 __func__);
111 } 111 }
112 return ret; 112 return ret;
113} 113}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index f523501e6e6c..bbbd14e9d48f 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -224,7 +224,7 @@ static int __init init_cpu_info(enum arch_id arch)
224 224
225 sa = kmalloc(sizeof(*sa), GFP_KERNEL); 225 sa = kmalloc(sizeof(*sa), GFP_KERNEL);
226 if (!sa) { 226 if (!sa) {
227 ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__); 227 ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__);
228 return -ENOMEM; 228 return -ENOMEM;
229 } 229 }
230 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { 230 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 007aaeb4f532..5de86908b0d0 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -217,6 +217,8 @@ void chsc_chp_offline(struct chp_id chpid)
217 217
218 if (chp_get_status(chpid) <= 0) 218 if (chp_get_status(chpid) <= 0)
219 return; 219 return;
220 /* Wait until previous actions have settled. */
221 css_wait_for_slow_path();
220 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid); 222 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
221} 223}
222 224
@@ -303,7 +305,8 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
303 sprintf(dbf_txt, "fla%x", res_data->fla); 305 sprintf(dbf_txt, "fla%x", res_data->fla);
304 CIO_TRACE_EVENT( 2, dbf_txt); 306 CIO_TRACE_EVENT( 2, dbf_txt);
305 } 307 }
306 308 /* Wait until previous actions have settled. */
309 css_wait_for_slow_path();
307 /* 310 /*
308 * I/O resources may have become accessible. 311 * I/O resources may have become accessible.
309 * Scan through all subchannels that may be concerned and 312 * Scan through all subchannels that may be concerned and
@@ -561,9 +564,12 @@ void chsc_chp_online(struct chp_id chpid)
561 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); 564 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
562 CIO_TRACE_EVENT(2, dbf_txt); 565 CIO_TRACE_EVENT(2, dbf_txt);
563 566
564 if (chp_get_status(chpid) != 0) 567 if (chp_get_status(chpid) != 0) {
568 /* Wait until previous actions have settled. */
569 css_wait_for_slow_path();
565 for_each_subchannel_staged(__chp_add, __chp_add_new_sch, 570 for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
566 &chpid); 571 &chpid);
572 }
567} 573}
568 574
569static void __s390_subchannel_vary_chpid(struct subchannel *sch, 575static void __s390_subchannel_vary_chpid(struct subchannel *sch,
@@ -650,6 +656,8 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
650 */ 656 */
651int chsc_chp_vary(struct chp_id chpid, int on) 657int chsc_chp_vary(struct chp_id chpid, int on)
652{ 658{
659 /* Wait until previous actions have settled. */
660 css_wait_for_slow_path();
653 /* 661 /*
654 * Redo PathVerification on the devices the chpid connects to 662 * Redo PathVerification on the devices the chpid connects to
655 */ 663 */
@@ -758,7 +766,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
758 if (!secm_area) 766 if (!secm_area)
759 return -ENOMEM; 767 return -ENOMEM;
760 768
761 mutex_lock(&css->mutex);
762 if (enable && !css->cm_enabled) { 769 if (enable && !css->cm_enabled) {
763 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 770 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
764 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 771 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
@@ -766,7 +773,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
766 free_page((unsigned long)css->cub_addr1); 773 free_page((unsigned long)css->cub_addr1);
767 free_page((unsigned long)css->cub_addr2); 774 free_page((unsigned long)css->cub_addr2);
768 free_page((unsigned long)secm_area); 775 free_page((unsigned long)secm_area);
769 mutex_unlock(&css->mutex);
770 return -ENOMEM; 776 return -ENOMEM;
771 } 777 }
772 } 778 }
@@ -787,7 +793,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
787 free_page((unsigned long)css->cub_addr1); 793 free_page((unsigned long)css->cub_addr1);
788 free_page((unsigned long)css->cub_addr2); 794 free_page((unsigned long)css->cub_addr2);
789 } 795 }
790 mutex_unlock(&css->mutex);
791 free_page((unsigned long)secm_area); 796 free_page((unsigned long)secm_area);
792 return ret; 797 return ret;
793} 798}
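With the mutex_lock()/mutex_unlock() pair removed from chsc_secm(), css->mutex is now expected to be held by the callers, which lets them read and update css->cm_enabled under the same lock (see the css.c hunks further down). A hypothetical caller would follow the same pattern:

        static int set_cm_state(struct channel_subsystem *css, int enable)
        {
                int ret;

                mutex_lock(&css->mutex);        /* covers cm_enabled and chsc_secm() */
                ret = chsc_secm(css, enable);
                mutex_unlock(&css->mutex);
                return ret;
        }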
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 60590a12d529..23ffcc4768a7 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -24,6 +24,7 @@
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h> 25#include <asm/chpid.h>
26#include <asm/airq.h> 26#include <asm/airq.h>
27#include <asm/cpu.h>
27#include "cio.h" 28#include "cio.h"
28#include "css.h" 29#include "css.h"
29#include "chsc.h" 30#include "chsc.h"
@@ -649,13 +650,10 @@ do_IRQ (struct pt_regs *regs)
649 650
650 old_regs = set_irq_regs(regs); 651 old_regs = set_irq_regs(regs);
651 irq_enter(); 652 irq_enter();
652 asm volatile ("mc 0,0"); 653 s390_idle_check();
653 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) 654 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
654 /** 655 /* Serve timer interrupts first. */
655 * Make sure that the i/o interrupt did not "overtake" 656 clock_comparator_work();
656 * the last HZ timer interrupt.
657 */
658 account_ticks(S390_lowcore.int_clock);
659 /* 657 /*
660 * Get interrupt information from lowcore 658 * Get interrupt information from lowcore
661 */ 659 */
@@ -672,10 +670,14 @@ do_IRQ (struct pt_regs *regs)
672 continue; 670 continue;
673 } 671 }
674 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 672 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
675 if (sch) 673 if (!sch) {
676 spin_lock(sch->lock); 674 /* Clear pending interrupt condition. */
675 tsch(tpi_info->schid, irb);
676 continue;
677 }
678 spin_lock(sch->lock);
677 /* Store interrupt response block to lowcore. */ 679 /* Store interrupt response block to lowcore. */
678 if (tsch (tpi_info->schid, irb) == 0 && sch) { 680 if (tsch(tpi_info->schid, irb) == 0) {
679 /* Keep subchannel information word up to date. */ 681 /* Keep subchannel information word up to date. */
680 memcpy (&sch->schib.scsw, &irb->scsw, 682 memcpy (&sch->schib.scsw, &irb->scsw,
681 sizeof (irb->scsw)); 683 sizeof (irb->scsw));
@@ -683,8 +685,7 @@ do_IRQ (struct pt_regs *regs)
683 if (sch->driver && sch->driver->irq) 685 if (sch->driver && sch->driver->irq)
684 sch->driver->irq(sch); 686 sch->driver->irq(sch);
685 } 687 }
686 if (sch) 688 spin_unlock(sch->lock);
687 spin_unlock(sch->lock);
688 /* 689 /*
689 * Are more interrupts pending? 690 * Are more interrupts pending?
690 * If so, the tpi instruction will update the lowcore 691 * If so, the tpi instruction will update the lowcore
@@ -710,8 +711,9 @@ void *cio_get_console_priv(void)
710/* 711/*
711 * busy wait for the next interrupt on the console 712 * busy wait for the next interrupt on the console
712 */ 713 */
713void 714void wait_cons_dev(void)
714wait_cons_dev (void) 715 __releases(console_subchannel.lock)
716 __acquires(console_subchannel.lock)
715{ 717{
716 unsigned long cr6 __attribute__ ((aligned (8))); 718 unsigned long cr6 __attribute__ ((aligned (8)));
717 unsigned long save_cr6 __attribute__ ((aligned (8))); 719 unsigned long save_cr6 __attribute__ ((aligned (8)));
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 52afa4c784de..08f2235c5a6f 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -100,6 +100,7 @@ extern int cio_modify (struct subchannel *);
100 100
101int cio_create_sch_lock(struct subchannel *); 101int cio_create_sch_lock(struct subchannel *);
102void do_adapter_IO(void); 102void do_adapter_IO(void);
103void do_IRQ(struct pt_regs *);
103 104
104/* Use with care. */ 105/* Use with care. */
105#ifdef CONFIG_CCW_CONSOLE 106#ifdef CONFIG_CCW_CONSOLE
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3b45bbe6cce0..c1afab5f72d6 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -533,6 +533,12 @@ void css_schedule_eval_all(void)
533 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 533 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
534} 534}
535 535
536void css_wait_for_slow_path(void)
537{
538 flush_workqueue(ccw_device_notify_work);
539 flush_workqueue(slow_path_wq);
540}
541
536/* Reprobe subchannel if unregistered. */ 542/* Reprobe subchannel if unregistered. */
537static int reprobe_subchannel(struct subchannel_id schid, void *data) 543static int reprobe_subchannel(struct subchannel_id schid, void *data)
538{ 544{
@@ -683,10 +689,14 @@ css_cm_enable_show(struct device *dev, struct device_attribute *attr,
683 char *buf) 689 char *buf)
684{ 690{
685 struct channel_subsystem *css = to_css(dev); 691 struct channel_subsystem *css = to_css(dev);
692 int ret;
686 693
687 if (!css) 694 if (!css)
688 return 0; 695 return 0;
689 return sprintf(buf, "%x\n", css->cm_enabled); 696 mutex_lock(&css->mutex);
697 ret = sprintf(buf, "%x\n", css->cm_enabled);
698 mutex_unlock(&css->mutex);
699 return ret;
690} 700}
691 701
692static ssize_t 702static ssize_t
@@ -696,6 +706,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
696 struct channel_subsystem *css = to_css(dev); 706 struct channel_subsystem *css = to_css(dev);
697 int ret; 707 int ret;
698 708
709 mutex_lock(&css->mutex);
699 switch (buf[0]) { 710 switch (buf[0]) {
700 case '0': 711 case '0':
701 ret = css->cm_enabled ? chsc_secm(css, 0) : 0; 712 ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
@@ -706,6 +717,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
706 default: 717 default:
707 ret = -EINVAL; 718 ret = -EINVAL;
708 } 719 }
720 mutex_unlock(&css->mutex);
709 return ret < 0 ? ret : count; 721 return ret < 0 ? ret : count;
710} 722}
711 723
@@ -752,9 +764,11 @@ static int css_reboot_event(struct notifier_block *this,
752 struct channel_subsystem *css; 764 struct channel_subsystem *css;
753 765
754 css = channel_subsystems[i]; 766 css = channel_subsystems[i];
767 mutex_lock(&css->mutex);
755 if (css->cm_enabled) 768 if (css->cm_enabled)
756 if (chsc_secm(css, 0)) 769 if (chsc_secm(css, 0))
757 ret = NOTIFY_BAD; 770 ret = NOTIFY_BAD;
771 mutex_unlock(&css->mutex);
758 } 772 }
759 773
760 return ret; 774 return ret;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index b70554523552..e1913518f354 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -144,6 +144,7 @@ struct schib;
144int css_sch_is_valid(struct schib *); 144int css_sch_is_valid(struct schib *);
145 145
146extern struct workqueue_struct *slow_path_wq; 146extern struct workqueue_struct *slow_path_wq;
147void css_wait_for_slow_path(void);
147 148
148extern struct attribute_group *subch_attr_groups[]; 149extern struct attribute_group *subch_attr_groups[];
149#endif 150#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index fec004f62bcf..e0c7adb8958e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -577,7 +577,6 @@ static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
577static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); 577static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
578static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); 578static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
579static DEVICE_ATTR(online, 0644, online_show, online_store); 579static DEVICE_ATTR(online, 0644, online_show, online_store);
580extern struct device_attribute dev_attr_cmb_enable;
581static DEVICE_ATTR(availability, 0444, available_show, NULL); 580static DEVICE_ATTR(availability, 0444, available_show, NULL);
582 581
583static struct attribute * subch_attrs[] = { 582static struct attribute * subch_attrs[] = {
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index d40a2ffaa000..cb08092be39f 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -127,4 +127,5 @@ extern struct bus_type ccw_bus_type;
127void retry_set_schib(struct ccw_device *cdev); 127void retry_set_schib(struct ccw_device *cdev);
128void cmf_retry_copy_block(struct ccw_device *); 128void cmf_retry_copy_block(struct ccw_device *);
129int cmf_reenable(struct ccw_device *); 129int cmf_reenable(struct ccw_device *);
130extern struct device_attribute dev_attr_cmb_enable;
130#endif 131#endif
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 49b58eb0fab8..a1718a0aa539 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -193,8 +193,15 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
193 return -EACCES; 193 return -EACCES;
194 } 194 }
195 ret = cio_start_key (sch, cpa, lpm, key); 195 ret = cio_start_key (sch, cpa, lpm, key);
196 if (ret == 0) 196 switch (ret) {
197 case 0:
197 cdev->private->intparm = intparm; 198 cdev->private->intparm = intparm;
199 break;
200 case -EACCES:
201 case -ENODEV:
202 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
203 break;
204 }
198 return ret; 205 return ret;
199} 206}
200 207
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index ebe0848cfe33..4a38993000f2 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -62,7 +62,7 @@ ccw_device_path_notoper(struct ccw_device *cdev)
62 stsch (sch->schid, &sch->schib); 62 stsch (sch->schid, &sch->schib);
63 63
64 CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " 64 CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
65 "not operational \n", __FUNCTION__, 65 "not operational \n", __func__,
66 sch->schid.ssid, sch->schid.sch_no, 66 sch->schid.ssid, sch->schid.sch_no,
67 sch->schib.pmcw.pnom); 67 sch->schib.pmcw.pnom);
68 68
@@ -312,6 +312,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
312{ 312{
313 struct subchannel *sch; 313 struct subchannel *sch;
314 struct ccw1 *sense_ccw; 314 struct ccw1 *sense_ccw;
315 int rc;
315 316
316 sch = to_subchannel(cdev->dev.parent); 317 sch = to_subchannel(cdev->dev.parent);
317 318
@@ -337,7 +338,10 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
337 /* Reset internal retry indication. */ 338 /* Reset internal retry indication. */
338 cdev->private->flags.intretry = 0; 339 cdev->private->flags.intretry = 0;
339 340
340 return cio_start(sch, sense_ccw, 0xff); 341 rc = cio_start(sch, sense_ccw, 0xff);
342 if (rc == -ENODEV || rc == -EACCES)
343 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
344 return rc;
341} 345}
342 346
343/* 347/*
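The device_ops.c and device_status.c hunks above apply the same rule: when starting channel I/O fails with -ENODEV or -EACCES, the path mask is considered stale and the device FSM is asked to re-verify the paths. Condensed into a hypothetical helper (start_and_verify() is not part of the patch):

        static int start_and_verify(struct ccw_device *cdev,
                                    struct subchannel *sch,
                                    struct ccw1 *cpa, __u8 lpm)
        {
                int rc = cio_start(sch, cpa, lpm);

                if (rc == -ENODEV || rc == -EACCES)
                        /* Path state changed - trigger path verification. */
                        dev_fsm_event(cdev, DEV_EVENT_VERIFY);
                return rc;
        }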
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 2b5bfb7c69e5..c359386708e9 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -1399,7 +1399,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1399 * q->dev_st_chg_ind is the indicator, be it shared or not. 1399 * q->dev_st_chg_ind is the indicator, be it shared or not.
1400 * only clear it, if indicator is non-shared 1400 * only clear it, if indicator is non-shared
1401 */ 1401 */
1402 if (!spare_ind_was_set) 1402 if (q->dev_st_chg_ind != &spare_indicator)
1403 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind); 1403 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
1404 1404
1405 if (q->hydra_gives_outbound_pcis) { 1405 if (q->hydra_gives_outbound_pcis) {
@@ -2217,9 +2217,78 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2217 return cc; 2217 return cc;
2218} 2218}
2219 2219
2220static int
2221qdio_get_ssqd_information(struct subchannel_id *schid,
2222 struct qdio_chsc_ssqd **ssqd_area)
2223{
2224 int result;
2225
2226 QDIO_DBF_TEXT0(0, setup, "getssqd");
2227 *ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
 2228 if (!*ssqd_area) {
2229 QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n",
2230 schid->sch_no);
2231 return -ENOMEM;
2232 }
2233
2234 (*ssqd_area)->request = (struct chsc_header) {
2235 .length = 0x0010,
2236 .code = 0x0024,
2237 };
2238 (*ssqd_area)->first_sch = schid->sch_no;
2239 (*ssqd_area)->last_sch = schid->sch_no;
2240 (*ssqd_area)->ssid = schid->ssid;
2241 result = chsc(*ssqd_area);
2242
2243 if (result) {
2244 QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n",
2245 result, schid->ssid, schid->sch_no);
2246 goto out;
2247 }
2248
2249 if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2250 QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n",
2251 (*ssqd_area)->response.code,
2252 schid->ssid, schid->sch_no);
2253 goto out;
2254 }
2255 if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2256 !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) ||
2257 ((*ssqd_area)->sch != schid->sch_no)) {
2258 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2259 "using all SIGAs.\n",
2260 schid->ssid, schid->sch_no);
2261 goto out;
2262 }
2263 return 0;
2264out:
2265 return -EINVAL;
2266}
2267
2268int
2269qdio_get_ssqd_pct(struct ccw_device *cdev)
2270{
2271 struct qdio_chsc_ssqd *ssqd_area;
2272 struct subchannel_id schid;
2273 char dbf_text[15];
2274 int rc;
2275 int pct = 0;
2276
2277 QDIO_DBF_TEXT0(0, setup, "getpct");
2278 schid = ccw_device_get_subchannel_id(cdev);
2279 rc = qdio_get_ssqd_information(&schid, &ssqd_area);
2280 if (!rc)
2281 pct = (int)ssqd_area->pct;
2282 if (rc != -ENOMEM)
2283 mempool_free(ssqd_area, qdio_mempool_scssc);
2284 sprintf(dbf_text, "pct: %d", pct);
2285 QDIO_DBF_TEXT2(0, setup, dbf_text);
2286 return pct;
2287}
2288EXPORT_SYMBOL(qdio_get_ssqd_pct);
2289
2220static void 2290static void
2221qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, 2291qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token)
2222 unsigned long token)
2223{ 2292{
2224 struct qdio_q *q; 2293 struct qdio_q *q;
2225 int i; 2294 int i;
@@ -2227,7 +2296,7 @@ qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2227 char dbf_text[15]; 2296 char dbf_text[15];
2228 2297
2229 /*check if QEBSM is disabled */ 2298 /*check if QEBSM is disabled */
2230 if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) { 2299 if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) {
2231 irq_ptr->is_qebsm = 0; 2300 irq_ptr->is_qebsm = 0;
2232 irq_ptr->sch_token = 0; 2301 irq_ptr->sch_token = 0;
2233 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; 2302 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
@@ -2256,102 +2325,27 @@ qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2256} 2325}
2257 2326
2258static void 2327static void
2259qdio_get_ssqd_information(struct qdio_irq *irq_ptr) 2328qdio_get_ssqd_siga(struct qdio_irq *irq_ptr)
2260{ 2329{
2261 int result; 2330 int rc;
2262 unsigned char qdioac; 2331 struct qdio_chsc_ssqd *ssqd_area;
2263 struct {
2264 struct chsc_header request;
2265 u16 reserved1:10;
2266 u16 ssid:2;
2267 u16 fmt:4;
2268 u16 first_sch;
2269 u16 reserved2;
2270 u16 last_sch;
2271 u32 reserved3;
2272 struct chsc_header response;
2273 u32 reserved4;
2274 u8 flags;
2275 u8 reserved5;
2276 u16 sch;
2277 u8 qfmt;
2278 u8 parm;
2279 u8 qdioac1;
2280 u8 sch_class;
2281 u8 reserved7;
2282 u8 icnt;
2283 u8 reserved8;
2284 u8 ocnt;
2285 u8 reserved9;
2286 u8 mbccnt;
2287 u16 qdioac2;
2288 u64 sch_token;
2289 } *ssqd_area;
2290 2332
2291 QDIO_DBF_TEXT0(0,setup,"getssqd"); 2333 QDIO_DBF_TEXT0(0,setup,"getssqd");
2292 qdioac = 0; 2334 irq_ptr->qdioac = 0;
2293 ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); 2335 rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area);
2294 if (!ssqd_area) { 2336 if (rc) {
2295 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ 2337 QDIO_PRINT_WARN("using all SIGAs for sch x%x.n",
2296 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no); 2338 irq_ptr->schid.sch_no);
2297 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | 2339 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2298 CHSC_FLAG_SIGA_OUTPUT_NECESSARY | 2340 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2299 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2341 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2300 irq_ptr->is_qebsm = 0; 2342 irq_ptr->is_qebsm = 0;
2301 irq_ptr->sch_token = 0; 2343 } else
2302 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; 2344 irq_ptr->qdioac = ssqd_area->qdioac1;
2303 return;
2304 }
2305
2306 ssqd_area->request = (struct chsc_header) {
2307 .length = 0x0010,
2308 .code = 0x0024,
2309 };
2310 ssqd_area->first_sch = irq_ptr->schid.sch_no;
2311 ssqd_area->last_sch = irq_ptr->schid.sch_no;
2312 ssqd_area->ssid = irq_ptr->schid.ssid;
2313 result = chsc(ssqd_area);
2314
2315 if (result) {
2316 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
2317 "SIGAs for sch 0.%x.%x.\n", result,
2318 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2319 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2320 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2321 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2322 irq_ptr->is_qebsm = 0;
2323 goto out;
2324 }
2325 2345
2326 if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { 2346 qdio_check_subchannel_qebsm(irq_ptr, ssqd_area->sch_token);
2327 QDIO_PRINT_WARN("response upon checking SIGA needs " \ 2347 if (rc != -ENOMEM)
2328 "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n", 2348 mempool_free(ssqd_area, qdio_mempool_scssc);
2329 ssqd_area->response.code,
2330 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2331 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2332 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2333 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2334 irq_ptr->is_qebsm = 0;
2335 goto out;
2336 }
2337 if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2338 !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
2339 (ssqd_area->sch != irq_ptr->schid.sch_no)) {
2340 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2341 "using all SIGAs.\n",
2342 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2343 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2344 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2345 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
2346 irq_ptr->is_qebsm = 0;
2347 goto out;
2348 }
2349 qdioac = ssqd_area->qdioac1;
2350out:
2351 qdio_check_subchannel_qebsm(irq_ptr, qdioac,
2352 ssqd_area->sch_token);
2353 mempool_free(ssqd_area, qdio_mempool_scssc);
2354 irq_ptr->qdioac = qdioac;
2355} 2349}
2356 2350
2357static unsigned int 2351static unsigned int
@@ -3227,7 +3221,7 @@ qdio_establish(struct qdio_initialize *init_data)
3227 return -EIO; 3221 return -EIO;
3228 } 3222 }
3229 3223
3230 qdio_get_ssqd_information(irq_ptr); 3224 qdio_get_ssqd_siga(irq_ptr);
3231 /* if this gets set once, we're running under VM and can omit SVSes */ 3225 /* if this gets set once, we're running under VM and can omit SVSes */
3232 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) 3226 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
3233 omit_svs=1; 3227 omit_svs=1;
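qdio_get_ssqd_pct() is exported, so a qdio user can read the pct byte of its subchannel's CHSC SSQD block directly from the ccw device; it returns 0 whenever the CHSC data cannot be obtained. A minimal, hypothetical caller:

        static void report_pct(struct ccw_device *cdev)
        {
                int pct = qdio_get_ssqd_pct(cdev);

                /* pct == 0 also covers "CHSC data not available". */
                dev_info(&cdev->dev, "ssqd pct value: %d\n", pct);
        }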
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index da8a272fd75b..c3df6b2c38b7 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -406,6 +406,34 @@ do_clear_global_summary(void)
406#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 406#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
407#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 407#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
408 408
409struct qdio_chsc_ssqd {
410 struct chsc_header request;
411 u16 reserved1:10;
412 u16 ssid:2;
413 u16 fmt:4;
414 u16 first_sch;
415 u16 reserved2;
416 u16 last_sch;
417 u32 reserved3;
418 struct chsc_header response;
419 u32 reserved4;
420 u8 flags;
421 u8 reserved5;
422 u16 sch;
423 u8 qfmt;
424 u8 parm;
425 u8 qdioac1;
426 u8 sch_class;
427 u8 pct;
428 u8 icnt;
429 u8 reserved7;
430 u8 ocnt;
431 u8 reserved8;
432 u8 mbccnt;
433 u16 qdioac2;
434 u64 sch_token;
435};
436
409struct qdio_perf_stats { 437struct qdio_perf_stats {
410#ifdef CONFIG_64BIT 438#ifdef CONFIG_64BIT
411 atomic64_t tl_runs; 439 atomic64_t tl_runs;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 7b0b81901297..a1ab3e3efd11 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -45,7 +45,7 @@ static int ap_poll_thread_start(void);
45static void ap_poll_thread_stop(void); 45static void ap_poll_thread_stop(void);
46static void ap_request_timeout(unsigned long); 46static void ap_request_timeout(unsigned long);
47 47
48/** 48/*
49 * Module description. 49 * Module description.
50 */ 50 */
51MODULE_AUTHOR("IBM Corporation"); 51MODULE_AUTHOR("IBM Corporation");
@@ -53,7 +53,7 @@ MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
53 "Copyright 2006 IBM Corporation"); 53 "Copyright 2006 IBM Corporation");
54MODULE_LICENSE("GPL"); 54MODULE_LICENSE("GPL");
55 55
56/** 56/*
57 * Module parameter 57 * Module parameter
58 */ 58 */
59int ap_domain_index = -1; /* Adjunct Processor Domain Index */ 59int ap_domain_index = -1; /* Adjunct Processor Domain Index */
@@ -69,7 +69,7 @@ static struct device *ap_root_device = NULL;
69static DEFINE_SPINLOCK(ap_device_lock); 69static DEFINE_SPINLOCK(ap_device_lock);
70static LIST_HEAD(ap_device_list); 70static LIST_HEAD(ap_device_list);
71 71
72/** 72/*
73 * Workqueue & timer for bus rescan. 73 * Workqueue & timer for bus rescan.
74 */ 74 */
75static struct workqueue_struct *ap_work_queue; 75static struct workqueue_struct *ap_work_queue;
@@ -77,7 +77,7 @@ static struct timer_list ap_config_timer;
77static int ap_config_time = AP_CONFIG_TIME; 77static int ap_config_time = AP_CONFIG_TIME;
78static DECLARE_WORK(ap_config_work, ap_scan_bus); 78static DECLARE_WORK(ap_config_work, ap_scan_bus);
79 79
80/** 80/*
81 * Tasklet & timer for AP request polling. 81 * Tasklet & timer for AP request polling.
82 */ 82 */
83static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0); 83static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
@@ -88,9 +88,9 @@ static struct task_struct *ap_poll_kthread = NULL;
88static DEFINE_MUTEX(ap_poll_thread_mutex); 88static DEFINE_MUTEX(ap_poll_thread_mutex);
89 89
90/** 90/**
 91 * Test if ap instructions are available. 91 * ap_instructions_available() - Test if AP instructions are available.
92 * 92 *
93 * Returns 0 if the ap instructions are installed. 93 * Returns 0 if the AP instructions are installed.
94 */ 94 */
95static inline int ap_instructions_available(void) 95static inline int ap_instructions_available(void)
96{ 96{
@@ -108,12 +108,12 @@ static inline int ap_instructions_available(void)
108} 108}
109 109
110/** 110/**
111 * Test adjunct processor queue. 111 * ap_test_queue(): Test adjunct processor queue.
112 * @qid: the ap queue number 112 * @qid: The AP queue number
113 * @queue_depth: pointer to queue depth value 113 * @queue_depth: Pointer to queue depth value
114 * @device_type: pointer to device type value 114 * @device_type: Pointer to device type value
115 * 115 *
116 * Returns ap queue status structure. 116 * Returns AP queue status structure.
117 */ 117 */
118static inline struct ap_queue_status 118static inline struct ap_queue_status
119ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type) 119ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
@@ -130,10 +130,10 @@ ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
130} 130}
131 131
132/** 132/**
133 * Reset adjunct processor queue. 133 * ap_reset_queue(): Reset adjunct processor queue.
134 * @qid: the ap queue number 134 * @qid: The AP queue number
135 * 135 *
136 * Returns ap queue status structure. 136 * Returns AP queue status structure.
137 */ 137 */
138static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) 138static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
139{ 139{
@@ -148,16 +148,14 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
148} 148}
149 149
150/** 150/**
151 * Send message to adjunct processor queue. 151 * __ap_send(): Send message to adjunct processor queue.
152 * @qid: the ap queue number 152 * @qid: The AP queue number
153 * @psmid: the program supplied message identifier 153 * @psmid: The program supplied message identifier
154 * @msg: the message text 154 * @msg: The message text
155 * @length: the message length 155 * @length: The message length
156 *
157 * Returns ap queue status structure.
158 * 156 *
157 * Returns AP queue status structure.
159 * Condition code 1 on NQAP can't happen because the L bit is 1. 158 * Condition code 1 on NQAP can't happen because the L bit is 1.
160 *
161 * Condition code 2 on NQAP also means the send is incomplete, 159 * Condition code 2 on NQAP also means the send is incomplete,
162 * because a segment boundary was reached. The NQAP is repeated. 160 * because a segment boundary was reached. The NQAP is repeated.
163 */ 161 */
@@ -198,23 +196,20 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
198} 196}
199EXPORT_SYMBOL(ap_send); 197EXPORT_SYMBOL(ap_send);
200 198
201/* 199/**
202 * Receive message from adjunct processor queue. 200 * __ap_recv(): Receive message from adjunct processor queue.
203 * @qid: the ap queue number 201 * @qid: The AP queue number
204 * @psmid: pointer to program supplied message identifier 202 * @psmid: Pointer to program supplied message identifier
205 * @msg: the message text 203 * @msg: The message text
206 * @length: the message length 204 * @length: The message length
207 *
208 * Returns ap queue status structure.
209 * 205 *
206 * Returns AP queue status structure.
210 * Condition code 1 on DQAP means the receive has taken place 207 * Condition code 1 on DQAP means the receive has taken place
211 * but only partially. The response is incomplete, hence the 208 * but only partially. The response is incomplete, hence the
212 * DQAP is repeated. 209 * DQAP is repeated.
213 *
214 * Condition code 2 on DQAP also means the receive is incomplete, 210 * Condition code 2 on DQAP also means the receive is incomplete,
215 * this time because a segment boundary was reached. Again, the 211 * this time because a segment boundary was reached. Again, the
216 * DQAP is repeated. 212 * DQAP is repeated.
217 *
218 * Note that gpr2 is used by the DQAP instruction to keep track of 213 * Note that gpr2 is used by the DQAP instruction to keep track of
219 * any 'residual' length, in case the instruction gets interrupted. 214 * any 'residual' length, in case the instruction gets interrupted.
220 * Hence it gets zeroed before the instruction. 215 * Hence it gets zeroed before the instruction.
@@ -263,11 +258,12 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
263EXPORT_SYMBOL(ap_recv); 258EXPORT_SYMBOL(ap_recv);
264 259
265/** 260/**
266 * Check if an AP queue is available. The test is repeated for 261 * ap_query_queue(): Check if an AP queue is available.
267 * AP_MAX_RESET times. 262 * @qid: The AP queue number
268 * @qid: the ap queue number 263 * @queue_depth: Pointer to queue depth value
269 * @queue_depth: pointer to queue depth value 264 * @device_type: Pointer to device type value
270 * @device_type: pointer to device type value 265 *
266 * The test is repeated for AP_MAX_RESET times.
271 */ 267 */
272static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) 268static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
273{ 269{
@@ -308,8 +304,10 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
308} 304}
309 305
310/** 306/**
307 * ap_init_queue(): Reset an AP queue.
308 * @qid: The AP queue number
309 *
311 * Reset an AP queue and wait for it to become available again. 310 * Reset an AP queue and wait for it to become available again.
312 * @qid: the ap queue number
313 */ 311 */
314static int ap_init_queue(ap_qid_t qid) 312static int ap_init_queue(ap_qid_t qid)
315{ 313{
@@ -346,7 +344,10 @@ static int ap_init_queue(ap_qid_t qid)
346} 344}
347 345
348/** 346/**
349 * Arm request timeout if a AP device was idle and a new request is submitted. 347 * ap_increase_queue_count(): Arm request timeout.
348 * @ap_dev: Pointer to an AP device.
349 *
350 * Arm request timeout if an AP device was idle and a new request is submitted.
350 */ 351 */
351static void ap_increase_queue_count(struct ap_device *ap_dev) 352static void ap_increase_queue_count(struct ap_device *ap_dev)
352{ 353{
@@ -360,7 +361,10 @@ static void ap_increase_queue_count(struct ap_device *ap_dev)
360} 361}
361 362
362/** 363/**
363 * AP device is still alive, re-schedule request timeout if there are still 364 * ap_decrease_queue_count(): Decrease queue count.
365 * @ap_dev: Pointer to an AP device.
366 *
367 * If AP device is still alive, re-schedule request timeout if there are still
364 * pending requests. 368 * pending requests.
365 */ 369 */
366static void ap_decrease_queue_count(struct ap_device *ap_dev) 370static void ap_decrease_queue_count(struct ap_device *ap_dev)
@@ -371,7 +375,7 @@ static void ap_decrease_queue_count(struct ap_device *ap_dev)
371 if (ap_dev->queue_count > 0) 375 if (ap_dev->queue_count > 0)
372 mod_timer(&ap_dev->timeout, jiffies + timeout); 376 mod_timer(&ap_dev->timeout, jiffies + timeout);
373 else 377 else
374 /** 378 /*
 375 * The timeout timer should be disabled now - since 379 * The timeout timer should be disabled now - since
376 * del_timer_sync() is very expensive, we just tell via the 380 * del_timer_sync() is very expensive, we just tell via the
377 * reset flag to ignore the pending timeout timer. 381 * reset flag to ignore the pending timeout timer.
@@ -379,7 +383,7 @@ static void ap_decrease_queue_count(struct ap_device *ap_dev)
379 ap_dev->reset = AP_RESET_IGNORE; 383 ap_dev->reset = AP_RESET_IGNORE;
380} 384}
381 385
382/** 386/*
383 * AP device related attributes. 387 * AP device related attributes.
384 */ 388 */
385static ssize_t ap_hwtype_show(struct device *dev, 389static ssize_t ap_hwtype_show(struct device *dev,
@@ -433,6 +437,10 @@ static struct attribute_group ap_dev_attr_group = {
433}; 437};
434 438
435/** 439/**
440 * ap_bus_match()
441 * @dev: Pointer to device
442 * @drv: Pointer to device_driver
443 *
436 * AP bus driver registration/unregistration. 444 * AP bus driver registration/unregistration.
437 */ 445 */
438static int ap_bus_match(struct device *dev, struct device_driver *drv) 446static int ap_bus_match(struct device *dev, struct device_driver *drv)
@@ -441,7 +449,7 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
441 struct ap_driver *ap_drv = to_ap_drv(drv); 449 struct ap_driver *ap_drv = to_ap_drv(drv);
442 struct ap_device_id *id; 450 struct ap_device_id *id;
443 451
444 /** 452 /*
445 * Compare device type of the device with the list of 453 * Compare device type of the device with the list of
446 * supported types of the device_driver. 454 * supported types of the device_driver.
447 */ 455 */
@@ -455,8 +463,12 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
455} 463}
456 464
457/** 465/**
458 * uevent function for AP devices. It sets up a single environment 466 * ap_uevent(): Uevent function for AP devices.
459 * variable DEV_TYPE which contains the hardware device type. 467 * @dev: Pointer to device
468 * @env: Pointer to kobj_uevent_env
469 *
470 * It sets up a single environment variable DEV_TYPE which contains the
471 * hardware device type.
460 */ 472 */
461static int ap_uevent (struct device *dev, struct kobj_uevent_env *env) 473static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
462{ 474{
@@ -500,8 +512,10 @@ static int ap_device_probe(struct device *dev)
500} 512}
501 513
502/** 514/**
515 * __ap_flush_queue(): Flush requests.
516 * @ap_dev: Pointer to the AP device
517 *
503 * Flush all requests from the request/pending queue of an AP device. 518 * Flush all requests from the request/pending queue of an AP device.
504 * @ap_dev: pointer to the AP device.
505 */ 519 */
506static void __ap_flush_queue(struct ap_device *ap_dev) 520static void __ap_flush_queue(struct ap_device *ap_dev)
507{ 521{
@@ -565,7 +579,7 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
565} 579}
566EXPORT_SYMBOL(ap_driver_unregister); 580EXPORT_SYMBOL(ap_driver_unregister);
567 581
568/** 582/*
569 * AP bus attributes. 583 * AP bus attributes.
570 */ 584 */
571static ssize_t ap_domain_show(struct bus_type *bus, char *buf) 585static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
@@ -630,14 +644,16 @@ static struct bus_attribute *const ap_bus_attrs[] = {
630}; 644};
631 645
632/** 646/**
633 * Pick one of the 16 ap domains. 647 * ap_select_domain(): Select an AP domain.
648 *
649 * Pick one of the 16 AP domains.
634 */ 650 */
635static int ap_select_domain(void) 651static int ap_select_domain(void)
636{ 652{
637 int queue_depth, device_type, count, max_count, best_domain; 653 int queue_depth, device_type, count, max_count, best_domain;
638 int rc, i, j; 654 int rc, i, j;
639 655
640 /** 656 /*
641 * We want to use a single domain. Either the one specified with 657 * We want to use a single domain. Either the one specified with
642 * the "domain=" parameter or the domain with the maximum number 658 * the "domain=" parameter or the domain with the maximum number
643 * of devices. 659 * of devices.
@@ -669,8 +685,10 @@ static int ap_select_domain(void)
669} 685}
670 686
671/** 687/**
672 * Find the device type if query queue returned a device type of 0. 688 * ap_probe_device_type(): Find the device type of an AP.
673 * @ap_dev: pointer to the AP device. 689 * @ap_dev: pointer to the AP device.
690 *
691 * Find the device type if query queue returned a device type of 0.
674 */ 692 */
675static int ap_probe_device_type(struct ap_device *ap_dev) 693static int ap_probe_device_type(struct ap_device *ap_dev)
676{ 694{
@@ -764,7 +782,11 @@ out:
764} 782}
765 783
766/** 784/**
767 * Scan the ap bus for new devices. 785 * __ap_scan_bus(): Scan the AP bus.
786 * @dev: Pointer to device
787 * @data: Pointer to data
788 *
789 * Scan the AP bus for new devices.
768 */ 790 */
769static int __ap_scan_bus(struct device *dev, void *data) 791static int __ap_scan_bus(struct device *dev, void *data)
770{ 792{
@@ -867,6 +889,8 @@ ap_config_timeout(unsigned long ptr)
867} 889}
868 890
869/** 891/**
892 * ap_schedule_poll_timer(): Schedule poll timer.
893 *
870 * Set up the timer to run the poll tasklet 894 * Set up the timer to run the poll tasklet
871 */ 895 */
872static inline void ap_schedule_poll_timer(void) 896static inline void ap_schedule_poll_timer(void)
@@ -877,10 +901,11 @@ static inline void ap_schedule_poll_timer(void)
877} 901}
878 902
879/** 903/**
880 * Receive pending reply messages from an AP device. 904 * ap_poll_read(): Receive pending reply messages from an AP device.
881 * @ap_dev: pointer to the AP device 905 * @ap_dev: pointer to the AP device
882 * @flags: pointer to control flags, bit 2^0 is set if another poll is 906 * @flags: pointer to control flags, bit 2^0 is set if another poll is
883 * required, bit 2^1 is set if the poll timer needs to get armed 907 * required, bit 2^1 is set if the poll timer needs to get armed
908 *
884 * Returns 0 if the device is still present, -ENODEV if not. 909 * Returns 0 if the device is still present, -ENODEV if not.
885 */ 910 */
886static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) 911static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
@@ -925,10 +950,11 @@ static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
925} 950}
926 951
927/** 952/**
928 * Send messages from the request queue to an AP device. 953 * ap_poll_write(): Send messages from the request queue to an AP device.
929 * @ap_dev: pointer to the AP device 954 * @ap_dev: pointer to the AP device
930 * @flags: pointer to control flags, bit 2^0 is set if another poll is 955 * @flags: pointer to control flags, bit 2^0 is set if another poll is
931 * required, bit 2^1 is set if the poll timer needs to get armed 956 * required, bit 2^1 is set if the poll timer needs to get armed
957 *
932 * Returns 0 if the device is still present, -ENODEV if not. 958 * Returns 0 if the device is still present, -ENODEV if not.
933 */ 959 */
934static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) 960static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
@@ -968,11 +994,13 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
968} 994}
969 995
970/** 996/**
971 * Poll AP device for pending replies and send new messages. If either 997 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
972 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
973 * @ap_dev: pointer to the bus device 998 * @ap_dev: pointer to the bus device
974 * @flags: pointer to control flags, bit 2^0 is set if another poll is 999 * @flags: pointer to control flags, bit 2^0 is set if another poll is
975 * required, bit 2^1 is set if the poll timer needs to get armed 1000 * required, bit 2^1 is set if the poll timer needs to get armed
1001 *
1002 * Poll AP device for pending replies and send new messages. If either
1003 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
976 * Returns 0. 1004 * Returns 0.
977 */ 1005 */
978static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags) 1006static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
@@ -986,9 +1014,11 @@ static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
986} 1014}
987 1015
988/** 1016/**
989 * Queue a message to a device. 1017 * __ap_queue_message(): Queue a message to a device.
990 * @ap_dev: pointer to the AP device 1018 * @ap_dev: pointer to the AP device
991 * @ap_msg: the message to be queued 1019 * @ap_msg: the message to be queued
1020 *
1021 * Queue a message to a device. Returns 0 if successful.
992 */ 1022 */
993static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1023static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
994{ 1024{
@@ -1055,12 +1085,14 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1055EXPORT_SYMBOL(ap_queue_message); 1085EXPORT_SYMBOL(ap_queue_message);
1056 1086
1057/** 1087/**
1088 * ap_cancel_message(): Cancel a crypto request.
1089 * @ap_dev: The AP device that has the message queued
1090 * @ap_msg: The message that is to be removed
1091 *
1058 * Cancel a crypto request. This is done by removing the request 1092 * Cancel a crypto request. This is done by removing the request
1059 * from the devive pendingq or requestq queue. Note that the 1093 * from the device pending or request queue. Note that the
1060 * request stays on the AP queue. When it finishes the message 1094 * request stays on the AP queue. When it finishes the message
1061 * reply will be discarded because the psmid can't be found. 1095 * reply will be discarded because the psmid can't be found.
1062 * @ap_dev: AP device that has the message queued
1063 * @ap_msg: the message that is to be removed
1064 */ 1096 */
1065void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1097void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1066{ 1098{
@@ -1082,7 +1114,10 @@ void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1082EXPORT_SYMBOL(ap_cancel_message); 1114EXPORT_SYMBOL(ap_cancel_message);
1083 1115
1084/** 1116/**
1085 * AP receive polling for finished AP requests 1117 * ap_poll_timeout(): AP receive polling for finished AP requests.
1118 * @unused: Unused variable.
1119 *
1120 * Schedules the AP tasklet.
1086 */ 1121 */
1087static void ap_poll_timeout(unsigned long unused) 1122static void ap_poll_timeout(unsigned long unused)
1088{ 1123{
@@ -1090,6 +1125,9 @@ static void ap_poll_timeout(unsigned long unused)
1090} 1125}
1091 1126
1092/** 1127/**
1128 * ap_reset(): Reset a not responding AP device.
1129 * @ap_dev: Pointer to the AP device
1130 *
1093 * Reset a not responding AP device and move all requests from the 1131 * Reset a not responding AP device and move all requests from the
1094 * pending queue to the request queue. 1132 * pending queue to the request queue.
1095 */ 1133 */
@@ -1108,11 +1146,6 @@ static void ap_reset(struct ap_device *ap_dev)
1108 ap_dev->unregistered = 1; 1146 ap_dev->unregistered = 1;
1109} 1147}
1110 1148
1111/**
1112 * Poll all AP devices on the bus in a round robin fashion. Continue
1113 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1114 * of the control flags has been set arm the poll timer.
1115 */
1116static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags) 1149static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
1117{ 1150{
1118 spin_lock(&ap_dev->lock); 1151 spin_lock(&ap_dev->lock);
@@ -1126,6 +1159,14 @@ static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
1126 return 0; 1159 return 0;
1127} 1160}
1128 1161
1162/**
1163 * ap_poll_all(): Poll all AP devices.
1164 * @dummy: Unused variable
1165 *
1166 * Poll all AP devices on the bus in a round robin fashion. Continue
1167 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1168 * of the control flags has been set arm the poll timer.
1169 */
1129static void ap_poll_all(unsigned long dummy) 1170static void ap_poll_all(unsigned long dummy)
1130{ 1171{
1131 unsigned long flags; 1172 unsigned long flags;
@@ -1144,6 +1185,9 @@ static void ap_poll_all(unsigned long dummy)
1144} 1185}
1145 1186
1146/** 1187/**
1188 * ap_poll_thread(): Thread that polls for finished requests.
1189 * @data: Unused pointer
1190 *
1147 * AP bus poll thread. The purpose of this thread is to poll for 1191 * AP bus poll thread. The purpose of this thread is to poll for
1148 * finished requests in a loop if there is a "free" cpu - that is 1192 * finished requests in a loop if there is a "free" cpu - that is
1149 * a cpu that doesn't have anything better to do. The polling stops 1193 * a cpu that doesn't have anything better to do. The polling stops
@@ -1213,7 +1257,10 @@ static void ap_poll_thread_stop(void)
1213} 1257}
1214 1258
1215/** 1259/**
1216 * Handling of request timeouts 1260 * ap_request_timeout(): Handling of request timeouts
1261 * @data: Holds the AP device.
1262 *
1263 * Handles request timeouts.
1217 */ 1264 */
1218static void ap_request_timeout(unsigned long data) 1265static void ap_request_timeout(unsigned long data)
1219{ 1266{
@@ -1246,7 +1293,9 @@ static struct reset_call ap_reset_call = {
1246}; 1293};
1247 1294
1248/** 1295/**
1249 * The module initialization code. 1296 * ap_module_init(): The module initialization code.
1297 *
1298 * Initializes the module.
1250 */ 1299 */
1251int __init ap_module_init(void) 1300int __init ap_module_init(void)
1252{ 1301{
@@ -1288,7 +1337,7 @@ int __init ap_module_init(void)
1288 if (ap_select_domain() == 0) 1337 if (ap_select_domain() == 0)
1289 ap_scan_bus(NULL); 1338 ap_scan_bus(NULL);
1290 1339
1291 /* Setup the ap bus rescan timer. */ 1340 /* Setup the AP bus rescan timer. */
1292 init_timer(&ap_config_timer); 1341 init_timer(&ap_config_timer);
1293 ap_config_timer.function = ap_config_timeout; 1342 ap_config_timer.function = ap_config_timeout;
1294 ap_config_timer.data = 0; 1343 ap_config_timer.data = 0;
@@ -1325,7 +1374,9 @@ static int __ap_match_all(struct device *dev, void *data)
1325} 1374}
1326 1375
1327/** 1376/**
 1328 * The module termination code 1377 * ap_module_exit(): The module termination code
1378 *
1379 * Terminates the module.
1329 */ 1380 */
1330void ap_module_exit(void) 1381void ap_module_exit(void)
1331{ 1382{
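Most of the ap_bus.c changes are comment conversions rather than code changes: ordinary explanatory comments lose the double-asterisk opener, while comments meant for kerneldoc gain a "name(): summary" line plus @argument descriptions. The convention, illustrated on a hypothetical helper:

        /**
         * ap_example_helper(): One-line summary of what the function does.
         * @ap_dev: Pointer to the AP device being operated on
         *
         * Longer description.  Only comments intended for kerneldoc keep the
         * double-asterisk opener; plain comments now use a single asterisk.
         */
        static void ap_example_helper(struct ap_device *ap_dev)
        {
        }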
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 87c2d6442875..c1e1200c43fc 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -50,6 +50,15 @@ typedef unsigned int ap_qid_t;
50#define AP_QID_QUEUE(_qid) ((_qid) & 15) 50#define AP_QID_QUEUE(_qid) ((_qid) & 15)
51 51
52/** 52/**
 53 * struct ap_queue_status - Holds the AP queue status.
54 * @queue_empty: Shows if queue is empty
55 * @replies_waiting: Waiting replies
56 * @queue_full: Is 1 if the queue is full
57 * @pad: A 4 bit pad
58 * @int_enabled: Shows if interrupts are enabled for the AP
 59 * @response_code: Holds the 8 bit response code
60 * @pad2: A 16 bit pad
61 *
53 * The ap queue status word is returned by all three AP functions 62 * The ap queue status word is returned by all three AP functions
54 * (PQAP, NQAP and DQAP). There's a set of flags in the first 63 * (PQAP, NQAP and DQAP). There's a set of flags in the first
55 * byte, followed by a 1 byte response code. 64 * byte, followed by a 1 byte response code.
@@ -75,7 +84,7 @@ struct ap_queue_status {
75#define AP_RESPONSE_NO_FIRST_PART 0x13 84#define AP_RESPONSE_NO_FIRST_PART 0x13
76#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15 85#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
77 86
78/** 87/*
79 * Known device types 88 * Known device types
80 */ 89 */
81#define AP_DEVICE_TYPE_PCICC 3 90#define AP_DEVICE_TYPE_PCICC 3
@@ -84,7 +93,7 @@ struct ap_queue_status {
84#define AP_DEVICE_TYPE_CEX2A 6 93#define AP_DEVICE_TYPE_CEX2A 6
85#define AP_DEVICE_TYPE_CEX2C 7 94#define AP_DEVICE_TYPE_CEX2C 7
86 95
87/** 96/*
88 * AP reset flag states 97 * AP reset flag states
89 */ 98 */
90#define AP_RESET_IGNORE 0 /* request timeout will be ignored */ 99#define AP_RESET_IGNORE 0 /* request timeout will be ignored */
@@ -152,7 +161,7 @@ struct ap_message {
152 .dev_type=(dt), \ 161 .dev_type=(dt), \
153 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE, 162 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
154 163
155/** 164/*
156 * Note: don't use ap_send/ap_recv after using ap_queue_message 165 * Note: don't use ap_send/ap_recv after using ap_queue_message
157 * for the first time. Otherwise the ap message queue will get 166 * for the first time. Otherwise the ap message queue will get
158 * confused. 167 * confused.
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index e3625a47a596..4d36e805a234 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -36,10 +36,11 @@
36#include <linux/compat.h> 36#include <linux/compat.h>
37#include <asm/atomic.h> 37#include <asm/atomic.h>
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39#include <linux/hw_random.h>
39 40
40#include "zcrypt_api.h" 41#include "zcrypt_api.h"
41 42
42/** 43/*
43 * Module description. 44 * Module description.
44 */ 45 */
45MODULE_AUTHOR("IBM Corporation"); 46MODULE_AUTHOR("IBM Corporation");
@@ -52,7 +53,10 @@ static LIST_HEAD(zcrypt_device_list);
52static int zcrypt_device_count = 0; 53static int zcrypt_device_count = 0;
53static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 54static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
54 55
55/** 56static int zcrypt_rng_device_add(void);
57static void zcrypt_rng_device_remove(void);
58
59/*
56 * Device attributes common for all crypto devices. 60 * Device attributes common for all crypto devices.
57 */ 61 */
58static ssize_t zcrypt_type_show(struct device *dev, 62static ssize_t zcrypt_type_show(struct device *dev,
@@ -99,6 +103,9 @@ static struct attribute_group zcrypt_device_attr_group = {
99}; 103};
100 104
101/** 105/**
106 * __zcrypt_increase_preference(): Increase preference of a crypto device.
 107 * @zdev: Pointer to the crypto device
108 *
102 * Move the device towards the head of the device list. 109 * Move the device towards the head of the device list.
103 * Need to be called while holding the zcrypt device list lock. 110 * Need to be called while holding the zcrypt device list lock.
104 * Note: cards with speed_rating of 0 are kept at the end of the list. 111 * Note: cards with speed_rating of 0 are kept at the end of the list.
@@ -125,6 +132,9 @@ static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
125} 132}
126 133
127/** 134/**
135 * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
136 * @zdev: Pointer to a crypto device.
137 *
128 * Move the device towards the tail of the device list. 138 * Move the device towards the tail of the device list.
129 * Need to be called while holding the zcrypt device list lock. 139 * Need to be called while holding the zcrypt device list lock.
130 * Note: cards with speed_rating of 0 are kept at the end of the list. 140 * Note: cards with speed_rating of 0 are kept at the end of the list.
@@ -198,7 +208,10 @@ void zcrypt_device_free(struct zcrypt_device *zdev)
198EXPORT_SYMBOL(zcrypt_device_free); 208EXPORT_SYMBOL(zcrypt_device_free);
199 209
200/** 210/**
201 * Register a crypto device. 211 * zcrypt_device_register() - Register a crypto device.
212 * @zdev: Pointer to a crypto device
213 *
214 * Register a crypto device. Returns 0 if successful.
202 */ 215 */
203int zcrypt_device_register(struct zcrypt_device *zdev) 216int zcrypt_device_register(struct zcrypt_device *zdev)
204{ 217{
@@ -216,16 +229,37 @@ int zcrypt_device_register(struct zcrypt_device *zdev)
216 __zcrypt_increase_preference(zdev); 229 __zcrypt_increase_preference(zdev);
217 zcrypt_device_count++; 230 zcrypt_device_count++;
218 spin_unlock_bh(&zcrypt_device_lock); 231 spin_unlock_bh(&zcrypt_device_lock);
232 if (zdev->ops->rng) {
233 rc = zcrypt_rng_device_add();
234 if (rc)
235 goto out_unregister;
236 }
237 return 0;
238
239out_unregister:
240 spin_lock_bh(&zcrypt_device_lock);
241 zcrypt_device_count--;
242 list_del_init(&zdev->list);
243 spin_unlock_bh(&zcrypt_device_lock);
244 sysfs_remove_group(&zdev->ap_dev->device.kobj,
245 &zcrypt_device_attr_group);
246 put_device(&zdev->ap_dev->device);
247 zcrypt_device_put(zdev);
219out: 248out:
220 return rc; 249 return rc;
221} 250}
222EXPORT_SYMBOL(zcrypt_device_register); 251EXPORT_SYMBOL(zcrypt_device_register);
223 252
224/** 253/**
254 * zcrypt_device_unregister(): Unregister a crypto device.
255 * @zdev: Pointer to crypto device
256 *
225 * Unregister a crypto device. 257 * Unregister a crypto device.
226 */ 258 */
227void zcrypt_device_unregister(struct zcrypt_device *zdev) 259void zcrypt_device_unregister(struct zcrypt_device *zdev)
228{ 260{
261 if (zdev->ops->rng)
262 zcrypt_rng_device_remove();
229 spin_lock_bh(&zcrypt_device_lock); 263 spin_lock_bh(&zcrypt_device_lock);
230 zcrypt_device_count--; 264 zcrypt_device_count--;
231 list_del_init(&zdev->list); 265 list_del_init(&zdev->list);
@@ -238,7 +272,9 @@ void zcrypt_device_unregister(struct zcrypt_device *zdev)
238EXPORT_SYMBOL(zcrypt_device_unregister); 272EXPORT_SYMBOL(zcrypt_device_unregister);
239 273
240/** 274/**
241 * zcrypt_read is not be supported beyond zcrypt 1.3.1 275 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
276 *
277 * This function is not supported beyond zcrypt 1.3.1.
242 */ 278 */
243static ssize_t zcrypt_read(struct file *filp, char __user *buf, 279static ssize_t zcrypt_read(struct file *filp, char __user *buf,
244 size_t count, loff_t *f_pos) 280 size_t count, loff_t *f_pos)
@@ -247,6 +283,8 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf,
247} 283}
248 284
249/** 285/**
286 * zcrypt_write(): Not allowed.
287 *
250 * Write is not allowed 288 * Write is not allowed
251 */ 289 */
252static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 290static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
@@ -256,7 +294,9 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
256} 294}
257 295
258/** 296/**
259 * Device open/close functions to count number of users. 297 * zcrypt_open(): Count number of users.
298 *
299 * Device open function to count number of users.
260 */ 300 */
261static int zcrypt_open(struct inode *inode, struct file *filp) 301static int zcrypt_open(struct inode *inode, struct file *filp)
262{ 302{
@@ -264,13 +304,18 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
264 return 0; 304 return 0;
265} 305}
266 306
307/**
308 * zcrypt_release(): Count number of users.
309 *
310 * Device close function to count number of users.
311 */
267static int zcrypt_release(struct inode *inode, struct file *filp) 312static int zcrypt_release(struct inode *inode, struct file *filp)
268{ 313{
269 atomic_dec(&zcrypt_open_count); 314 atomic_dec(&zcrypt_open_count);
270 return 0; 315 return 0;
271} 316}
272 317
273/** 318/*
274 * zcrypt ioctls. 319 * zcrypt ioctls.
275 */ 320 */
276static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 321static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
@@ -280,7 +325,7 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
280 325
281 if (mex->outputdatalength < mex->inputdatalength) 326 if (mex->outputdatalength < mex->inputdatalength)
282 return -EINVAL; 327 return -EINVAL;
283 /** 328 /*
284 * As long as outputdatalength is big enough, we can set the 329 * As long as outputdatalength is big enough, we can set the
285 * outputdatalength equal to the inputdatalength, since that is the 330 * outputdatalength equal to the inputdatalength, since that is the
286 * number of bytes we will copy in any case 331 * number of bytes we will copy in any case
@@ -326,7 +371,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
326 if (crt->outputdatalength < crt->inputdatalength || 371 if (crt->outputdatalength < crt->inputdatalength ||
327 (crt->inputdatalength & 1)) 372 (crt->inputdatalength & 1))
328 return -EINVAL; 373 return -EINVAL;
329 /** 374 /*
330 * As long as outputdatalength is big enough, we can set the 375 * As long as outputdatalength is big enough, we can set the
331 * outputdatalength equal to the inputdatalength, since that is the 376 * outputdatalength equal to the inputdatalength, since that is the
332 * number of bytes we will copy in any case 377 * number of bytes we will copy in any case
@@ -343,7 +388,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
343 zdev->max_mod_size < crt->inputdatalength) 388 zdev->max_mod_size < crt->inputdatalength)
344 continue; 389 continue;
345 if (zdev->short_crt && crt->inputdatalength > 240) { 390 if (zdev->short_crt && crt->inputdatalength > 240) {
346 /** 391 /*
347 * Check inputdata for leading zeros for cards 392 * Check inputdata for leading zeros for cards
348 * that can't handle np_prime, bp_key, or 393 * that can't handle np_prime, bp_key, or
349 * u_mult_inv > 128 bytes. 394 * u_mult_inv > 128 bytes.
@@ -359,7 +404,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
359 copy_from_user(&z3, crt->u_mult_inv, len)) 404 copy_from_user(&z3, crt->u_mult_inv, len))
360 return -EFAULT; 405 return -EFAULT;
361 copied = 1; 406 copied = 1;
362 /** 407 /*
363 * We have to restart device lookup - 408 * We have to restart device lookup -
364 * the device list may have changed by now. 409 * the device list may have changed by now.
365 */ 410 */
@@ -427,6 +472,37 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
427 return -ENODEV; 472 return -ENODEV;
428} 473}
429 474
475static long zcrypt_rng(char *buffer)
476{
477 struct zcrypt_device *zdev;
478 int rc;
479
480 spin_lock_bh(&zcrypt_device_lock);
481 list_for_each_entry(zdev, &zcrypt_device_list, list) {
482 if (!zdev->online || !zdev->ops->rng)
483 continue;
484 zcrypt_device_get(zdev);
485 get_device(&zdev->ap_dev->device);
486 zdev->request_count++;
487 __zcrypt_decrease_preference(zdev);
488 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
489 spin_unlock_bh(&zcrypt_device_lock);
490 rc = zdev->ops->rng(zdev, buffer);
491 spin_lock_bh(&zcrypt_device_lock);
492 module_put(zdev->ap_dev->drv->driver.owner);
493 } else
494 rc = -EAGAIN;
495 zdev->request_count--;
496 __zcrypt_increase_preference(zdev);
497 put_device(&zdev->ap_dev->device);
498 zcrypt_device_put(zdev);
499 spin_unlock_bh(&zcrypt_device_lock);
500 return rc;
501 }
502 spin_unlock_bh(&zcrypt_device_lock);
503 return -ENODEV;
504}
505
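The new zcrypt_rng() above follows the same distributor pattern as the other zcrypt entry points: scan the device list under the lock, pick the first online device that implements the rng callback, pin it while the request is in flight, and drop the lock before calling into the (slow) card operation. The following is a minimal user-space sketch of that pattern only; a pthread mutex stands in for the spinlock and every name here is hypothetical.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a zcrypt device entry. */
struct demo_dev {
	int online;
	int request_count;
	long (*rng)(struct demo_dev *dev, char *buf);
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static long fake_rng(struct demo_dev *dev, char *buf)
{
	(void) dev;
	memset(buf, 0x5a, 16);	/* pretend the card delivered 16 random bytes */
	return 16;
}

static struct demo_dev devs[] = {
	{ .online = 1, .rng = NULL },		/* skipped: no rng capability */
	{ .online = 1, .rng = fake_rng },	/* first eligible device wins */
};

static long demo_rng(char *buf)
{
	long rc = -1;			/* mirrors -ENODEV: no usable device */
	size_t i;

	pthread_mutex_lock(&list_lock);
	for (i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		struct demo_dev *dev = &devs[i];

		if (!dev->online || !dev->rng)
			continue;
		dev->request_count++;			/* pin the device while busy */
		pthread_mutex_unlock(&list_lock);	/* never call the card under the lock */
		rc = dev->rng(dev, buf);
		pthread_mutex_lock(&list_lock);
		dev->request_count--;
		break;
	}
	pthread_mutex_unlock(&list_lock);
	return rc;
}

int main(void)
{
	char buf[16];

	printf("demo_rng() returned %ld\n", demo_rng(buf));
	return 0;
}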
430static void zcrypt_status_mask(char status[AP_DEVICES]) 506static void zcrypt_status_mask(char status[AP_DEVICES])
431{ 507{
432 struct zcrypt_device *zdev; 508 struct zcrypt_device *zdev;
@@ -514,6 +590,8 @@ static int zcrypt_count_type(int type)
514} 590}
515 591
516/** 592/**
 593 * zcrypt_ica_status(): Old, deprecated combi status call.
594 *
517 * Old, deprecated combi status call. 595 * Old, deprecated combi status call.
518 */ 596 */
519static long zcrypt_ica_status(struct file *filp, unsigned long arg) 597static long zcrypt_ica_status(struct file *filp, unsigned long arg)
@@ -615,7 +693,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
615 (int __user *) arg); 693 (int __user *) arg);
616 case Z90STAT_DOMAIN_INDEX: 694 case Z90STAT_DOMAIN_INDEX:
617 return put_user(ap_domain_index, (int __user *) arg); 695 return put_user(ap_domain_index, (int __user *) arg);
618 /** 696 /*
619 * Deprecated ioctls. Don't add another device count ioctl, 697 * Deprecated ioctls. Don't add another device count ioctl,
620 * you can count them yourself in the user space with the 698 * you can count them yourself in the user space with the
621 * output of the Z90STAT_STATUS_MASK ioctl. 699 * output of the Z90STAT_STATUS_MASK ioctl.
@@ -653,7 +731,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
653} 731}
654 732
655#ifdef CONFIG_COMPAT 733#ifdef CONFIG_COMPAT
656/** 734/*
657 * ioctl32 conversion routines 735 * ioctl32 conversion routines
658 */ 736 */
659struct compat_ica_rsa_modexpo { 737struct compat_ica_rsa_modexpo {
@@ -804,7 +882,7 @@ static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
804} 882}
805#endif 883#endif
806 884
807/** 885/*
808 * Misc device file operations. 886 * Misc device file operations.
809 */ 887 */
810static const struct file_operations zcrypt_fops = { 888static const struct file_operations zcrypt_fops = {
@@ -819,7 +897,7 @@ static const struct file_operations zcrypt_fops = {
819 .release = zcrypt_release 897 .release = zcrypt_release
820}; 898};
821 899
822/** 900/*
823 * Misc device. 901 * Misc device.
824 */ 902 */
825static struct miscdevice zcrypt_misc_device = { 903static struct miscdevice zcrypt_misc_device = {
@@ -828,7 +906,7 @@ static struct miscdevice zcrypt_misc_device = {
828 .fops = &zcrypt_fops, 906 .fops = &zcrypt_fops,
829}; 907};
830 908
831/** 909/*
832 * Deprecated /proc entry support. 910 * Deprecated /proc entry support.
833 */ 911 */
834static struct proc_dir_entry *zcrypt_entry; 912static struct proc_dir_entry *zcrypt_entry;
@@ -1022,7 +1100,7 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
1022 } 1100 }
1023 1101
1024 for (j = 0; j < 64 && *ptr; ptr++) { 1102 for (j = 0; j < 64 && *ptr; ptr++) {
1025 /** 1103 /*
1026 * '0' for no device, '1' for PCICA, '2' for PCICC, 1104 * '0' for no device, '1' for PCICA, '2' for PCICC,
1027 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, 1105 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
1028 * '5' for CEX2C and '6' for CEX2A' 1106 * '5' for CEX2C and '6' for CEX2A'
@@ -1041,7 +1119,76 @@ out:
1041 return count; 1119 return count;
1042} 1120}
1043 1121
1122static int zcrypt_rng_device_count;
1123static u32 *zcrypt_rng_buffer;
1124static int zcrypt_rng_buffer_index;
1125static DEFINE_MUTEX(zcrypt_rng_mutex);
1126
1127static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
1128{
1129 int rc;
1130
1131 /*
1132 * We don't need locking here because the RNG API guarantees serialized
1133 * read method calls.
1134 */
1135 if (zcrypt_rng_buffer_index == 0) {
1136 rc = zcrypt_rng((char *) zcrypt_rng_buffer);
1137 if (rc < 0)
1138 return -EIO;
1139 zcrypt_rng_buffer_index = rc / sizeof *data;
1140 }
1141 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
1142 return sizeof *data;
1143}
1144
1145static struct hwrng zcrypt_rng_dev = {
1146 .name = "zcrypt",
1147 .data_read = zcrypt_rng_data_read,
1148};
1149
1150static int zcrypt_rng_device_add(void)
1151{
1152 int rc = 0;
1153
1154 mutex_lock(&zcrypt_rng_mutex);
1155 if (zcrypt_rng_device_count == 0) {
1156 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
1157 if (!zcrypt_rng_buffer) {
1158 rc = -ENOMEM;
1159 goto out;
1160 }
1161 zcrypt_rng_buffer_index = 0;
1162 rc = hwrng_register(&zcrypt_rng_dev);
1163 if (rc)
1164 goto out_free;
1165 zcrypt_rng_device_count = 1;
1166 } else
1167 zcrypt_rng_device_count++;
1168 mutex_unlock(&zcrypt_rng_mutex);
1169 return 0;
1170
1171out_free:
1172 free_page((unsigned long) zcrypt_rng_buffer);
1173out:
1174 mutex_unlock(&zcrypt_rng_mutex);
1175 return rc;
1176}
1177
1178static void zcrypt_rng_device_remove(void)
1179{
1180 mutex_lock(&zcrypt_rng_mutex);
1181 zcrypt_rng_device_count--;
1182 if (zcrypt_rng_device_count == 0) {
1183 hwrng_unregister(&zcrypt_rng_dev);
1184 free_page((unsigned long) zcrypt_rng_buffer);
1185 }
1186 mutex_unlock(&zcrypt_rng_mutex);
1187}
1188
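The hwrng glue above amortizes card requests: zcrypt_rng_data_read() pulls a whole page of entropy per request and hands it back to the hw_random core one 32-bit word at a time, refilling only when the index reaches zero. Below is a small user-space sketch of just that buffering scheme, assuming a hypothetical fill_page() in place of the zcrypt_rng() call into the card.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_RNG_BUFFER_SIZE 4096	/* one page per card request, as above */

static uint32_t rng_buffer[DEMO_RNG_BUFFER_SIZE / sizeof(uint32_t)];
static size_t rng_index;		/* unread 32-bit words left in the buffer */

/* Hypothetical stand-in for the zcrypt_rng() call into the card. */
static int fill_page(uint32_t *buf)
{
	size_t i;

	for (i = 0; i < DEMO_RNG_BUFFER_SIZE / sizeof(uint32_t); i++)
		buf[i] = (uint32_t) rand();
	return DEMO_RNG_BUFFER_SIZE;	/* bytes delivered by the "card" */
}

static int data_read(uint32_t *data)
{
	if (rng_index == 0) {		/* buffer drained: issue one card request */
		int rc = fill_page(rng_buffer);

		if (rc < 0)
			return -1;
		rng_index = rc / sizeof(*data);
	}
	*data = rng_buffer[--rng_index];
	return (int) sizeof(*data);
}

int main(void)
{
	uint32_t word;

	if (data_read(&word) > 0)
		printf("first word: 0x%08x\n", word);
	return 0;
}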
1044/** 1189/**
1190 * zcrypt_api_init(): Module initialization.
1191 *
1045 * The module initialization code. 1192 * The module initialization code.
1046 */ 1193 */
1047int __init zcrypt_api_init(void) 1194int __init zcrypt_api_init(void)
@@ -1076,6 +1223,8 @@ out:
1076} 1223}
1077 1224
1078/** 1225/**
1226 * zcrypt_api_exit(): Module termination.
1227 *
1079 * The module termination code. 1228 * The module termination code.
1080 */ 1229 */
1081void zcrypt_api_exit(void) 1230void zcrypt_api_exit(void)
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index de4877ee618f..5c6e222b2ac4 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -43,17 +43,17 @@
43#define DEV_NAME "zcrypt" 43#define DEV_NAME "zcrypt"
44 44
45#define PRINTK(fmt, args...) \ 45#define PRINTK(fmt, args...) \
46 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) 46 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
47#define PRINTKN(fmt, args...) \ 47#define PRINTKN(fmt, args...) \
48 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args) 48 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
49#define PRINTKW(fmt, args...) \ 49#define PRINTKW(fmt, args...) \
50 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) 50 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args)
51#define PRINTKC(fmt, args...) \ 51#define PRINTKC(fmt, args...) \
52 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) 52 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args)
53 53
54#ifdef ZCRYPT_DEBUG 54#ifdef ZCRYPT_DEBUG
55#define PDEBUG(fmt, args...) \ 55#define PDEBUG(fmt, args...) \
56 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) 56 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
57#else 57#else
58#define PDEBUG(fmt, args...) do {} while (0) 58#define PDEBUG(fmt, args...) do {} while (0)
59#endif 59#endif
@@ -100,6 +100,13 @@ struct ica_z90_status {
100#define ZCRYPT_CEX2C 5 100#define ZCRYPT_CEX2C 5
101#define ZCRYPT_CEX2A 6 101#define ZCRYPT_CEX2A 6
102 102
103/**
104 * Large random numbers are pulled in 4096 byte chunks from the crypto cards
 105 * and stored in a page. Be careful when increasing this buffer due to size
106 * limitations for AP requests.
107 */
108#define ZCRYPT_RNG_BUFFER_SIZE 4096
109
103struct zcrypt_device; 110struct zcrypt_device;
104 111
105struct zcrypt_ops { 112struct zcrypt_ops {
@@ -107,6 +114,7 @@ struct zcrypt_ops {
107 long (*rsa_modexpo_crt)(struct zcrypt_device *, 114 long (*rsa_modexpo_crt)(struct zcrypt_device *,
108 struct ica_rsa_modexpo_crt *); 115 struct ica_rsa_modexpo_crt *);
109 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); 116 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
117 long (*rng)(struct zcrypt_device *, char *);
110}; 118};
111 119
112struct zcrypt_device { 120struct zcrypt_device {
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index 8dbcf0eef3e5..ed82f2f59b17 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -174,7 +174,7 @@ static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
174 key->pvtMeHdr = static_pvt_me_hdr; 174 key->pvtMeHdr = static_pvt_me_hdr;
175 key->pvtMeSec = static_pvt_me_sec; 175 key->pvtMeSec = static_pvt_me_sec;
176 key->pubMeSec = static_pub_me_sec; 176 key->pubMeSec = static_pub_me_sec;
177 /** 177 /*
178 * In a private key, the modulus doesn't appear in the public 178 * In a private key, the modulus doesn't appear in the public
179 * section. So, an arbitrary public exponent of 0x010001 will be 179 * section. So, an arbitrary public exponent of 0x010001 will be
180 * used. 180 * used.
@@ -338,7 +338,7 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
338 pub = (struct cca_public_sec *)(key->key_parts + key_len); 338 pub = (struct cca_public_sec *)(key->key_parts + key_len);
339 *pub = static_cca_pub_sec; 339 *pub = static_cca_pub_sec;
340 pub->modulus_bit_len = 8 * crt->inputdatalength; 340 pub->modulus_bit_len = 8 * crt->inputdatalength;
341 /** 341 /*
342 * In a private key, the modulus doesn't appear in the public 342 * In a private key, the modulus doesn't appear in the public
343 * section. So, an arbitrary public exponent of 0x010001 will be 343 * section. So, an arbitrary public exponent of 0x010001 will be
344 * used. 344 * used.
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 2cb616ba8bec..3e27fe77d207 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -108,7 +108,7 @@ static inline int convert_error(struct zcrypt_device *zdev,
108 return -EINVAL; 108 return -EINVAL;
109 case REP82_ERROR_MESSAGE_TYPE: 109 case REP82_ERROR_MESSAGE_TYPE:
110 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A 110 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
111 /** 111 /*
112 * To sent a message of the wrong type is a bug in the 112 * To sent a message of the wrong type is a bug in the
113 * device driver. Warn about it, disable the device 113 * device driver. Warn about it, disable the device
114 * and then repeat the request. 114 * and then repeat the request.
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index d6d59bf9ac38..17ea56ce1c11 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -42,7 +42,7 @@
42#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */ 42#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */
43#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */ 43#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */
44 44
45/** 45/*
46 * PCICC cards need a speed rating of 0. This keeps them at the end of 46 * PCICC cards need a speed rating of 0. This keeps them at the end of
47 * the zcrypt device list (see zcrypt_api.c). PCICC cards are only 47 * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
48 * used if no other cards are present because they are slow and can only 48 * used if no other cards are present because they are slow and can only
@@ -388,7 +388,7 @@ static int convert_type86(struct zcrypt_device *zdev,
388 reply_len = le16_to_cpu(msg->length) - 2; 388 reply_len = le16_to_cpu(msg->length) - 2;
389 if (reply_len > outputdatalength) 389 if (reply_len > outputdatalength)
390 return -EINVAL; 390 return -EINVAL;
391 /** 391 /*
392 * For all encipher requests, the length of the ciphertext (reply_len) 392 * For all encipher requests, the length of the ciphertext (reply_len)
393 * will always equal the modulus length. For MEX decipher requests 393 * will always equal the modulus length. For MEX decipher requests
394 * the output needs to get padded. Minimum pad size is 10. 394 * the output needs to get padded. Minimum pad size is 10.
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 70b9ddc8cf9d..0bc9b3188e64 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -356,6 +356,55 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
356} 356}
357 357
358/** 358/**
359 * Prepare a type6 CPRB message for random number generation
360 *
361 * @ap_dev: AP device pointer
362 * @ap_msg: pointer to AP message
363 */
364static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
365 struct ap_message *ap_msg,
366 unsigned random_number_length)
367{
368 struct {
369 struct type6_hdr hdr;
370 struct CPRBX cprbx;
371 char function_code[2];
372 short int rule_length;
373 char rule[8];
374 short int verb_length;
375 short int key_length;
376 } __attribute__((packed)) *msg = ap_msg->message;
377 static struct type6_hdr static_type6_hdrX = {
378 .type = 0x06,
379 .offset1 = 0x00000058,
380 .agent_id = {'C', 'A'},
381 .function_code = {'R', 'L'},
382 .ToCardLen1 = sizeof *msg - sizeof(msg->hdr),
383 .FromCardLen1 = sizeof *msg - sizeof(msg->hdr),
384 };
385 static struct CPRBX static_cprbx = {
386 .cprb_len = 0x00dc,
387 .cprb_ver_id = 0x02,
388 .func_id = {0x54, 0x32},
389 .req_parml = sizeof *msg - sizeof(msg->hdr) -
390 sizeof(msg->cprbx),
391 .rpl_msgbl = sizeof *msg - sizeof(msg->hdr),
392 };
393
394 msg->hdr = static_type6_hdrX;
 395 msg->hdr.FromCardLen2 = random_number_length;
396 msg->cprbx = static_cprbx;
 397 msg->cprbx.rpl_datal = random_number_length;
398 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
399 memcpy(msg->function_code, msg->hdr.function_code, 0x02);
400 msg->rule_length = 0x0a;
401 memcpy(msg->rule, "RANDOM ", 8);
402 msg->verb_length = 0x02;
403 msg->key_length = 0x02;
404 ap_msg->length = sizeof *msg;
405}
406
407/**
359 * Copy results from a type 86 ICA reply message back to user space. 408 * Copy results from a type 86 ICA reply message back to user space.
360 * 409 *
361 * @zdev: crypto device pointer 410 * @zdev: crypto device pointer
@@ -452,7 +501,7 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
452 reply_len = msg->length - 2; 501 reply_len = msg->length - 2;
453 if (reply_len > outputdatalength) 502 if (reply_len > outputdatalength)
454 return -EINVAL; 503 return -EINVAL;
455 /** 504 /*
456 * For all encipher requests, the length of the ciphertext (reply_len) 505 * For all encipher requests, the length of the ciphertext (reply_len)
457 * will always equal the modulus length. For MEX decipher requests 506 * will always equal the modulus length. For MEX decipher requests
458 * the output needs to get padded. Minimum pad size is 10. 507 * the output needs to get padded. Minimum pad size is 10.
@@ -509,6 +558,26 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
509 return 0; 558 return 0;
510} 559}
511 560
561static int convert_type86_rng(struct zcrypt_device *zdev,
562 struct ap_message *reply,
563 char *buffer)
564{
565 struct {
566 struct type86_hdr hdr;
567 struct type86_fmt2_ext fmt2;
568 struct CPRBX cprbx;
569 } __attribute__((packed)) *msg = reply->message;
570 char *data = reply->message;
571
572 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) {
573 PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n",
 574 msg->cprbx.ccp_rtcode, msg->cprbx.ccp_rscode);
575 return -EINVAL;
576 }
577 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
578 return msg->fmt2.count2;
579}
580
512static int convert_response_ica(struct zcrypt_device *zdev, 581static int convert_response_ica(struct zcrypt_device *zdev,
513 struct ap_message *reply, 582 struct ap_message *reply,
514 char __user *outputdata, 583 char __user *outputdata,
@@ -567,6 +636,31 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
567 } 636 }
568} 637}
569 638
639static int convert_response_rng(struct zcrypt_device *zdev,
640 struct ap_message *reply,
641 char *data)
642{
643 struct type86x_reply *msg = reply->message;
644
645 switch (msg->hdr.type) {
646 case TYPE82_RSP_CODE:
647 case TYPE88_RSP_CODE:
648 return -EINVAL;
649 case TYPE86_RSP_CODE:
650 if (msg->hdr.reply_code)
651 return -EINVAL;
652 if (msg->cprbx.cprb_ver_id == 0x02)
653 return convert_type86_rng(zdev, reply, data);
654 /* no break, incorrect cprb version is an unknown response */
655 default: /* Unknown response type, this should NEVER EVER happen */
656 PRINTK("Unrecognized Message Header: %08x%08x\n",
657 *(unsigned int *) reply->message,
658 *(unsigned int *) (reply->message+4));
659 zdev->online = 0;
660 return -EAGAIN; /* repeat the request on a different device. */
661 }
662}
663
570/** 664/**
571 * This function is called from the AP bus code after a crypto request 665 * This function is called from the AP bus code after a crypto request
572 * "msg" has finished with the reply message "reply". 666 * "msg" has finished with the reply message "reply".
@@ -736,6 +830,42 @@ out_free:
736} 830}
737 831
738/** 832/**
833 * The request distributor calls this function if it picked the PCIXCC/CEX2C
834 * device to generate random data.
835 * @zdev: pointer to zcrypt_device structure that identifies the
836 * PCIXCC/CEX2C device to the request distributor
837 * @buffer: pointer to a memory page to return random data
838 */
839
840static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
841 char *buffer)
842{
843 struct ap_message ap_msg;
844 struct response_type resp_type = {
845 .type = PCIXCC_RESPONSE_TYPE_XCRB,
846 };
847 int rc;
848
849 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
850 if (!ap_msg.message)
851 return -ENOMEM;
852 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
853 atomic_inc_return(&zcrypt_step);
854 ap_msg.private = &resp_type;
855 rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
856 init_completion(&resp_type.work);
857 ap_queue_message(zdev->ap_dev, &ap_msg);
858 rc = wait_for_completion_interruptible(&resp_type.work);
859 if (rc == 0)
860 rc = convert_response_rng(zdev, &ap_msg, buffer);
861 else
862 /* Signal pending. */
863 ap_cancel_message(zdev->ap_dev, &ap_msg);
864 kfree(ap_msg.message);
865 return rc;
866}
867
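zcrypt_pcixcc_rng() above uses the usual asynchronous request flow: build the message, queue it on the AP device, sleep on a completion until the reply callback fires, and cancel the message if the wait is interrupted by a signal. The sketch below mimics only that submit-and-wait shape in user space, with a pthread condition variable standing in for the kernel completion and a worker thread playing the role of the card; all names are illustrative.

#include <pthread.h>
#include <stdio.h>

/* Poor man's completion: mutex + condvar + done flag. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static struct completion work = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion_demo(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* The "card": answers the queued request and signals the waiter. */
static void *card_worker(void *arg)
{
	(void) arg;
	complete(&work);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, card_worker, NULL);	/* "queue" the request */
	wait_for_completion_demo(&work);		/* block until the reply */
	pthread_join(tid, NULL);
	printf("request completed\n");
	return 0;
}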
868/**
739 * The crypto operations for a PCIXCC/CEX2C card. 869 * The crypto operations for a PCIXCC/CEX2C card.
740 */ 870 */
741static struct zcrypt_ops zcrypt_pcixcc_ops = { 871static struct zcrypt_ops zcrypt_pcixcc_ops = {
@@ -744,6 +874,13 @@ static struct zcrypt_ops zcrypt_pcixcc_ops = {
744 .send_cprb = zcrypt_pcixcc_send_cprb, 874 .send_cprb = zcrypt_pcixcc_send_cprb,
745}; 875};
746 876
877static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
878 .rsa_modexpo = zcrypt_pcixcc_modexpo,
879 .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
880 .send_cprb = zcrypt_pcixcc_send_cprb,
881 .rng = zcrypt_pcixcc_rng,
882};
883
747/** 884/**
748 * Micro-code detection function. It sends a message to a pcixcc card 885 * Micro-code detection function. It sends a message to a pcixcc card
749 * to find out the microcode level. 886 * to find out the microcode level.
@@ -859,6 +996,58 @@ out_free:
859} 996}
860 997
861/** 998/**
 999 * Large random number detection function. It sends a message to a pcixcc
1000 * card to find out if large random numbers are supported.
1001 * @ap_dev: pointer to the AP device.
1002 *
1003 * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
1004 */
1005static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
1006{
1007 struct ap_message ap_msg;
1008 unsigned long long psmid;
1009 struct {
1010 struct type86_hdr hdr;
1011 struct type86_fmt2_ext fmt2;
1012 struct CPRBX cprbx;
1013 } __attribute__((packed)) *reply;
1014 int rc, i;
1015
1016 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
1017 if (!ap_msg.message)
1018 return -ENOMEM;
1019
1020 rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
1021 rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
1022 ap_msg.length);
1023 if (rc)
1024 goto out_free;
1025
1026 /* Wait for the test message to complete. */
1027 for (i = 0; i < 2 * HZ; i++) {
1028 msleep(1000 / HZ);
1029 rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
1030 if (rc == 0 && psmid == 0x0102030405060708ULL)
1031 break;
1032 }
1033
1034 if (i >= 2 * HZ) {
1035 /* Got no answer. */
1036 rc = -ENODEV;
1037 goto out_free;
1038 }
1039
1040 reply = ap_msg.message;
1041 if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
1042 rc = 1;
1043 else
1044 rc = 0;
1045out_free:
1046 free_page((unsigned long) ap_msg.message);
1047 return rc;
1048}
1049
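zcrypt_pcixcc_rng_supported() above probes the capability empirically: it fires a small test request tagged with a fixed PSMID and polls the queue for up to two seconds, treating a clean reply as "supported" and a timeout as "not supported". A simplified user-space sketch of that poll-with-timeout shape follows; recv_reply() is a hypothetical stand-in for ap_recv(), and the reply-code check is omitted.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define TEST_PSMID 0x0102030405060708ULL

/* Hypothetical stand-in for ap_recv(): here the "card" always answers. */
static int recv_reply(uint64_t *psmid)
{
	*psmid = TEST_PSMID;
	return 0;			/* 0 means a reply was dequeued */
}

static int probe_rng_support(void)
{
	uint64_t psmid;
	int i;

	/* ...the test message would be sent here (cf. ap_send above)... */
	for (i = 0; i < 20; i++) {	/* roughly 2 s at 100 ms per poll */
		usleep(100 * 1000);
		if (recv_reply(&psmid) == 0 && psmid == TEST_PSMID)
			return 1;	/* got an answer: treat as supported */
	}
	return 0;			/* timed out: treat as unsupported */
}

int main(void)
{
	printf("large RNG supported: %d\n", probe_rng_support());
	return 0;
}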
1050/**
862 * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device 1051 * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
863 * since the bus_match already checked the hardware type. The PCIXCC 1052 * since the bus_match already checked the hardware type. The PCIXCC
864 * cards come in two flavours: micro code level 2 and micro code level 3. 1053 * cards come in two flavours: micro code level 2 and micro code level 3.
@@ -874,7 +1063,6 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
874 if (!zdev) 1063 if (!zdev)
875 return -ENOMEM; 1064 return -ENOMEM;
876 zdev->ap_dev = ap_dev; 1065 zdev->ap_dev = ap_dev;
877 zdev->ops = &zcrypt_pcixcc_ops;
878 zdev->online = 1; 1066 zdev->online = 1;
879 if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) { 1067 if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
880 rc = zcrypt_pcixcc_mcl(ap_dev); 1068 rc = zcrypt_pcixcc_mcl(ap_dev);
@@ -901,6 +1089,15 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
901 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 1089 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
902 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1090 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
903 } 1091 }
1092 rc = zcrypt_pcixcc_rng_supported(ap_dev);
1093 if (rc < 0) {
1094 zcrypt_device_free(zdev);
1095 return rc;
1096 }
1097 if (rc)
1098 zdev->ops = &zcrypt_pcixcc_with_rng_ops;
1099 else
1100 zdev->ops = &zcrypt_pcixcc_ops;
904 ap_dev->reply = &zdev->reply; 1101 ap_dev->reply = &zdev->reply;
905 ap_dev->private = zdev; 1102 ap_dev->private = zdev;
906 rc = zcrypt_device_register(zdev); 1103 rc = zcrypt_device_register(zdev);
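The probe hunk above moves the ops assignment after the capability check, so each device is wired to one of two static ops tables once and the request paths never re-test for RNG support. A tiny sketch of that probe-time selection, with made-up names:

#include <stdio.h>

/* Two static "ops" tables; the RNG-capable one is a strict superset. */
struct demo_ops {
	const char *name;
	int has_rng;
};

static const struct demo_ops base_ops     = { "pcixcc",          0 };
static const struct demo_ops with_rng_ops = { "pcixcc-with-rng", 1 };

/* Probe-time decision: check the capability once, wire up ops forever. */
static const struct demo_ops *select_ops(int rng_supported)
{
	return rng_supported ? &with_rng_ops : &base_ops;
}

int main(void)
{
	printf("selected ops: %s\n", select_ops(1)->name);
	return 0;
}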
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index d8a5c229c5a7..04a1d7bf678c 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -299,7 +299,7 @@ claw_probe(struct ccwgroup_device *cgdev)
299 struct claw_privbk *privptr=NULL; 299 struct claw_privbk *privptr=NULL;
300 300
301#ifdef FUNCTRACE 301#ifdef FUNCTRACE
302 printk(KERN_INFO "%s Enter\n",__FUNCTION__); 302 printk(KERN_INFO "%s Enter\n",__func__);
303#endif 303#endif
304 CLAW_DBF_TEXT(2,setup,"probe"); 304 CLAW_DBF_TEXT(2,setup,"probe");
305 if (!get_device(&cgdev->dev)) 305 if (!get_device(&cgdev->dev))
@@ -313,7 +313,7 @@ claw_probe(struct ccwgroup_device *cgdev)
313 probe_error(cgdev); 313 probe_error(cgdev);
314 put_device(&cgdev->dev); 314 put_device(&cgdev->dev);
315 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n", 315 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
316 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__); 316 cgdev->cdev[0]->dev.bus_id,__func__,__LINE__);
317 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); 317 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
318 return -ENOMEM; 318 return -ENOMEM;
319 } 319 }
@@ -323,7 +323,7 @@ claw_probe(struct ccwgroup_device *cgdev)
323 probe_error(cgdev); 323 probe_error(cgdev);
324 put_device(&cgdev->dev); 324 put_device(&cgdev->dev);
325 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n", 325 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
326 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__); 326 cgdev->cdev[0]->dev.bus_id,__func__,__LINE__);
327 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); 327 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
328 return -ENOMEM; 328 return -ENOMEM;
329 } 329 }
@@ -340,7 +340,7 @@ claw_probe(struct ccwgroup_device *cgdev)
340 probe_error(cgdev); 340 probe_error(cgdev);
341 put_device(&cgdev->dev); 341 put_device(&cgdev->dev);
342 printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n", 342 printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n",
343 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__); 343 cgdev->cdev[0]->dev.bus_id,__func__,__LINE__);
344 CLAW_DBF_TEXT_(2,setup,"probex%d",rc); 344 CLAW_DBF_TEXT_(2,setup,"probex%d",rc);
345 return rc; 345 return rc;
346 } 346 }
@@ -351,7 +351,7 @@ claw_probe(struct ccwgroup_device *cgdev)
351 cgdev->dev.driver_data = privptr; 351 cgdev->dev.driver_data = privptr;
352#ifdef FUNCTRACE 352#ifdef FUNCTRACE
353 printk(KERN_INFO "claw:%s exit on line %d, " 353 printk(KERN_INFO "claw:%s exit on line %d, "
354 "rc = 0\n",__FUNCTION__,__LINE__); 354 "rc = 0\n",__func__,__LINE__);
355#endif 355#endif
356 CLAW_DBF_TEXT(2,setup,"prbext 0"); 356 CLAW_DBF_TEXT(2,setup,"prbext 0");
357 357
@@ -371,7 +371,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
371 struct chbk *p_ch; 371 struct chbk *p_ch;
372 372
373#ifdef FUNCTRACE 373#ifdef FUNCTRACE
374 printk(KERN_INFO "%s:%s enter\n",dev->name,__FUNCTION__); 374 printk(KERN_INFO "%s:%s enter\n",dev->name,__func__);
375#endif 375#endif
376 CLAW_DBF_TEXT(4,trace,"claw_tx"); 376 CLAW_DBF_TEXT(4,trace,"claw_tx");
377 p_ch=&privptr->channel[WRITE]; 377 p_ch=&privptr->channel[WRITE];
@@ -381,7 +381,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
381 privptr->stats.tx_dropped++; 381 privptr->stats.tx_dropped++;
382#ifdef FUNCTRACE 382#ifdef FUNCTRACE
383 printk(KERN_INFO "%s: %s() exit on line %d, rc = EIO\n", 383 printk(KERN_INFO "%s: %s() exit on line %d, rc = EIO\n",
384 dev->name,__FUNCTION__, __LINE__); 384 dev->name,__func__, __LINE__);
385#endif 385#endif
386 CLAW_DBF_TEXT_(2,trace,"clawtx%d",-EIO); 386 CLAW_DBF_TEXT_(2,trace,"clawtx%d",-EIO);
387 return -EIO; 387 return -EIO;
@@ -398,7 +398,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
398 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); 398 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
399#ifdef FUNCTRACE 399#ifdef FUNCTRACE
400 printk(KERN_INFO "%s:%s exit on line %d, rc = %d\n", 400 printk(KERN_INFO "%s:%s exit on line %d, rc = %d\n",
401 dev->name, __FUNCTION__, __LINE__, rc); 401 dev->name, __func__, __LINE__, rc);
402#endif 402#endif
403 CLAW_DBF_TEXT_(4,trace,"clawtx%d",rc); 403 CLAW_DBF_TEXT_(4,trace,"clawtx%d",rc);
404 return rc; 404 return rc;
@@ -460,7 +460,7 @@ claw_pack_skb(struct claw_privbk *privptr)
460#ifdef IOTRACE 460#ifdef IOTRACE
461 printk(KERN_INFO "%s: %s() Packed %d len %d\n", 461 printk(KERN_INFO "%s: %s() Packed %d len %d\n",
462 p_env->ndev->name, 462 p_env->ndev->name,
463 __FUNCTION__,pkt_cnt,new_skb->len); 463 __func__,pkt_cnt,new_skb->len);
464#endif 464#endif
465 } 465 }
466 CLAW_DBF_TEXT(4,trace,"PackSKBx"); 466 CLAW_DBF_TEXT(4,trace,"PackSKBx");
@@ -478,7 +478,7 @@ claw_change_mtu(struct net_device *dev, int new_mtu)
478 struct claw_privbk *privptr=dev->priv; 478 struct claw_privbk *privptr=dev->priv;
479 int buff_size; 479 int buff_size;
480#ifdef FUNCTRACE 480#ifdef FUNCTRACE
481 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 481 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
482#endif 482#endif
483#ifdef DEBUGMSG 483#ifdef DEBUGMSG
484 printk(KERN_INFO "variable dev =\n"); 484 printk(KERN_INFO "variable dev =\n");
@@ -491,14 +491,14 @@ claw_change_mtu(struct net_device *dev, int new_mtu)
491#ifdef FUNCTRACE 491#ifdef FUNCTRACE
492 printk(KERN_INFO "%s:%s Exit on line %d, rc=EINVAL\n", 492 printk(KERN_INFO "%s:%s Exit on line %d, rc=EINVAL\n",
493 dev->name, 493 dev->name,
494 __FUNCTION__, __LINE__); 494 __func__, __LINE__);
495#endif 495#endif
496 return -EINVAL; 496 return -EINVAL;
497 } 497 }
498 dev->mtu = new_mtu; 498 dev->mtu = new_mtu;
499#ifdef FUNCTRACE 499#ifdef FUNCTRACE
500 printk(KERN_INFO "%s:%s Exit on line %d\n",dev->name, 500 printk(KERN_INFO "%s:%s Exit on line %d\n",dev->name,
501 __FUNCTION__, __LINE__); 501 __func__, __LINE__);
502#endif 502#endif
503 return 0; 503 return 0;
504} /* end of claw_change_mtu */ 504} /* end of claw_change_mtu */
@@ -522,7 +522,7 @@ claw_open(struct net_device *dev)
522 struct ccwbk *p_buf; 522 struct ccwbk *p_buf;
523 523
524#ifdef FUNCTRACE 524#ifdef FUNCTRACE
525 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 525 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
526#endif 526#endif
527 CLAW_DBF_TEXT(4,trace,"open"); 527 CLAW_DBF_TEXT(4,trace,"open");
528 if (!dev || (dev->name[0] == 0x00)) { 528 if (!dev || (dev->name[0] == 0x00)) {
@@ -537,7 +537,7 @@ claw_open(struct net_device *dev)
537 if (rc) { 537 if (rc) {
538 printk(KERN_INFO "%s:%s Exit on line %d, rc=ENOMEM\n", 538 printk(KERN_INFO "%s:%s Exit on line %d, rc=ENOMEM\n",
539 dev->name, 539 dev->name,
540 __FUNCTION__, __LINE__); 540 __func__, __LINE__);
541 CLAW_DBF_TEXT(2,trace,"openmem"); 541 CLAW_DBF_TEXT(2,trace,"openmem");
542 return -ENOMEM; 542 return -ENOMEM;
543 } 543 }
@@ -661,7 +661,7 @@ claw_open(struct net_device *dev)
661 claw_clear_busy(dev); 661 claw_clear_busy(dev);
662#ifdef FUNCTRACE 662#ifdef FUNCTRACE
663 printk(KERN_INFO "%s:%s Exit on line %d, rc=EIO\n", 663 printk(KERN_INFO "%s:%s Exit on line %d, rc=EIO\n",
664 dev->name,__FUNCTION__,__LINE__); 664 dev->name,__func__,__LINE__);
665#endif 665#endif
666 CLAW_DBF_TEXT(2,trace,"open EIO"); 666 CLAW_DBF_TEXT(2,trace,"open EIO");
667 return -EIO; 667 return -EIO;
@@ -673,7 +673,7 @@ claw_open(struct net_device *dev)
673 673
674#ifdef FUNCTRACE 674#ifdef FUNCTRACE
675 printk(KERN_INFO "%s:%s Exit on line %d, rc=0\n", 675 printk(KERN_INFO "%s:%s Exit on line %d, rc=0\n",
676 dev->name,__FUNCTION__,__LINE__); 676 dev->name,__func__,__LINE__);
677#endif 677#endif
678 CLAW_DBF_TEXT(4,trace,"openok"); 678 CLAW_DBF_TEXT(4,trace,"openok");
679 return 0; 679 return 0;
@@ -696,7 +696,7 @@ claw_irq_handler(struct ccw_device *cdev,
696 696
697 697
698#ifdef FUNCTRACE 698#ifdef FUNCTRACE
699 printk(KERN_INFO "%s enter \n",__FUNCTION__); 699 printk(KERN_INFO "%s enter \n",__func__);
700#endif 700#endif
701 CLAW_DBF_TEXT(4,trace,"clawirq"); 701 CLAW_DBF_TEXT(4,trace,"clawirq");
702 /* Bypass all 'unsolicited interrupts' */ 702 /* Bypass all 'unsolicited interrupts' */
@@ -706,7 +706,7 @@ claw_irq_handler(struct ccw_device *cdev,
706 cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat); 706 cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat);
707#ifdef FUNCTRACE 707#ifdef FUNCTRACE
708 printk(KERN_INFO "claw: %s() " 708 printk(KERN_INFO "claw: %s() "
709 "exit on line %d\n",__FUNCTION__,__LINE__); 709 "exit on line %d\n",__func__,__LINE__);
710#endif 710#endif
711 CLAW_DBF_TEXT(2,trace,"badirq"); 711 CLAW_DBF_TEXT(2,trace,"badirq");
712 return; 712 return;
@@ -752,7 +752,7 @@ claw_irq_handler(struct ccw_device *cdev,
752#endif 752#endif
753#ifdef FUNCTRACE 753#ifdef FUNCTRACE
754 printk(KERN_INFO "%s:%s Exit on line %d\n", 754 printk(KERN_INFO "%s:%s Exit on line %d\n",
755 dev->name,__FUNCTION__,__LINE__); 755 dev->name,__func__,__LINE__);
756#endif 756#endif
757 CLAW_DBF_TEXT(2,trace,"chanchk"); 757 CLAW_DBF_TEXT(2,trace,"chanchk");
758 /* return; */ 758 /* return; */
@@ -777,7 +777,7 @@ claw_irq_handler(struct ccw_device *cdev,
777 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 777 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
778#ifdef FUNCTRACE 778#ifdef FUNCTRACE
779 printk(KERN_INFO "%s:%s Exit on line %d\n", 779 printk(KERN_INFO "%s:%s Exit on line %d\n",
780 dev->name,__FUNCTION__,__LINE__); 780 dev->name,__func__,__LINE__);
781#endif 781#endif
782 return; 782 return;
783 } 783 }
@@ -788,7 +788,7 @@ claw_irq_handler(struct ccw_device *cdev,
788#endif 788#endif
789#ifdef FUNCTRACE 789#ifdef FUNCTRACE
790 printk(KERN_INFO "%s:%s Exit on line %d\n", 790 printk(KERN_INFO "%s:%s Exit on line %d\n",
791 dev->name,__FUNCTION__,__LINE__); 791 dev->name,__func__,__LINE__);
792#endif 792#endif
793 CLAW_DBF_TEXT(4,trace,"stop"); 793 CLAW_DBF_TEXT(4,trace,"stop");
794 return; 794 return;
@@ -804,7 +804,7 @@ claw_irq_handler(struct ccw_device *cdev,
804 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 804 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
805#ifdef FUNCTRACE 805#ifdef FUNCTRACE
806 printk(KERN_INFO "%s:%s Exit on line %d\n", 806 printk(KERN_INFO "%s:%s Exit on line %d\n",
807 dev->name,__FUNCTION__,__LINE__); 807 dev->name,__func__,__LINE__);
808#endif 808#endif
809 CLAW_DBF_TEXT(4,trace,"haltio"); 809 CLAW_DBF_TEXT(4,trace,"haltio");
810 return; 810 return;
@@ -838,7 +838,7 @@ claw_irq_handler(struct ccw_device *cdev,
838#endif 838#endif
839#ifdef FUNCTRACE 839#ifdef FUNCTRACE
840 printk(KERN_INFO "%s:%s Exit on line %d\n", 840 printk(KERN_INFO "%s:%s Exit on line %d\n",
841 dev->name,__FUNCTION__,__LINE__); 841 dev->name,__func__,__LINE__);
842#endif 842#endif
843 CLAW_DBF_TEXT(4,trace,"haltio"); 843 CLAW_DBF_TEXT(4,trace,"haltio");
844 return; 844 return;
@@ -858,7 +858,7 @@ claw_irq_handler(struct ccw_device *cdev,
858 } 858 }
859#ifdef FUNCTRACE 859#ifdef FUNCTRACE
860 printk(KERN_INFO "%s:%s Exit on line %d\n", 860 printk(KERN_INFO "%s:%s Exit on line %d\n",
861 dev->name,__FUNCTION__,__LINE__); 861 dev->name,__func__,__LINE__);
862#endif 862#endif
863 CLAW_DBF_TEXT(4,trace,"notrdy"); 863 CLAW_DBF_TEXT(4,trace,"notrdy");
864 return; 864 return;
@@ -874,7 +874,7 @@ claw_irq_handler(struct ccw_device *cdev,
874 } 874 }
875#ifdef FUNCTRACE 875#ifdef FUNCTRACE
876 printk(KERN_INFO "%s:%s Exit on line %d\n", 876 printk(KERN_INFO "%s:%s Exit on line %d\n",
877 dev->name,__FUNCTION__,__LINE__); 877 dev->name,__func__,__LINE__);
878#endif 878#endif
879 CLAW_DBF_TEXT(4,trace,"PCI_read"); 879 CLAW_DBF_TEXT(4,trace,"PCI_read");
880 return; 880 return;
@@ -885,7 +885,7 @@ claw_irq_handler(struct ccw_device *cdev,
885 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 885 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
886#ifdef FUNCTRACE 886#ifdef FUNCTRACE
887 printk(KERN_INFO "%s:%s Exit on line %d\n", 887 printk(KERN_INFO "%s:%s Exit on line %d\n",
888 dev->name,__FUNCTION__,__LINE__); 888 dev->name,__func__,__LINE__);
889#endif 889#endif
890 CLAW_DBF_TEXT(4,trace,"SPend_rd"); 890 CLAW_DBF_TEXT(4,trace,"SPend_rd");
891 return; 891 return;
@@ -906,7 +906,7 @@ claw_irq_handler(struct ccw_device *cdev,
906#endif 906#endif
907#ifdef FUNCTRACE 907#ifdef FUNCTRACE
908 printk(KERN_INFO "%s:%s Exit on line %d\n", 908 printk(KERN_INFO "%s:%s Exit on line %d\n",
909 dev->name,__FUNCTION__,__LINE__); 909 dev->name,__func__,__LINE__);
910#endif 910#endif
911 CLAW_DBF_TEXT(4,trace,"RdIRQXit"); 911 CLAW_DBF_TEXT(4,trace,"RdIRQXit");
912 return; 912 return;
@@ -929,7 +929,7 @@ claw_irq_handler(struct ccw_device *cdev,
929 } 929 }
930#ifdef FUNCTRACE 930#ifdef FUNCTRACE
931 printk(KERN_INFO "%s:%s Exit on line %d\n", 931 printk(KERN_INFO "%s:%s Exit on line %d\n",
932 dev->name,__FUNCTION__,__LINE__); 932 dev->name,__func__,__LINE__);
933#endif 933#endif
934 CLAW_DBF_TEXT(4,trace,"rstrtwrt"); 934 CLAW_DBF_TEXT(4,trace,"rstrtwrt");
935 return; 935 return;
@@ -946,7 +946,7 @@ claw_irq_handler(struct ccw_device *cdev,
946 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 946 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
947#ifdef FUNCTRACE 947#ifdef FUNCTRACE
948 printk(KERN_INFO "%s:%s Exit on line %d\n", 948 printk(KERN_INFO "%s:%s Exit on line %d\n",
949 dev->name,__FUNCTION__,__LINE__); 949 dev->name,__func__,__LINE__);
950#endif 950#endif
951 CLAW_DBF_TEXT(4,trace,"writeUE"); 951 CLAW_DBF_TEXT(4,trace,"writeUE");
952 return; 952 return;
@@ -969,7 +969,7 @@ claw_irq_handler(struct ccw_device *cdev,
969#endif 969#endif
970#ifdef FUNCTRACE 970#ifdef FUNCTRACE
971 printk(KERN_INFO "%s:%s Exit on line %d\n", 971 printk(KERN_INFO "%s:%s Exit on line %d\n",
972 dev->name,__FUNCTION__,__LINE__); 972 dev->name,__func__,__LINE__);
973#endif 973#endif
974 CLAW_DBF_TEXT(4,trace,"StWtExit"); 974 CLAW_DBF_TEXT(4,trace,"StWtExit");
975 return; 975 return;
@@ -978,7 +978,7 @@ claw_irq_handler(struct ccw_device *cdev,
978 "state=%d\n",dev->name,p_ch->claw_state); 978 "state=%d\n",dev->name,p_ch->claw_state);
979#ifdef FUNCTRACE 979#ifdef FUNCTRACE
980 printk(KERN_INFO "%s:%s Exit on line %d\n", 980 printk(KERN_INFO "%s:%s Exit on line %d\n",
981 dev->name,__FUNCTION__,__LINE__); 981 dev->name,__func__,__LINE__);
982#endif 982#endif
983 CLAW_DBF_TEXT(2,trace,"badIRQ"); 983 CLAW_DBF_TEXT(2,trace,"badIRQ");
984 return; 984 return;
@@ -1001,7 +1001,7 @@ claw_irq_tasklet ( unsigned long data )
1001 p_ch = (struct chbk *) data; 1001 p_ch = (struct chbk *) data;
1002 dev = (struct net_device *)p_ch->ndev; 1002 dev = (struct net_device *)p_ch->ndev;
1003#ifdef FUNCTRACE 1003#ifdef FUNCTRACE
1004 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 1004 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
1005#endif 1005#endif
1006#ifdef DEBUGMSG 1006#ifdef DEBUGMSG
1007 printk(KERN_INFO "%s: variable p_ch =\n",dev->name); 1007 printk(KERN_INFO "%s: variable p_ch =\n",dev->name);
@@ -1021,7 +1021,7 @@ claw_irq_tasklet ( unsigned long data )
1021 CLAW_DBF_TEXT(4,trace,"TskletXt"); 1021 CLAW_DBF_TEXT(4,trace,"TskletXt");
1022#ifdef FUNCTRACE 1022#ifdef FUNCTRACE
1023 printk(KERN_INFO "%s:%s Exit on line %d\n", 1023 printk(KERN_INFO "%s:%s Exit on line %d\n",
1024 dev->name,__FUNCTION__,__LINE__); 1024 dev->name,__func__,__LINE__);
1025#endif 1025#endif
1026 return; 1026 return;
1027} /* end of claw_irq_bh */ 1027} /* end of claw_irq_bh */
@@ -1048,7 +1048,7 @@ claw_release(struct net_device *dev)
1048 if (!privptr) 1048 if (!privptr)
1049 return 0; 1049 return 0;
1050#ifdef FUNCTRACE 1050#ifdef FUNCTRACE
1051 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 1051 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
1052#endif 1052#endif
1053 CLAW_DBF_TEXT(4,trace,"release"); 1053 CLAW_DBF_TEXT(4,trace,"release");
1054#ifdef DEBUGMSG 1054#ifdef DEBUGMSG
@@ -1090,7 +1090,7 @@ claw_release(struct net_device *dev)
1090 if(privptr->buffs_alloc != 1) { 1090 if(privptr->buffs_alloc != 1) {
1091#ifdef FUNCTRACE 1091#ifdef FUNCTRACE
1092 printk(KERN_INFO "%s:%s Exit on line %d\n", 1092 printk(KERN_INFO "%s:%s Exit on line %d\n",
1093 dev->name,__FUNCTION__,__LINE__); 1093 dev->name,__func__,__LINE__);
1094#endif 1094#endif
1095 CLAW_DBF_TEXT(4,trace,"none2fre"); 1095 CLAW_DBF_TEXT(4,trace,"none2fre");
1096 return 0; 1096 return 0;
@@ -1171,7 +1171,7 @@ claw_release(struct net_device *dev)
1171 } 1171 }
1172#ifdef FUNCTRACE 1172#ifdef FUNCTRACE
1173 printk(KERN_INFO "%s:%s Exit on line %d\n", 1173 printk(KERN_INFO "%s:%s Exit on line %d\n",
1174 dev->name,__FUNCTION__,__LINE__); 1174 dev->name,__func__,__LINE__);
1175#endif 1175#endif
1176 CLAW_DBF_TEXT(4,trace,"rlsexit"); 1176 CLAW_DBF_TEXT(4,trace,"rlsexit");
1177 return 0; 1177 return 0;
@@ -1192,7 +1192,7 @@ claw_write_retry ( struct chbk *p_ch )
1192 1192
1193 1193
1194#ifdef FUNCTRACE 1194#ifdef FUNCTRACE
1195 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); 1195 printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
1196 printk(KERN_INFO "claw: variable p_ch =\n"); 1196 printk(KERN_INFO "claw: variable p_ch =\n");
1197 dumpit((char *) p_ch, sizeof(struct chbk)); 1197 dumpit((char *) p_ch, sizeof(struct chbk));
1198#endif 1198#endif
@@ -1200,20 +1200,20 @@ claw_write_retry ( struct chbk *p_ch )
1200 if (p_ch->claw_state == CLAW_STOP) { 1200 if (p_ch->claw_state == CLAW_STOP) {
1201#ifdef FUNCTRACE 1201#ifdef FUNCTRACE
1202 printk(KERN_INFO "%s:%s Exit on line %d\n", 1202 printk(KERN_INFO "%s:%s Exit on line %d\n",
1203 dev->name,__FUNCTION__,__LINE__); 1203 dev->name,__func__,__LINE__);
1204#endif 1204#endif
1205 return; 1205 return;
1206 } 1206 }
1207#ifdef DEBUGMSG 1207#ifdef DEBUGMSG
1208 printk( KERN_INFO "%s:%s state-%02x\n" , 1208 printk( KERN_INFO "%s:%s state-%02x\n" ,
1209 dev->name, 1209 dev->name,
1210 __FUNCTION__, 1210 __func__,
1211 p_ch->claw_state); 1211 p_ch->claw_state);
1212#endif 1212#endif
1213 claw_strt_out_IO( dev ); 1213 claw_strt_out_IO( dev );
1214#ifdef FUNCTRACE 1214#ifdef FUNCTRACE
1215 printk(KERN_INFO "%s:%s Exit on line %d\n", 1215 printk(KERN_INFO "%s:%s Exit on line %d\n",
1216 dev->name,__FUNCTION__,__LINE__); 1216 dev->name,__func__,__LINE__);
1217#endif 1217#endif
1218 CLAW_DBF_TEXT(4,trace,"rtry_xit"); 1218 CLAW_DBF_TEXT(4,trace,"rtry_xit");
1219 return; 1219 return;
@@ -1235,7 +1235,7 @@ claw_write_next ( struct chbk * p_ch )
1235 int rc; 1235 int rc;
1236 1236
1237#ifdef FUNCTRACE 1237#ifdef FUNCTRACE
1238 printk(KERN_INFO "%s:%s Enter \n",p_ch->ndev->name,__FUNCTION__); 1238 printk(KERN_INFO "%s:%s Enter \n",p_ch->ndev->name,__func__);
1239 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name); 1239 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
1240 dumpit((char *) p_ch, sizeof(struct chbk)); 1240 dumpit((char *) p_ch, sizeof(struct chbk));
1241#endif 1241#endif
@@ -1262,7 +1262,7 @@ claw_write_next ( struct chbk * p_ch )
1262 1262
1263#ifdef FUNCTRACE 1263#ifdef FUNCTRACE
1264 printk(KERN_INFO "%s:%s Exit on line %d\n", 1264 printk(KERN_INFO "%s:%s Exit on line %d\n",
1265 dev->name,__FUNCTION__,__LINE__); 1265 dev->name,__func__,__LINE__);
1266#endif 1266#endif
1267 return; 1267 return;
1268} /* end of claw_write_next */ 1268} /* end of claw_write_next */
@@ -1276,7 +1276,7 @@ static void
1276claw_timer ( struct chbk * p_ch ) 1276claw_timer ( struct chbk * p_ch )
1277{ 1277{
1278#ifdef FUNCTRACE 1278#ifdef FUNCTRACE
1279 printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__FUNCTION__); 1279 printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__func__);
1280 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name); 1280 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
1281 dumpit((char *) p_ch, sizeof(struct chbk)); 1281 dumpit((char *) p_ch, sizeof(struct chbk));
1282#endif 1282#endif
@@ -1285,7 +1285,7 @@ claw_timer ( struct chbk * p_ch )
1285 wake_up(&p_ch->wait); 1285 wake_up(&p_ch->wait);
1286#ifdef FUNCTRACE 1286#ifdef FUNCTRACE
1287 printk(KERN_INFO "%s:%s Exit on line %d\n", 1287 printk(KERN_INFO "%s:%s Exit on line %d\n",
1288 p_ch->ndev->name,__FUNCTION__,__LINE__); 1288 p_ch->ndev->name,__func__,__LINE__);
1289#endif 1289#endif
1290 return; 1290 return;
1291} /* end of claw_timer */ 1291} /* end of claw_timer */
@@ -1312,7 +1312,7 @@ pages_to_order_of_mag(int num_of_pages)
1312 int order_of_mag=1; /* assume 2 pages */ 1312 int order_of_mag=1; /* assume 2 pages */
1313 int nump=2; 1313 int nump=2;
1314#ifdef FUNCTRACE 1314#ifdef FUNCTRACE
1315 printk(KERN_INFO "%s Enter pages = %d \n",__FUNCTION__,num_of_pages); 1315 printk(KERN_INFO "%s Enter pages = %d \n",__func__,num_of_pages);
1316#endif 1316#endif
1317 CLAW_DBF_TEXT_(5,trace,"pages%d",num_of_pages); 1317 CLAW_DBF_TEXT_(5,trace,"pages%d",num_of_pages);
1318 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */ 1318 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
@@ -1327,7 +1327,7 @@ pages_to_order_of_mag(int num_of_pages)
1327 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */ 1327 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1328#ifdef FUNCTRACE 1328#ifdef FUNCTRACE
1329 printk(KERN_INFO "%s Exit on line %d, order = %d\n", 1329 printk(KERN_INFO "%s Exit on line %d, order = %d\n",
1330 __FUNCTION__,__LINE__, order_of_mag); 1330 __func__,__LINE__, order_of_mag);
1331#endif 1331#endif
1332 CLAW_DBF_TEXT_(5,trace,"mag%d",order_of_mag); 1332 CLAW_DBF_TEXT_(5,trace,"mag%d",order_of_mag);
1333 return order_of_mag; 1333 return order_of_mag;
@@ -1349,7 +1349,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1349 struct ccwbk* p_buf; 1349 struct ccwbk* p_buf;
1350#endif 1350#endif
1351#ifdef FUNCTRACE 1351#ifdef FUNCTRACE
1352 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 1352 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
1353#endif 1353#endif
1354#ifdef DEBUGMSG 1354#ifdef DEBUGMSG
1355 printk(KERN_INFO "dev\n"); 1355 printk(KERN_INFO "dev\n");
@@ -1369,7 +1369,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1369 if ( p_first==NULL) { 1369 if ( p_first==NULL) {
1370#ifdef FUNCTRACE 1370#ifdef FUNCTRACE
1371 printk(KERN_INFO "%s:%s Exit on line %d\n", 1371 printk(KERN_INFO "%s:%s Exit on line %d\n",
1372 dev->name,__FUNCTION__,__LINE__); 1372 dev->name,__func__,__LINE__);
1373#endif 1373#endif
1374 CLAW_DBF_TEXT(4,trace,"addexit"); 1374 CLAW_DBF_TEXT(4,trace,"addexit");
1375 return 0; 1375 return 0;
@@ -1400,9 +1400,9 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1400 if ( privptr-> p_read_active_first ==NULL ) { 1400 if ( privptr-> p_read_active_first ==NULL ) {
1401#ifdef DEBUGMSG 1401#ifdef DEBUGMSG
1402 printk(KERN_INFO "%s:%s p_read_active_first == NULL \n", 1402 printk(KERN_INFO "%s:%s p_read_active_first == NULL \n",
1403 dev->name,__FUNCTION__); 1403 dev->name,__func__);
1404 printk(KERN_INFO "%s:%s Read active first/last changed \n", 1404 printk(KERN_INFO "%s:%s Read active first/last changed \n",
1405 dev->name,__FUNCTION__); 1405 dev->name,__func__);
1406#endif 1406#endif
1407 privptr-> p_read_active_first= p_first; /* set new first */ 1407 privptr-> p_read_active_first= p_first; /* set new first */
1408 privptr-> p_read_active_last = p_last; /* set new last */ 1408 privptr-> p_read_active_last = p_last; /* set new last */
@@ -1411,7 +1411,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1411 1411
1412#ifdef DEBUGMSG 1412#ifdef DEBUGMSG
1413 printk(KERN_INFO "%s:%s Read in progress \n", 1413 printk(KERN_INFO "%s:%s Read in progress \n",
1414 dev->name,__FUNCTION__); 1414 dev->name,__func__);
1415#endif 1415#endif
1416 /* set up TIC ccw */ 1416 /* set up TIC ccw */
1417 temp_ccw.cda= (__u32)__pa(&p_first->read); 1417 temp_ccw.cda= (__u32)__pa(&p_first->read);
@@ -1450,15 +1450,15 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1450 privptr->p_read_active_last=p_last; 1450 privptr->p_read_active_last=p_last;
1451 } /* end of if ( privptr-> p_read_active_first ==NULL) */ 1451 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1452#ifdef IOTRACE 1452#ifdef IOTRACE
1453 printk(KERN_INFO "%s:%s dump p_last CCW BK \n",dev->name,__FUNCTION__); 1453 printk(KERN_INFO "%s:%s dump p_last CCW BK \n",dev->name,__func__);
1454 dumpit((char *)p_last, sizeof(struct ccwbk)); 1454 dumpit((char *)p_last, sizeof(struct ccwbk));
1455 printk(KERN_INFO "%s:%s dump p_end CCW BK \n",dev->name,__FUNCTION__); 1455 printk(KERN_INFO "%s:%s dump p_end CCW BK \n",dev->name,__func__);
1456 dumpit((char *)p_end, sizeof(struct endccw)); 1456 dumpit((char *)p_end, sizeof(struct endccw));
1457 1457
1458 printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__FUNCTION__); 1458 printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__func__);
1459 dumpit((char *)p_first, sizeof(struct ccwbk)); 1459 dumpit((char *)p_first, sizeof(struct ccwbk));
1460 printk(KERN_INFO "%s:%s Dump Active CCW chain \n", 1460 printk(KERN_INFO "%s:%s Dump Active CCW chain \n",
1461 dev->name,__FUNCTION__); 1461 dev->name,__func__);
1462 p_buf=privptr->p_read_active_first; 1462 p_buf=privptr->p_read_active_first;
1463 while (p_buf!=NULL) { 1463 while (p_buf!=NULL) {
1464 dumpit((char *)p_buf, sizeof(struct ccwbk)); 1464 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -1467,7 +1467,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1467#endif 1467#endif
1468#ifdef FUNCTRACE 1468#ifdef FUNCTRACE
1469 printk(KERN_INFO "%s:%s Exit on line %d\n", 1469 printk(KERN_INFO "%s:%s Exit on line %d\n",
1470 dev->name,__FUNCTION__,__LINE__); 1470 dev->name,__func__,__LINE__);
1471#endif 1471#endif
1472 CLAW_DBF_TEXT(4,trace,"addexit"); 1472 CLAW_DBF_TEXT(4,trace,"addexit");
1473 return 0; 1473 return 0;
@@ -1483,7 +1483,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
1483{ 1483{
1484#ifdef FUNCTRACE 1484#ifdef FUNCTRACE
1485 printk(KERN_INFO "%s: %s() > enter \n", 1485 printk(KERN_INFO "%s: %s() > enter \n",
1486 cdev->dev.bus_id,__FUNCTION__); 1486 cdev->dev.bus_id,__func__);
1487#endif 1487#endif
1488 CLAW_DBF_TEXT(4,trace,"ccwret"); 1488 CLAW_DBF_TEXT(4,trace,"ccwret");
1489#ifdef DEBUGMSG 1489#ifdef DEBUGMSG
@@ -1516,7 +1516,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
1516 } 1516 }
1517#ifdef FUNCTRACE 1517#ifdef FUNCTRACE
1518 printk(KERN_INFO "%s: %s() > exit on line %d\n", 1518 printk(KERN_INFO "%s: %s() > exit on line %d\n",
1519 cdev->dev.bus_id,__FUNCTION__,__LINE__); 1519 cdev->dev.bus_id,__func__,__LINE__);
1520#endif 1520#endif
1521 CLAW_DBF_TEXT(4,trace,"ccwret"); 1521 CLAW_DBF_TEXT(4,trace,"ccwret");
1522} /* end of ccw_check_return_code */ 1522} /* end of ccw_check_return_code */
@@ -1531,7 +1531,7 @@ ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1531 struct net_device *dev = p_ch->ndev; 1531 struct net_device *dev = p_ch->ndev;
1532 1532
1533#ifdef FUNCTRACE 1533#ifdef FUNCTRACE
1534 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__); 1534 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__func__);
1535#endif 1535#endif
1536#ifdef DEBUGMSG 1536#ifdef DEBUGMSG
1537 printk(KERN_INFO "%s: variable dev =\n",dev->name); 1537 printk(KERN_INFO "%s: variable dev =\n",dev->name);
@@ -1578,7 +1578,7 @@ ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1578 1578
1579#ifdef FUNCTRACE 1579#ifdef FUNCTRACE
1580 printk(KERN_INFO "%s: %s() exit on line %d\n", 1580 printk(KERN_INFO "%s: %s() exit on line %d\n",
1581 dev->name,__FUNCTION__,__LINE__); 1581 dev->name,__func__,__LINE__);
1582#endif 1582#endif
1583} /* end of ccw_check_unit_check */ 1583} /* end of ccw_check_unit_check */
1584 1584
@@ -1706,7 +1706,7 @@ find_link(struct net_device *dev, char *host_name, char *ws_name )
1706 int rc=0; 1706 int rc=0;
1707 1707
1708#ifdef FUNCTRACE 1708#ifdef FUNCTRACE
1709 printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__); 1709 printk(KERN_INFO "%s:%s > enter \n",dev->name,__func__);
1710#endif 1710#endif
1711 CLAW_DBF_TEXT(2,setup,"findlink"); 1711 CLAW_DBF_TEXT(2,setup,"findlink");
1712#ifdef DEBUGMSG 1712#ifdef DEBUGMSG
@@ -1739,7 +1739,7 @@ find_link(struct net_device *dev, char *host_name, char *ws_name )
1739 1739
1740#ifdef FUNCTRACE 1740#ifdef FUNCTRACE
1741 printk(KERN_INFO "%s:%s Exit on line %d\n", 1741 printk(KERN_INFO "%s:%s Exit on line %d\n",
1742 dev->name,__FUNCTION__,__LINE__); 1742 dev->name,__func__,__LINE__);
1743#endif 1743#endif
1744 return 0; 1744 return 0;
1745} /* end of find_link */ 1745} /* end of find_link */
@@ -1773,7 +1773,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1773 struct ccwbk *p_buf; 1773 struct ccwbk *p_buf;
1774#endif 1774#endif
1775#ifdef FUNCTRACE 1775#ifdef FUNCTRACE
1776 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__); 1776 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__func__);
1777#endif 1777#endif
1778 CLAW_DBF_TEXT(4,trace,"hw_tx"); 1778 CLAW_DBF_TEXT(4,trace,"hw_tx");
1779#ifdef DEBUGMSG 1779#ifdef DEBUGMSG
@@ -1787,7 +1787,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1787 p_ch=(struct chbk *)&privptr->channel[WRITE]; 1787 p_ch=(struct chbk *)&privptr->channel[WRITE];
1788 p_env =privptr->p_env; 1788 p_env =privptr->p_env;
1789#ifdef IOTRACE 1789#ifdef IOTRACE
1790 printk(KERN_INFO "%s: %s() dump sk_buff \n",dev->name,__FUNCTION__); 1790 printk(KERN_INFO "%s: %s() dump sk_buff \n",dev->name,__func__);
1791 dumpit((char *)skb ,sizeof(struct sk_buff)); 1791 dumpit((char *)skb ,sizeof(struct sk_buff));
1792#endif 1792#endif
1793 claw_free_wrt_buf(dev); /* Clean up free chain if posible */ 1793 claw_free_wrt_buf(dev); /* Clean up free chain if posible */
@@ -1877,7 +1877,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1877 while (len_of_data > 0) { 1877 while (len_of_data > 0) {
1878#ifdef DEBUGMSG 1878#ifdef DEBUGMSG
1879 printk(KERN_INFO "%s: %s() length-of-data is %ld \n", 1879 printk(KERN_INFO "%s: %s() length-of-data is %ld \n",
1880 dev->name ,__FUNCTION__,len_of_data); 1880 dev->name ,__func__,len_of_data);
1881 dumpit((char *)pDataAddress ,64); 1881 dumpit((char *)pDataAddress ,64);
1882#endif 1882#endif
1883 p_this_ccw=privptr->p_write_free_chain; /* get a block */ 1883 p_this_ccw=privptr->p_write_free_chain; /* get a block */
@@ -1913,7 +1913,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1913 p_last_ccw=p_this_ccw; /* save new last block */ 1913 p_last_ccw=p_this_ccw; /* save new last block */
1914#ifdef IOTRACE 1914#ifdef IOTRACE
1915 printk(KERN_INFO "%s: %s() > CCW and Buffer %ld bytes long \n", 1915 printk(KERN_INFO "%s: %s() > CCW and Buffer %ld bytes long \n",
1916 dev->name,__FUNCTION__,bytesInThisBuffer); 1916 dev->name,__func__,bytesInThisBuffer);
1917 dumpit((char *)p_this_ccw, sizeof(struct ccwbk)); 1917 dumpit((char *)p_this_ccw, sizeof(struct ccwbk));
1918 dumpit((char *)p_this_ccw->p_buffer, 64); 1918 dumpit((char *)p_this_ccw->p_buffer, 64);
1919#endif 1919#endif
@@ -1998,7 +1998,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1998 1998
1999#ifdef IOTRACE 1999#ifdef IOTRACE
2000 printk(KERN_INFO "%s: %s() > Dump Active CCW chain \n", 2000 printk(KERN_INFO "%s: %s() > Dump Active CCW chain \n",
2001 dev->name,__FUNCTION__); 2001 dev->name,__func__);
2002 p_buf=privptr->p_write_active_first; 2002 p_buf=privptr->p_write_active_first;
2003 while (p_buf!=NULL) { 2003 while (p_buf!=NULL) {
2004 dumpit((char *)p_buf, sizeof(struct ccwbk)); 2004 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2018,7 +2018,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
2018 /* if write free count is zero , set NOBUFFER */ 2018 /* if write free count is zero , set NOBUFFER */
2019#ifdef DEBUGMSG 2019#ifdef DEBUGMSG
2020 printk(KERN_INFO "%s: %s() > free_count is %d\n", 2020 printk(KERN_INFO "%s: %s() > free_count is %d\n",
2021 dev->name,__FUNCTION__, 2021 dev->name,__func__,
2022 (int) privptr->write_free_count ); 2022 (int) privptr->write_free_count );
2023#endif 2023#endif
2024 if (privptr->write_free_count==0) { 2024 if (privptr->write_free_count==0) {
@@ -2029,7 +2029,7 @@ Done2:
2029Done: 2029Done:
2030#ifdef FUNCTRACE 2030#ifdef FUNCTRACE
2031 printk(KERN_INFO "%s: %s() > exit on line %d, rc = %d \n", 2031 printk(KERN_INFO "%s: %s() > exit on line %d, rc = %d \n",
2032 dev->name,__FUNCTION__,__LINE__, rc); 2032 dev->name,__func__,__LINE__, rc);
2033#endif 2033#endif
2034 return(rc); 2034 return(rc);
2035} /* end of claw_hw_tx */ 2035} /* end of claw_hw_tx */
@@ -2063,7 +2063,7 @@ init_ccw_bk(struct net_device *dev)
2063 addr_t real_TIC_address; 2063 addr_t real_TIC_address;
2064 int i,j; 2064 int i,j;
2065#ifdef FUNCTRACE 2065#ifdef FUNCTRACE
2066 printk(KERN_INFO "%s: %s() enter \n",dev->name,__FUNCTION__); 2066 printk(KERN_INFO "%s: %s() enter \n",dev->name,__func__);
2067#endif 2067#endif
2068 CLAW_DBF_TEXT(4,trace,"init_ccw"); 2068 CLAW_DBF_TEXT(4,trace,"init_ccw");
2069#ifdef DEBUGMSG 2069#ifdef DEBUGMSG
@@ -2097,15 +2097,15 @@ init_ccw_bk(struct net_device *dev)
2097#ifdef DEBUGMSG 2097#ifdef DEBUGMSG
2098 printk(KERN_INFO "%s: %s() " 2098 printk(KERN_INFO "%s: %s() "
2099 "ccw_blocks_required=%d\n", 2099 "ccw_blocks_required=%d\n",
2100 dev->name,__FUNCTION__, 2100 dev->name,__func__,
2101 ccw_blocks_required); 2101 ccw_blocks_required);
2102 printk(KERN_INFO "%s: %s() " 2102 printk(KERN_INFO "%s: %s() "
2103 "PAGE_SIZE=0x%x\n", 2103 "PAGE_SIZE=0x%x\n",
2104 dev->name,__FUNCTION__, 2104 dev->name,__func__,
2105 (unsigned int)PAGE_SIZE); 2105 (unsigned int)PAGE_SIZE);
2106 printk(KERN_INFO "%s: %s() > " 2106 printk(KERN_INFO "%s: %s() > "
2107 "PAGE_MASK=0x%x\n", 2107 "PAGE_MASK=0x%x\n",
2108 dev->name,__FUNCTION__, 2108 dev->name,__func__,
2109 (unsigned int)PAGE_MASK); 2109 (unsigned int)PAGE_MASK);
2110#endif 2110#endif
2111 /* 2111 /*
@@ -2117,10 +2117,10 @@ init_ccw_bk(struct net_device *dev)
2117 2117
2118#ifdef DEBUGMSG 2118#ifdef DEBUGMSG
2119 printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n", 2119 printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n",
2120 dev->name,__FUNCTION__, 2120 dev->name,__func__,
2121 ccw_blocks_perpage); 2121 ccw_blocks_perpage);
2122 printk(KERN_INFO "%s: %s() > ccw_pages_required=%d\n", 2122 printk(KERN_INFO "%s: %s() > ccw_pages_required=%d\n",
2123 dev->name,__FUNCTION__, 2123 dev->name,__func__,
2124 ccw_pages_required); 2124 ccw_pages_required);
2125#endif 2125#endif
2126 /* 2126 /*
@@ -2156,29 +2156,29 @@ init_ccw_bk(struct net_device *dev)
2156#ifdef DEBUGMSG 2156#ifdef DEBUGMSG
2157 if (privptr->p_env->read_size < PAGE_SIZE) { 2157 if (privptr->p_env->read_size < PAGE_SIZE) {
2158 printk(KERN_INFO "%s: %s() reads_perpage=%d\n", 2158 printk(KERN_INFO "%s: %s() reads_perpage=%d\n",
2159 dev->name,__FUNCTION__, 2159 dev->name,__func__,
2160 claw_reads_perpage); 2160 claw_reads_perpage);
2161 } 2161 }
2162 else { 2162 else {
2163 printk(KERN_INFO "%s: %s() pages_perread=%d\n", 2163 printk(KERN_INFO "%s: %s() pages_perread=%d\n",
2164 dev->name,__FUNCTION__, 2164 dev->name,__func__,
2165 privptr->p_buff_pages_perread); 2165 privptr->p_buff_pages_perread);
2166 } 2166 }
2167 printk(KERN_INFO "%s: %s() read_pages=%d\n", 2167 printk(KERN_INFO "%s: %s() read_pages=%d\n",
2168 dev->name,__FUNCTION__, 2168 dev->name,__func__,
2169 claw_read_pages); 2169 claw_read_pages);
2170 if (privptr->p_env->write_size < PAGE_SIZE) { 2170 if (privptr->p_env->write_size < PAGE_SIZE) {
2171 printk(KERN_INFO "%s: %s() writes_perpage=%d\n", 2171 printk(KERN_INFO "%s: %s() writes_perpage=%d\n",
2172 dev->name,__FUNCTION__, 2172 dev->name,__func__,
2173 claw_writes_perpage); 2173 claw_writes_perpage);
2174 } 2174 }
2175 else { 2175 else {
2176 printk(KERN_INFO "%s: %s() pages_perwrite=%d\n", 2176 printk(KERN_INFO "%s: %s() pages_perwrite=%d\n",
2177 dev->name,__FUNCTION__, 2177 dev->name,__func__,
2178 privptr->p_buff_pages_perwrite); 2178 privptr->p_buff_pages_perwrite);
2179 } 2179 }
2180 printk(KERN_INFO "%s: %s() write_pages=%d\n", 2180 printk(KERN_INFO "%s: %s() write_pages=%d\n",
2181 dev->name,__FUNCTION__, 2181 dev->name,__func__,
2182 claw_write_pages); 2182 claw_write_pages);
2183#endif 2183#endif
2184 2184
@@ -2194,12 +2194,12 @@ init_ccw_bk(struct net_device *dev)
2194 printk(KERN_INFO "%s: %s() " 2194 printk(KERN_INFO "%s: %s() "
2195 "__get_free_pages for CCWs failed : " 2195 "__get_free_pages for CCWs failed : "
2196 "pages is %d\n", 2196 "pages is %d\n",
2197 dev->name,__FUNCTION__, 2197 dev->name,__func__,
2198 ccw_pages_required ); 2198 ccw_pages_required );
2199#ifdef FUNCTRACE 2199#ifdef FUNCTRACE
2200 printk(KERN_INFO "%s: %s() > " 2200 printk(KERN_INFO "%s: %s() > "
2201 "exit on line %d, rc = ENOMEM\n", 2201 "exit on line %d, rc = ENOMEM\n",
2202 dev->name,__FUNCTION__, 2202 dev->name,__func__,
2203 __LINE__); 2203 __LINE__);
2204#endif 2204#endif
2205 return -ENOMEM; 2205 return -ENOMEM;
@@ -2218,7 +2218,7 @@ init_ccw_bk(struct net_device *dev)
2218 /* Initialize ending CCW block */ 2218 /* Initialize ending CCW block */
2219#ifdef DEBUGMSG 2219#ifdef DEBUGMSG
2220 printk(KERN_INFO "%s: %s() begin initialize ending CCW blocks\n", 2220 printk(KERN_INFO "%s: %s() begin initialize ending CCW blocks\n",
2221 dev->name,__FUNCTION__); 2221 dev->name,__func__);
2222#endif 2222#endif
2223 2223
2224 p_endccw=privptr->p_end_ccw; 2224 p_endccw=privptr->p_end_ccw;
@@ -2276,7 +2276,7 @@ init_ccw_bk(struct net_device *dev)
2276 2276
2277#ifdef IOTRACE 2277#ifdef IOTRACE
2278 printk(KERN_INFO "%s: %s() dump claw ending CCW BK \n", 2278 printk(KERN_INFO "%s: %s() dump claw ending CCW BK \n",
2279 dev->name,__FUNCTION__); 2279 dev->name,__func__);
2280 dumpit((char *)p_endccw, sizeof(struct endccw)); 2280 dumpit((char *)p_endccw, sizeof(struct endccw));
2281#endif 2281#endif
2282 2282
@@ -2287,7 +2287,7 @@ init_ccw_bk(struct net_device *dev)
2287 2287
2288#ifdef DEBUGMSG 2288#ifdef DEBUGMSG
2289 printk(KERN_INFO "%s: %s() Begin build a chain of CCW buffer \n", 2289 printk(KERN_INFO "%s: %s() Begin build a chain of CCW buffer \n",
2290 dev->name,__FUNCTION__); 2290 dev->name,__func__);
2291#endif 2291#endif
2292 p_buff=privptr->p_buff_ccw; 2292 p_buff=privptr->p_buff_ccw;
2293 2293
@@ -2306,7 +2306,7 @@ init_ccw_bk(struct net_device *dev)
2306#ifdef DEBUGMSG 2306#ifdef DEBUGMSG
2307 printk(KERN_INFO "%s: %s() " 2307 printk(KERN_INFO "%s: %s() "
2308 "End build a chain of CCW buffer \n", 2308 "End build a chain of CCW buffer \n",
2309 dev->name,__FUNCTION__); 2309 dev->name,__func__);
2310 p_buf=p_free_chain; 2310 p_buf=p_free_chain;
2311 while (p_buf!=NULL) { 2311 while (p_buf!=NULL) {
2312 dumpit((char *)p_buf, sizeof(struct ccwbk)); 2312 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2321,7 +2321,7 @@ init_ccw_bk(struct net_device *dev)
2321#ifdef DEBUGMSG 2321#ifdef DEBUGMSG
2322 printk(KERN_INFO "%s: %s() " 2322 printk(KERN_INFO "%s: %s() "
2323 "Begin initialize ClawSignalBlock \n", 2323 "Begin initialize ClawSignalBlock \n",
2324 dev->name,__FUNCTION__); 2324 dev->name,__func__);
2325#endif 2325#endif
2326 if (privptr->p_claw_signal_blk==NULL) { 2326 if (privptr->p_claw_signal_blk==NULL) {
2327 privptr->p_claw_signal_blk=p_free_chain; 2327 privptr->p_claw_signal_blk=p_free_chain;
@@ -2334,7 +2334,7 @@ init_ccw_bk(struct net_device *dev)
2334#ifdef DEBUGMSG 2334#ifdef DEBUGMSG
2335 printk(KERN_INFO "%s: %s() > End initialize " 2335 printk(KERN_INFO "%s: %s() > End initialize "
2336 "ClawSignalBlock\n", 2336 "ClawSignalBlock\n",
2337 dev->name,__FUNCTION__); 2337 dev->name,__func__);
2338 dumpit((char *)privptr->p_claw_signal_blk, sizeof(struct ccwbk)); 2338 dumpit((char *)privptr->p_claw_signal_blk, sizeof(struct ccwbk));
2339#endif 2339#endif
2340 2340
@@ -2349,14 +2349,14 @@ init_ccw_bk(struct net_device *dev)
2349 if (privptr->p_buff_write==NULL) { 2349 if (privptr->p_buff_write==NULL) {
2350 printk(KERN_INFO "%s: %s() __get_free_pages for write" 2350 printk(KERN_INFO "%s: %s() __get_free_pages for write"
2351 " bufs failed : get is for %d pages\n", 2351 " bufs failed : get is for %d pages\n",
2352 dev->name,__FUNCTION__,claw_write_pages ); 2352 dev->name,__func__,claw_write_pages );
2353 free_pages((unsigned long)privptr->p_buff_ccw, 2353 free_pages((unsigned long)privptr->p_buff_ccw,
2354 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); 2354 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
2355 privptr->p_buff_ccw=NULL; 2355 privptr->p_buff_ccw=NULL;
2356#ifdef FUNCTRACE 2356#ifdef FUNCTRACE
2357 printk(KERN_INFO "%s: %s() > exit on line %d," 2357 printk(KERN_INFO "%s: %s() > exit on line %d,"
2358 "rc = ENOMEM\n", 2358 "rc = ENOMEM\n",
2359 dev->name,__FUNCTION__,__LINE__); 2359 dev->name,__func__,__LINE__);
2360#endif 2360#endif
2361 return -ENOMEM; 2361 return -ENOMEM;
2362 } 2362 }
@@ -2369,7 +2369,7 @@ init_ccw_bk(struct net_device *dev)
2369 ccw_pages_required * PAGE_SIZE); 2369 ccw_pages_required * PAGE_SIZE);
2370#ifdef DEBUGMSG 2370#ifdef DEBUGMSG
2371 printk(KERN_INFO "%s: %s() Begin build claw write free " 2371 printk(KERN_INFO "%s: %s() Begin build claw write free "
2372 "chain \n",dev->name,__FUNCTION__); 2372 "chain \n",dev->name,__func__);
2373#endif 2373#endif
2374 privptr->p_write_free_chain=NULL; 2374 privptr->p_write_free_chain=NULL;
2375 2375
@@ -2409,14 +2409,14 @@ init_ccw_bk(struct net_device *dev)
2409#ifdef IOTRACE 2409#ifdef IOTRACE
2410 printk(KERN_INFO "%s:%s __get_free_pages " 2410 printk(KERN_INFO "%s:%s __get_free_pages "
2411 "for writes buf: get for %d pages\n", 2411 "for writes buf: get for %d pages\n",
2412 dev->name,__FUNCTION__, 2412 dev->name,__func__,
2413 privptr->p_buff_pages_perwrite); 2413 privptr->p_buff_pages_perwrite);
2414#endif 2414#endif
2415 if (p_buff==NULL) { 2415 if (p_buff==NULL) {
2416 printk(KERN_INFO "%s:%s __get_free_pages " 2416 printk(KERN_INFO "%s:%s __get_free_pages "
2417 "for writes buf failed : get is for %d pages\n", 2417 "for writes buf failed : get is for %d pages\n",
2418 dev->name, 2418 dev->name,
2419 __FUNCTION__, 2419 __func__,
2420 privptr->p_buff_pages_perwrite ); 2420 privptr->p_buff_pages_perwrite );
2421 free_pages((unsigned long)privptr->p_buff_ccw, 2421 free_pages((unsigned long)privptr->p_buff_ccw,
2422 (int)pages_to_order_of_mag( 2422 (int)pages_to_order_of_mag(
@@ -2433,7 +2433,7 @@ init_ccw_bk(struct net_device *dev)
2433#ifdef FUNCTRACE 2433#ifdef FUNCTRACE
2434 printk(KERN_INFO "%s: %s exit on line %d, rc = ENOMEM\n", 2434 printk(KERN_INFO "%s: %s exit on line %d, rc = ENOMEM\n",
2435 dev->name, 2435 dev->name,
2436 __FUNCTION__, 2436 __func__,
2437 __LINE__); 2437 __LINE__);
2438#endif 2438#endif
2439 return -ENOMEM; 2439 return -ENOMEM;
@@ -2466,7 +2466,7 @@ init_ccw_bk(struct net_device *dev)
2466 2466
2467#ifdef DEBUGMSG 2467#ifdef DEBUGMSG
2468 printk(KERN_INFO "%s:%s End build claw write free chain \n", 2468 printk(KERN_INFO "%s:%s End build claw write free chain \n",
2469 dev->name,__FUNCTION__); 2469 dev->name,__func__);
2470 p_buf=privptr->p_write_free_chain; 2470 p_buf=privptr->p_write_free_chain;
2471 while (p_buf!=NULL) { 2471 while (p_buf!=NULL) {
2472 dumpit((char *)p_buf, sizeof(struct ccwbk)); 2472 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2485,7 +2485,7 @@ init_ccw_bk(struct net_device *dev)
2485 printk(KERN_INFO "%s: %s() " 2485 printk(KERN_INFO "%s: %s() "
2486 "__get_free_pages for read buf failed : " 2486 "__get_free_pages for read buf failed : "
2487 "get is for %d pages\n", 2487 "get is for %d pages\n",
2488 dev->name,__FUNCTION__,claw_read_pages ); 2488 dev->name,__func__,claw_read_pages );
2489 free_pages((unsigned long)privptr->p_buff_ccw, 2489 free_pages((unsigned long)privptr->p_buff_ccw,
2490 (int)pages_to_order_of_mag( 2490 (int)pages_to_order_of_mag(
2491 privptr->p_buff_ccw_num)); 2491 privptr->p_buff_ccw_num));
@@ -2497,7 +2497,7 @@ init_ccw_bk(struct net_device *dev)
2497 privptr->p_buff_write=NULL; 2497 privptr->p_buff_write=NULL;
2498#ifdef FUNCTRACE 2498#ifdef FUNCTRACE
2499 printk(KERN_INFO "%s: %s() > exit on line %d, rc =" 2499 printk(KERN_INFO "%s: %s() > exit on line %d, rc ="
2500 " ENOMEM\n",dev->name,__FUNCTION__,__LINE__); 2500 " ENOMEM\n",dev->name,__func__,__LINE__);
2501#endif 2501#endif
2502 return -ENOMEM; 2502 return -ENOMEM;
2503 } 2503 }
@@ -2509,7 +2509,7 @@ init_ccw_bk(struct net_device *dev)
2509 */ 2509 */
2510#ifdef DEBUGMSG 2510#ifdef DEBUGMSG
2511 printk(KERN_INFO "%s: %s() Begin build claw read free chain \n", 2511 printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
2512 dev->name,__FUNCTION__); 2512 dev->name,__func__);
2513#endif 2513#endif
2514 p_buff=privptr->p_buff_read; 2514 p_buff=privptr->p_buff_read;
2515 for (i=0 ; i< privptr->p_env->read_buffers ; i++) { 2515 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
@@ -2590,7 +2590,7 @@ init_ccw_bk(struct net_device *dev)
2590 2590
2591#ifdef DEBUGMSG 2591#ifdef DEBUGMSG
2592 printk(KERN_INFO "%s: %s() Begin build claw read free chain \n", 2592 printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
2593 dev->name,__FUNCTION__); 2593 dev->name,__func__);
2594#endif 2594#endif
2595 for (i=0 ; i< privptr->p_env->read_buffers ; i++) { 2595 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
2596 p_buff = (void *)__get_free_pages(__GFP_DMA, 2596 p_buff = (void *)__get_free_pages(__GFP_DMA,
@@ -2598,7 +2598,7 @@ init_ccw_bk(struct net_device *dev)
2598 if (p_buff==NULL) { 2598 if (p_buff==NULL) {
2599 printk(KERN_INFO "%s: %s() __get_free_pages for read " 2599 printk(KERN_INFO "%s: %s() __get_free_pages for read "
2600 "buf failed : get is for %d pages\n", 2600 "buf failed : get is for %d pages\n",
2601 dev->name,__FUNCTION__, 2601 dev->name,__func__,
2602 privptr->p_buff_pages_perread ); 2602 privptr->p_buff_pages_perread );
2603 free_pages((unsigned long)privptr->p_buff_ccw, 2603 free_pages((unsigned long)privptr->p_buff_ccw,
2604 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); 2604 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
@@ -2622,7 +2622,7 @@ init_ccw_bk(struct net_device *dev)
2622 privptr->p_buff_write=NULL; 2622 privptr->p_buff_write=NULL;
2623#ifdef FUNCTRACE 2623#ifdef FUNCTRACE
2624 printk(KERN_INFO "%s: %s() exit on line %d, rc = ENOMEM\n", 2624 printk(KERN_INFO "%s: %s() exit on line %d, rc = ENOMEM\n",
2625 dev->name,__FUNCTION__, 2625 dev->name,__func__,
2626 __LINE__); 2626 __LINE__);
2627#endif 2627#endif
2628 return -ENOMEM; 2628 return -ENOMEM;
@@ -2695,7 +2695,7 @@ init_ccw_bk(struct net_device *dev)
2695 } /* pBuffread = NULL */ 2695 } /* pBuffread = NULL */
2696#ifdef DEBUGMSG 2696#ifdef DEBUGMSG
2697 printk(KERN_INFO "%s: %s() > End build claw read free chain \n", 2697 printk(KERN_INFO "%s: %s() > End build claw read free chain \n",
2698 dev->name,__FUNCTION__); 2698 dev->name,__func__);
2699 p_buf=p_first_CCWB; 2699 p_buf=p_first_CCWB;
2700 while (p_buf!=NULL) { 2700 while (p_buf!=NULL) {
2701 dumpit((char *)p_buf, sizeof(struct ccwbk)); 2701 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2707,7 +2707,7 @@ init_ccw_bk(struct net_device *dev)
2707 privptr->buffs_alloc = 1; 2707 privptr->buffs_alloc = 1;
2708#ifdef FUNCTRACE 2708#ifdef FUNCTRACE
2709 printk(KERN_INFO "%s: %s() exit on line %d\n", 2709 printk(KERN_INFO "%s: %s() exit on line %d\n",
2710 dev->name,__FUNCTION__,__LINE__); 2710 dev->name,__func__,__LINE__);
2711#endif 2711#endif
2712 return 0; 2712 return 0;
2713} /* end of init_ccw_bk */ 2713} /* end of init_ccw_bk */
@@ -2723,11 +2723,11 @@ probe_error( struct ccwgroup_device *cgdev)
2723{ 2723{
2724 struct claw_privbk *privptr; 2724 struct claw_privbk *privptr;
2725#ifdef FUNCTRACE 2725#ifdef FUNCTRACE
2726 printk(KERN_INFO "%s enter \n",__FUNCTION__); 2726 printk(KERN_INFO "%s enter \n",__func__);
2727#endif 2727#endif
2728 CLAW_DBF_TEXT(4,trace,"proberr"); 2728 CLAW_DBF_TEXT(4,trace,"proberr");
2729#ifdef DEBUGMSG 2729#ifdef DEBUGMSG
2730 printk(KERN_INFO "%s variable cgdev =\n",__FUNCTION__); 2730 printk(KERN_INFO "%s variable cgdev =\n",__func__);
2731 dumpit((char *) cgdev, sizeof(struct ccwgroup_device)); 2731 dumpit((char *) cgdev, sizeof(struct ccwgroup_device));
2732#endif 2732#endif
2733 privptr=(struct claw_privbk *)cgdev->dev.driver_data; 2733 privptr=(struct claw_privbk *)cgdev->dev.driver_data;
@@ -2741,7 +2741,7 @@ probe_error( struct ccwgroup_device *cgdev)
2741 } 2741 }
2742#ifdef FUNCTRACE 2742#ifdef FUNCTRACE
2743 printk(KERN_INFO "%s > exit on line %d\n", 2743 printk(KERN_INFO "%s > exit on line %d\n",
2744 __FUNCTION__,__LINE__); 2744 __func__,__LINE__);
2745#endif 2745#endif
2746 2746
2747 return; 2747 return;
@@ -2772,7 +2772,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2772 struct chbk *p_ch = NULL; 2772 struct chbk *p_ch = NULL;
2773#ifdef FUNCTRACE 2773#ifdef FUNCTRACE
2774 printk(KERN_INFO "%s: %s() > enter \n", 2774 printk(KERN_INFO "%s: %s() > enter \n",
2775 dev->name,__FUNCTION__); 2775 dev->name,__func__);
2776#endif 2776#endif
2777 CLAW_DBF_TEXT(2,setup,"clw_cntl"); 2777 CLAW_DBF_TEXT(2,setup,"clw_cntl");
2778#ifdef DEBUGMSG 2778#ifdef DEBUGMSG
@@ -2794,7 +2794,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2794#ifdef FUNCTRACE 2794#ifdef FUNCTRACE
2795 printk(KERN_INFO "%s: %s() > " 2795 printk(KERN_INFO "%s: %s() > "
2796 "exit on line %d, rc=0\n", 2796 "exit on line %d, rc=0\n",
2797 dev->name,__FUNCTION__,__LINE__); 2797 dev->name,__func__,__LINE__);
2798#endif 2798#endif
2799 return 0; 2799 return 0;
2800 } 2800 }
@@ -3057,7 +3057,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
3057 3057
3058#ifdef FUNCTRACE 3058#ifdef FUNCTRACE
3059 printk(KERN_INFO "%s: %s() exit on line %d, rc = 0\n", 3059 printk(KERN_INFO "%s: %s() exit on line %d, rc = 0\n",
3060 dev->name,__FUNCTION__,__LINE__); 3060 dev->name,__func__,__LINE__);
3061#endif 3061#endif
3062 3062
3063 return 0; 3063 return 0;
@@ -3080,7 +3080,7 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link,
3080 struct sk_buff *skb; 3080 struct sk_buff *skb;
3081 3081
3082#ifdef FUNCTRACE 3082#ifdef FUNCTRACE
3083 printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__); 3083 printk(KERN_INFO "%s:%s > enter \n",dev->name,__func__);
3084#endif 3084#endif
3085 CLAW_DBF_TEXT(2,setup,"sndcntl"); 3085 CLAW_DBF_TEXT(2,setup,"sndcntl");
3086#ifdef DEBUGMSG 3086#ifdef DEBUGMSG
@@ -3143,10 +3143,10 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link,
3143 skb = dev_alloc_skb(sizeof(struct clawctl)); 3143 skb = dev_alloc_skb(sizeof(struct clawctl));
3144 if (!skb) { 3144 if (!skb) {
3145 printk( "%s:%s low on mem, returning...\n", 3145 printk( "%s:%s low on mem, returning...\n",
3146 dev->name,__FUNCTION__); 3146 dev->name,__func__);
3147#ifdef DEBUG 3147#ifdef DEBUG
3148 printk(KERN_INFO "%s:%s Exit, rc = ENOMEM\n", 3148 printk(KERN_INFO "%s:%s Exit, rc = ENOMEM\n",
3149 dev->name,__FUNCTION__); 3149 dev->name,__func__);
3150#endif 3150#endif
3151 return -ENOMEM; 3151 return -ENOMEM;
3152 } 3152 }
@@ -3162,7 +3162,7 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link,
3162 claw_hw_tx(skb, dev, 0); 3162 claw_hw_tx(skb, dev, 0);
3163#ifdef FUNCTRACE 3163#ifdef FUNCTRACE
3164 printk(KERN_INFO "%s:%s Exit on line %d\n", 3164 printk(KERN_INFO "%s:%s Exit on line %d\n",
3165 dev->name,__FUNCTION__,__LINE__); 3165 dev->name,__func__,__LINE__);
3166#endif 3166#endif
3167 3167
3168 return 0; 3168 return 0;
@@ -3180,7 +3180,7 @@ claw_snd_conn_req(struct net_device *dev, __u8 link)
3180 struct clawctl *p_ctl; 3180 struct clawctl *p_ctl;
3181 3181
3182#ifdef FUNCTRACE 3182#ifdef FUNCTRACE
3183 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 3183 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
3184#endif 3184#endif
3185 CLAW_DBF_TEXT(2,setup,"snd_conn"); 3185 CLAW_DBF_TEXT(2,setup,"snd_conn");
3186#ifdef DEBUGMSG 3186#ifdef DEBUGMSG
@@ -3193,7 +3193,7 @@ claw_snd_conn_req(struct net_device *dev, __u8 link)
3193 if ( privptr->system_validate_comp==0x00 ) { 3193 if ( privptr->system_validate_comp==0x00 ) {
3194#ifdef FUNCTRACE 3194#ifdef FUNCTRACE
3195 printk(KERN_INFO "%s:%s Exit on line %d, rc = 1\n", 3195 printk(KERN_INFO "%s:%s Exit on line %d, rc = 1\n",
3196 dev->name,__FUNCTION__,__LINE__); 3196 dev->name,__func__,__LINE__);
3197#endif 3197#endif
3198 return rc; 3198 return rc;
3199 } 3199 }
@@ -3209,7 +3209,7 @@ claw_snd_conn_req(struct net_device *dev, __u8 link)
3209 HOST_APPL_NAME, privptr->p_env->api_type); 3209 HOST_APPL_NAME, privptr->p_env->api_type);
3210#ifdef FUNCTRACE 3210#ifdef FUNCTRACE
3211 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", 3211 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3212 dev->name,__FUNCTION__,__LINE__, rc); 3212 dev->name,__func__,__LINE__, rc);
3213#endif 3213#endif
3214 return rc; 3214 return rc;
3215 3215
@@ -3228,7 +3228,7 @@ claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
3228 struct conncmd * p_connect; 3228 struct conncmd * p_connect;
3229 3229
3230#ifdef FUNCTRACE 3230#ifdef FUNCTRACE
3231 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); 3231 printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
3232#endif 3232#endif
3233 CLAW_DBF_TEXT(2,setup,"snd_dsc"); 3233 CLAW_DBF_TEXT(2,setup,"snd_dsc");
3234#ifdef DEBUGMSG 3234#ifdef DEBUGMSG
@@ -3244,7 +3244,7 @@ claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
3244 p_connect->host_name, p_connect->WS_name); 3244 p_connect->host_name, p_connect->WS_name);
3245#ifdef FUNCTRACE 3245#ifdef FUNCTRACE
3246 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", 3246 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3247 dev->name,__FUNCTION__, __LINE__, rc); 3247 dev->name,__func__, __LINE__, rc);
3248#endif 3248#endif
3249 return rc; 3249 return rc;
3250} /* end of claw_snd_disc */ 3250} /* end of claw_snd_disc */
@@ -3265,7 +3265,7 @@ claw_snd_sys_validate_rsp(struct net_device *dev,
3265 3265
3266#ifdef FUNCTRACE 3266#ifdef FUNCTRACE
3267 printk(KERN_INFO "%s:%s Enter\n", 3267 printk(KERN_INFO "%s:%s Enter\n",
3268 dev->name,__FUNCTION__); 3268 dev->name,__func__);
3269#endif 3269#endif
3270 CLAW_DBF_TEXT(2,setup,"chkresp"); 3270 CLAW_DBF_TEXT(2,setup,"chkresp");
3271#ifdef DEBUGMSG 3271#ifdef DEBUGMSG
@@ -3285,7 +3285,7 @@ claw_snd_sys_validate_rsp(struct net_device *dev,
3285 p_env->adapter_name ); 3285 p_env->adapter_name );
3286#ifdef FUNCTRACE 3286#ifdef FUNCTRACE
3287 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", 3287 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3288 dev->name,__FUNCTION__,__LINE__, rc); 3288 dev->name,__func__,__LINE__, rc);
3289#endif 3289#endif
3290 return rc; 3290 return rc;
3291} /* end of claw_snd_sys_validate_rsp */ 3291} /* end of claw_snd_sys_validate_rsp */
@@ -3301,7 +3301,7 @@ claw_strt_conn_req(struct net_device *dev )
3301 int rc; 3301 int rc;
3302 3302
3303#ifdef FUNCTRACE 3303#ifdef FUNCTRACE
3304 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); 3304 printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
3305#endif 3305#endif
3306 CLAW_DBF_TEXT(2,setup,"conn_req"); 3306 CLAW_DBF_TEXT(2,setup,"conn_req");
3307#ifdef DEBUGMSG 3307#ifdef DEBUGMSG
@@ -3311,7 +3311,7 @@ claw_strt_conn_req(struct net_device *dev )
3311 rc=claw_snd_conn_req(dev, 1); 3311 rc=claw_snd_conn_req(dev, 1);
3312#ifdef FUNCTRACE 3312#ifdef FUNCTRACE
3313 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", 3313 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3314 dev->name,__FUNCTION__,__LINE__, rc); 3314 dev->name,__func__,__LINE__, rc);
3315#endif 3315#endif
3316 return rc; 3316 return rc;
3317} /* end of claw_strt_conn_req */ 3317} /* end of claw_strt_conn_req */
@@ -3327,13 +3327,13 @@ net_device_stats *claw_stats(struct net_device *dev)
3327{ 3327{
3328 struct claw_privbk *privptr; 3328 struct claw_privbk *privptr;
3329#ifdef FUNCTRACE 3329#ifdef FUNCTRACE
3330 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); 3330 printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
3331#endif 3331#endif
3332 CLAW_DBF_TEXT(4,trace,"stats"); 3332 CLAW_DBF_TEXT(4,trace,"stats");
3333 privptr = dev->priv; 3333 privptr = dev->priv;
3334#ifdef FUNCTRACE 3334#ifdef FUNCTRACE
3335 printk(KERN_INFO "%s:%s Exit on line %d\n", 3335 printk(KERN_INFO "%s:%s Exit on line %d\n",
3336 dev->name,__FUNCTION__,__LINE__); 3336 dev->name,__func__,__LINE__);
3337#endif 3337#endif
3338 return &privptr->stats; 3338 return &privptr->stats;
3339} /* end of claw_stats */ 3339} /* end of claw_stats */
@@ -3366,7 +3366,7 @@ unpack_read(struct net_device *dev )
3366 int p=0; 3366 int p=0;
3367 3367
3368#ifdef FUNCTRACE 3368#ifdef FUNCTRACE
3369 printk(KERN_INFO "%s:%s enter \n",dev->name,__FUNCTION__); 3369 printk(KERN_INFO "%s:%s enter \n",dev->name,__func__);
3370#endif 3370#endif
3371 CLAW_DBF_TEXT(4,trace,"unpkread"); 3371 CLAW_DBF_TEXT(4,trace,"unpkread");
3372 p_first_ccw=NULL; 3372 p_first_ccw=NULL;
@@ -3408,7 +3408,7 @@ unpack_read(struct net_device *dev )
3408 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) { 3408 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
3409#ifdef DEBUGMSG 3409#ifdef DEBUGMSG
3410 printk(KERN_INFO "%s: %s > More_to_come is ON\n", 3410 printk(KERN_INFO "%s: %s > More_to_come is ON\n",
3411 dev->name,__FUNCTION__); 3411 dev->name,__func__);
3412#endif 3412#endif
3413 mtc_this_frm=1; 3413 mtc_this_frm=1;
3414 if (p_this_ccw->header.length!= 3414 if (p_this_ccw->header.length!=
@@ -3435,7 +3435,7 @@ unpack_read(struct net_device *dev )
3435#ifdef DEBUGMSG 3435#ifdef DEBUGMSG
3436 printk(KERN_INFO "%s:%s goto next " 3436 printk(KERN_INFO "%s:%s goto next "
3437 "frame from MoretoComeSkip \n", 3437 "frame from MoretoComeSkip \n",
3438 dev->name,__FUNCTION__); 3438 dev->name,__func__);
3439#endif 3439#endif
3440 goto NextFrame; 3440 goto NextFrame;
3441 } 3441 }
@@ -3445,7 +3445,7 @@ unpack_read(struct net_device *dev )
3445#ifdef DEBUGMSG 3445#ifdef DEBUGMSG
3446 printk(KERN_INFO "%s:%s goto next " 3446 printk(KERN_INFO "%s:%s goto next "
3447 "frame from claw_process_control \n", 3447 "frame from claw_process_control \n",
3448 dev->name,__FUNCTION__); 3448 dev->name,__func__);
3449#endif 3449#endif
3450 CLAW_DBF_TEXT(4,trace,"UnpkCntl"); 3450 CLAW_DBF_TEXT(4,trace,"UnpkCntl");
3451 goto NextFrame; 3451 goto NextFrame;
@@ -3468,7 +3468,7 @@ unpack_next:
3468 if (privptr->mtc_logical_link<0) { 3468 if (privptr->mtc_logical_link<0) {
3469#ifdef DEBUGMSG 3469#ifdef DEBUGMSG
3470 printk(KERN_INFO "%s: %s mtc_logical_link < 0 \n", 3470 printk(KERN_INFO "%s: %s mtc_logical_link < 0 \n",
3471 dev->name,__FUNCTION__); 3471 dev->name,__func__);
3472#endif 3472#endif
3473 3473
3474 /* 3474 /*
@@ -3487,7 +3487,7 @@ unpack_next:
3487 printk(KERN_INFO "%s: %s > goto next " 3487 printk(KERN_INFO "%s: %s > goto next "
3488 "frame from MoretoComeSkip \n", 3488 "frame from MoretoComeSkip \n",
3489 dev->name, 3489 dev->name,
3490 __FUNCTION__); 3490 __func__);
3491 printk(KERN_INFO " bytes_to_mov %d > (MAX_ENVELOPE_" 3491 printk(KERN_INFO " bytes_to_mov %d > (MAX_ENVELOPE_"
3492 "SIZE-privptr->mtc_offset %d)\n", 3492 "SIZE-privptr->mtc_offset %d)\n",
3493 bytes_to_mov,(MAX_ENVELOPE_SIZE- privptr->mtc_offset)); 3493 bytes_to_mov,(MAX_ENVELOPE_SIZE- privptr->mtc_offset));
@@ -3505,13 +3505,13 @@ unpack_next:
3505 } 3505 }
3506#ifdef DEBUGMSG 3506#ifdef DEBUGMSG
3507 printk(KERN_INFO "%s: %s() received data \n", 3507 printk(KERN_INFO "%s: %s() received data \n",
3508 dev->name,__FUNCTION__); 3508 dev->name,__func__);
3509 if (p_env->packing == DO_PACKED) 3509 if (p_env->packing == DO_PACKED)
3510 dumpit((char *)p_packd+sizeof(struct clawph),32); 3510 dumpit((char *)p_packd+sizeof(struct clawph),32);
3511 else 3511 else
3512 dumpit((char *)p_this_ccw->p_buffer, 32); 3512 dumpit((char *)p_this_ccw->p_buffer, 32);
3513 printk(KERN_INFO "%s: %s() bytelength %d \n", 3513 printk(KERN_INFO "%s: %s() bytelength %d \n",
3514 dev->name,__FUNCTION__,bytes_to_mov); 3514 dev->name,__func__,bytes_to_mov);
3515#endif 3515#endif
3516 if (mtc_this_frm==0) { 3516 if (mtc_this_frm==0) {
3517 len_of_data=privptr->mtc_offset+bytes_to_mov; 3517 len_of_data=privptr->mtc_offset+bytes_to_mov;
@@ -3530,13 +3530,13 @@ unpack_next:
3530#ifdef DEBUGMSG 3530#ifdef DEBUGMSG
3531 printk(KERN_INFO "%s: %s() netif_" 3531 printk(KERN_INFO "%s: %s() netif_"
3532 "rx(skb) completed \n", 3532 "rx(skb) completed \n",
3533 dev->name,__FUNCTION__); 3533 dev->name,__func__);
3534#endif 3534#endif
3535 } 3535 }
3536 else { 3536 else {
3537 privptr->stats.rx_dropped++; 3537 privptr->stats.rx_dropped++;
3538 printk(KERN_WARNING "%s: %s() low on memory\n", 3538 printk(KERN_WARNING "%s: %s() low on memory\n",
3539 dev->name,__FUNCTION__); 3539 dev->name,__func__);
3540 } 3540 }
3541 privptr->mtc_offset=0; 3541 privptr->mtc_offset=0;
3542 privptr->mtc_logical_link=-1; 3542 privptr->mtc_logical_link=-1;
@@ -3575,10 +3575,10 @@ NextFrame:
3575 3575
3576#ifdef IOTRACE 3576#ifdef IOTRACE
3577 printk(KERN_INFO "%s:%s processed frame is %d \n", 3577 printk(KERN_INFO "%s:%s processed frame is %d \n",
3578 dev->name,__FUNCTION__,i); 3578 dev->name,__func__,i);
3579 printk(KERN_INFO "%s:%s F:%lx L:%lx\n", 3579 printk(KERN_INFO "%s:%s F:%lx L:%lx\n",
3580 dev->name, 3580 dev->name,
3581 __FUNCTION__, 3581 __func__,
3582 (unsigned long)p_first_ccw, 3582 (unsigned long)p_first_ccw,
3583 (unsigned long)p_last_ccw); 3583 (unsigned long)p_last_ccw);
3584#endif 3584#endif
@@ -3588,7 +3588,7 @@ NextFrame:
3588 claw_strt_read(dev, LOCK_YES); 3588 claw_strt_read(dev, LOCK_YES);
3589#ifdef FUNCTRACE 3589#ifdef FUNCTRACE
3590 printk(KERN_INFO "%s: %s exit on line %d\n", 3590 printk(KERN_INFO "%s: %s exit on line %d\n",
3591 dev->name, __FUNCTION__, __LINE__); 3591 dev->name, __func__, __LINE__);
3592#endif 3592#endif
3593 return; 3593 return;
3594} /* end of unpack_read */ 3594} /* end of unpack_read */
@@ -3610,7 +3610,7 @@ claw_strt_read (struct net_device *dev, int lock )
3610 p_ch=&privptr->channel[READ]; 3610 p_ch=&privptr->channel[READ];
3611 3611
3612#ifdef FUNCTRACE 3612#ifdef FUNCTRACE
3613 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 3613 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
3614 printk(KERN_INFO "%s: variable lock = %d, dev =\n",dev->name, lock); 3614 printk(KERN_INFO "%s: variable lock = %d, dev =\n",dev->name, lock);
3615 dumpit((char *) dev, sizeof(struct net_device)); 3615 dumpit((char *) dev, sizeof(struct net_device));
3616#endif 3616#endif
@@ -3626,7 +3626,7 @@ claw_strt_read (struct net_device *dev, int lock )
3626 } 3626 }
3627#ifdef DEBUGMSG 3627#ifdef DEBUGMSG
3628 printk(KERN_INFO "%s:%s state-%02x\n" , 3628 printk(KERN_INFO "%s:%s state-%02x\n" ,
3629 dev->name,__FUNCTION__, p_ch->claw_state); 3629 dev->name,__func__, p_ch->claw_state);
3630#endif 3630#endif
3631 if (lock==LOCK_YES) { 3631 if (lock==LOCK_YES) {
3632 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); 3632 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
@@ -3634,7 +3634,7 @@ claw_strt_read (struct net_device *dev, int lock )
3634 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { 3634 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
3635#ifdef DEBUGMSG 3635#ifdef DEBUGMSG
3636 printk(KERN_INFO "%s: HOT READ started in %s\n" , 3636 printk(KERN_INFO "%s: HOT READ started in %s\n" ,
3637 dev->name,__FUNCTION__); 3637 dev->name,__func__);
3638 p_clawh=(struct clawh *)privptr->p_claw_signal_blk; 3638 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
3639 dumpit((char *)&p_clawh->flag , 1); 3639 dumpit((char *)&p_clawh->flag , 1);
3640#endif 3640#endif
@@ -3650,7 +3650,7 @@ claw_strt_read (struct net_device *dev, int lock )
3650 else { 3650 else {
3651#ifdef DEBUGMSG 3651#ifdef DEBUGMSG
3652 printk(KERN_INFO "%s: No READ started by %s() In progress\n" , 3652 printk(KERN_INFO "%s: No READ started by %s() In progress\n" ,
3653 dev->name,__FUNCTION__); 3653 dev->name,__func__);
3654#endif 3654#endif
3655 CLAW_DBF_TEXT(2,trace,"ReadAct"); 3655 CLAW_DBF_TEXT(2,trace,"ReadAct");
3656 } 3656 }
@@ -3660,7 +3660,7 @@ claw_strt_read (struct net_device *dev, int lock )
3660 } 3660 }
3661#ifdef FUNCTRACE 3661#ifdef FUNCTRACE
3662 printk(KERN_INFO "%s:%s Exit on line %d\n", 3662 printk(KERN_INFO "%s:%s Exit on line %d\n",
3663 dev->name,__FUNCTION__,__LINE__); 3663 dev->name,__func__,__LINE__);
3664#endif 3664#endif
3665 CLAW_DBF_TEXT(4,trace,"StRdExit"); 3665 CLAW_DBF_TEXT(4,trace,"StRdExit");
3666 return; 3666 return;
@@ -3681,7 +3681,7 @@ claw_strt_out_IO( struct net_device *dev )
3681 struct ccwbk *p_first_ccw; 3681 struct ccwbk *p_first_ccw;
3682 3682
3683#ifdef FUNCTRACE 3683#ifdef FUNCTRACE
3684 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); 3684 printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
3685#endif 3685#endif
3686 if (!dev) { 3686 if (!dev) {
3687 return; 3687 return;
@@ -3691,7 +3691,7 @@ claw_strt_out_IO( struct net_device *dev )
3691 3691
3692#ifdef DEBUGMSG 3692#ifdef DEBUGMSG
3693 printk(KERN_INFO "%s:%s state-%02x\n" , 3693 printk(KERN_INFO "%s:%s state-%02x\n" ,
3694 dev->name,__FUNCTION__,p_ch->claw_state); 3694 dev->name,__func__,p_ch->claw_state);
3695#endif 3695#endif
3696 CLAW_DBF_TEXT(4,trace,"strt_io"); 3696 CLAW_DBF_TEXT(4,trace,"strt_io");
3697 p_first_ccw=privptr->p_write_active_first; 3697 p_first_ccw=privptr->p_write_active_first;
@@ -3701,14 +3701,14 @@ claw_strt_out_IO( struct net_device *dev )
3701 if (p_first_ccw == NULL) { 3701 if (p_first_ccw == NULL) {
3702#ifdef FUNCTRACE 3702#ifdef FUNCTRACE
3703 printk(KERN_INFO "%s:%s Exit on line %d\n", 3703 printk(KERN_INFO "%s:%s Exit on line %d\n",
3704 dev->name,__FUNCTION__,__LINE__); 3704 dev->name,__func__,__LINE__);
3705#endif 3705#endif
3706 return; 3706 return;
3707 } 3707 }
3708 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { 3708 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
3709 parm = (unsigned long) p_ch; 3709 parm = (unsigned long) p_ch;
3710#ifdef DEBUGMSG 3710#ifdef DEBUGMSG
3711 printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__FUNCTION__); 3711 printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__func__);
3712 dumpit((char *)p_first_ccw, sizeof(struct ccwbk)); 3712 dumpit((char *)p_first_ccw, sizeof(struct ccwbk));
3713#endif 3713#endif
3714 CLAW_DBF_TEXT(2,trace,"StWrtIO"); 3714 CLAW_DBF_TEXT(2,trace,"StWrtIO");
@@ -3721,7 +3721,7 @@ claw_strt_out_IO( struct net_device *dev )
3721 dev->trans_start = jiffies; 3721 dev->trans_start = jiffies;
3722#ifdef FUNCTRACE 3722#ifdef FUNCTRACE
3723 printk(KERN_INFO "%s:%s Exit on line %d\n", 3723 printk(KERN_INFO "%s:%s Exit on line %d\n",
3724 dev->name,__FUNCTION__,__LINE__); 3724 dev->name,__func__,__LINE__);
3725#endif 3725#endif
3726 3726
3727 return; 3727 return;
@@ -3745,7 +3745,7 @@ claw_free_wrt_buf( struct net_device *dev )
3745 struct ccwbk*p_buf; 3745 struct ccwbk*p_buf;
3746#endif 3746#endif
3747#ifdef FUNCTRACE 3747#ifdef FUNCTRACE
3748 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); 3748 printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
3749 printk(KERN_INFO "%s: free count = %d variable dev =\n", 3749 printk(KERN_INFO "%s: free count = %d variable dev =\n",
3750 dev->name,privptr->write_free_count); 3750 dev->name,privptr->write_free_count);
3751#endif 3751#endif
@@ -3798,7 +3798,7 @@ claw_free_wrt_buf( struct net_device *dev )
3798 privptr->p_write_active_last=NULL; 3798 privptr->p_write_active_last=NULL;
3799#ifdef DEBUGMSG 3799#ifdef DEBUGMSG
3800 printk(KERN_INFO "%s:%s p_write_" 3800 printk(KERN_INFO "%s:%s p_write_"
3801 "active_first==NULL\n",dev->name,__FUNCTION__); 3801 "active_first==NULL\n",dev->name,__func__);
3802#endif 3802#endif
3803 } 3803 }
3804#ifdef IOTRACE 3804#ifdef IOTRACE
@@ -3819,7 +3819,7 @@ claw_free_wrt_buf( struct net_device *dev )
3819 CLAW_DBF_TEXT_(4,trace,"FWC=%d",privptr->write_free_count); 3819 CLAW_DBF_TEXT_(4,trace,"FWC=%d",privptr->write_free_count);
3820#ifdef FUNCTRACE 3820#ifdef FUNCTRACE
3821 printk(KERN_INFO "%s:%s Exit on line %d free_count =%d\n", 3821 printk(KERN_INFO "%s:%s Exit on line %d free_count =%d\n",
3822 dev->name,__FUNCTION__, __LINE__,privptr->write_free_count); 3822 dev->name,__func__, __LINE__,privptr->write_free_count);
3823#endif 3823#endif
3824 return; 3824 return;
3825} 3825}
@@ -3833,7 +3833,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
3833{ 3833{
3834 struct claw_privbk *privptr; 3834 struct claw_privbk *privptr;
3835#ifdef FUNCTRACE 3835#ifdef FUNCTRACE
3836 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); 3836 printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
3837#endif 3837#endif
3838 CLAW_DBF_TEXT(2,setup,"free_dev"); 3838 CLAW_DBF_TEXT(2,setup,"free_dev");
3839 3839
@@ -3854,7 +3854,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
3854#endif 3854#endif
3855 CLAW_DBF_TEXT(2,setup,"feee_ok"); 3855 CLAW_DBF_TEXT(2,setup,"feee_ok");
3856#ifdef FUNCTRACE 3856#ifdef FUNCTRACE
3857 printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__); 3857 printk(KERN_INFO "%s:%s Exit\n",dev->name,__func__);
3858#endif 3858#endif
3859} 3859}
3860 3860
@@ -3867,13 +3867,13 @@ static void
3867claw_init_netdevice(struct net_device * dev) 3867claw_init_netdevice(struct net_device * dev)
3868{ 3868{
3869#ifdef FUNCTRACE 3869#ifdef FUNCTRACE
3870 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); 3870 printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
3871#endif 3871#endif
3872 CLAW_DBF_TEXT(2,setup,"init_dev"); 3872 CLAW_DBF_TEXT(2,setup,"init_dev");
3873 CLAW_DBF_TEXT_(2,setup,"%s",dev->name); 3873 CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
3874 if (!dev) { 3874 if (!dev) {
3875 printk(KERN_WARNING "claw:%s BAD Device exit line %d\n", 3875 printk(KERN_WARNING "claw:%s BAD Device exit line %d\n",
3876 __FUNCTION__,__LINE__); 3876 __func__,__LINE__);
3877 CLAW_DBF_TEXT(2,setup,"baddev"); 3877 CLAW_DBF_TEXT(2,setup,"baddev");
3878 return; 3878 return;
3879 } 3879 }
@@ -3889,7 +3889,7 @@ claw_init_netdevice(struct net_device * dev)
3889 dev->tx_queue_len = 1300; 3889 dev->tx_queue_len = 1300;
3890 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 3890 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
3891#ifdef FUNCTRACE 3891#ifdef FUNCTRACE
3892 printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__); 3892 printk(KERN_INFO "%s:%s Exit\n",dev->name,__func__);
3893#endif 3893#endif
3894 CLAW_DBF_TEXT(2,setup,"initok"); 3894 CLAW_DBF_TEXT(2,setup,"initok");
3895 return; 3895 return;
@@ -3909,7 +3909,7 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
3909 struct ccw_dev_id dev_id; 3909 struct ccw_dev_id dev_id;
3910 3910
3911#ifdef FUNCTRACE 3911#ifdef FUNCTRACE
3912 printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__FUNCTION__); 3912 printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__func__);
3913#endif 3913#endif
3914 CLAW_DBF_TEXT_(2,setup,"%s",cdev->dev.bus_id); 3914 CLAW_DBF_TEXT_(2,setup,"%s",cdev->dev.bus_id);
3915 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */ 3915 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
@@ -3920,16 +3920,16 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
3920 p_ch->devno = dev_id.devno; 3920 p_ch->devno = dev_id.devno;
3921 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { 3921 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
3922 printk(KERN_WARNING "%s Out of memory in %s for irb\n", 3922 printk(KERN_WARNING "%s Out of memory in %s for irb\n",
3923 p_ch->id,__FUNCTION__); 3923 p_ch->id,__func__);
3924#ifdef FUNCTRACE 3924#ifdef FUNCTRACE
3925 printk(KERN_INFO "%s:%s Exit on line %d\n", 3925 printk(KERN_INFO "%s:%s Exit on line %d\n",
3926 p_ch->id,__FUNCTION__,__LINE__); 3926 p_ch->id,__func__,__LINE__);
3927#endif 3927#endif
3928 return -ENOMEM; 3928 return -ENOMEM;
3929 } 3929 }
3930#ifdef FUNCTRACE 3930#ifdef FUNCTRACE
3931 printk(KERN_INFO "%s:%s Exit on line %d\n", 3931 printk(KERN_INFO "%s:%s Exit on line %d\n",
3932 cdev->dev.bus_id,__FUNCTION__,__LINE__); 3932 cdev->dev.bus_id,__func__,__LINE__);
3933#endif 3933#endif
3934 return 0; 3934 return 0;
3935} 3935}
@@ -3952,7 +3952,7 @@ claw_new_device(struct ccwgroup_device *cgdev)
3952 int ret; 3952 int ret;
3953 struct ccw_dev_id dev_id; 3953 struct ccw_dev_id dev_id;
3954 3954
3955 pr_debug("%s() called\n", __FUNCTION__); 3955 pr_debug("%s() called\n", __func__);
3956 printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id); 3956 printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id);
3957 CLAW_DBF_TEXT(2,setup,"new_dev"); 3957 CLAW_DBF_TEXT(2,setup,"new_dev");
3958 privptr = cgdev->dev.driver_data; 3958 privptr = cgdev->dev.driver_data;
@@ -3990,7 +3990,7 @@ claw_new_device(struct ccwgroup_device *cgdev)
3990 } 3990 }
3991 dev = alloc_netdev(0,"claw%d",claw_init_netdevice); 3991 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
3992 if (!dev) { 3992 if (!dev) {
3993 printk(KERN_WARNING "%s:alloc_netdev failed\n",__FUNCTION__); 3993 printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__);
3994 goto out; 3994 goto out;
3995 } 3995 }
3996 dev->priv = privptr; 3996 dev->priv = privptr;
@@ -4065,7 +4065,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
4065 struct net_device *ndev; 4065 struct net_device *ndev;
4066 int ret; 4066 int ret;
4067 4067
4068 pr_debug("%s() called\n", __FUNCTION__); 4068 pr_debug("%s() called\n", __func__);
4069 CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id); 4069 CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
4070 priv = cgdev->dev.driver_data; 4070 priv = cgdev->dev.driver_data;
4071 if (!priv) 4071 if (!priv)
@@ -4095,15 +4095,15 @@ claw_remove_device(struct ccwgroup_device *cgdev)
4095{ 4095{
4096 struct claw_privbk *priv; 4096 struct claw_privbk *priv;
4097 4097
4098 pr_debug("%s() called\n", __FUNCTION__); 4098 pr_debug("%s() called\n", __func__);
4099 CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id); 4099 CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
4100 priv = cgdev->dev.driver_data; 4100 priv = cgdev->dev.driver_data;
4101 if (!priv) { 4101 if (!priv) {
4102 printk(KERN_WARNING "claw: %s() no Priv exiting\n",__FUNCTION__); 4102 printk(KERN_WARNING "claw: %s() no Priv exiting\n",__func__);
4103 return; 4103 return;
4104 } 4104 }
4105 printk(KERN_INFO "claw: %s() called %s will be removed.\n", 4105 printk(KERN_INFO "claw: %s() called %s will be removed.\n",
4106 __FUNCTION__,cgdev->cdev[0]->dev.bus_id); 4106 __func__,cgdev->cdev[0]->dev.bus_id);
4107 if (cgdev->state == CCWGROUP_ONLINE) 4107 if (cgdev->state == CCWGROUP_ONLINE)
4108 claw_shutdown_device(cgdev); 4108 claw_shutdown_device(cgdev);
4109 claw_remove_files(&cgdev->dev); 4109 claw_remove_files(&cgdev->dev);
@@ -4346,7 +4346,7 @@ static struct attribute_group claw_attr_group = {
4346static int 4346static int
4347claw_add_files(struct device *dev) 4347claw_add_files(struct device *dev)
4348{ 4348{
4349 pr_debug("%s() called\n", __FUNCTION__); 4349 pr_debug("%s() called\n", __func__);
4350 CLAW_DBF_TEXT(2,setup,"add_file"); 4350 CLAW_DBF_TEXT(2,setup,"add_file");
4351 return sysfs_create_group(&dev->kobj, &claw_attr_group); 4351 return sysfs_create_group(&dev->kobj, &claw_attr_group);
4352} 4352}
@@ -4354,7 +4354,7 @@ claw_add_files(struct device *dev)
4354static void 4354static void
4355claw_remove_files(struct device *dev) 4355claw_remove_files(struct device *dev)
4356{ 4356{
4357 pr_debug("%s() called\n", __FUNCTION__); 4357 pr_debug("%s() called\n", __func__);
4358 CLAW_DBF_TEXT(2,setup,"rem_file"); 4358 CLAW_DBF_TEXT(2,setup,"rem_file");
4359 sysfs_remove_group(&dev->kobj, &claw_attr_group); 4359 sysfs_remove_group(&dev->kobj, &claw_attr_group);
4360} 4360}
@@ -4385,12 +4385,12 @@ claw_init(void)
4385 printk(KERN_INFO "claw: starting driver\n"); 4385 printk(KERN_INFO "claw: starting driver\n");
4386 4386
4387#ifdef FUNCTRACE 4387#ifdef FUNCTRACE
4388 printk(KERN_INFO "claw: %s() enter \n",__FUNCTION__); 4388 printk(KERN_INFO "claw: %s() enter \n",__func__);
4389#endif 4389#endif
4390 ret = claw_register_debug_facility(); 4390 ret = claw_register_debug_facility();
4391 if (ret) { 4391 if (ret) {
4392 printk(KERN_WARNING "claw: %s() debug_register failed %d\n", 4392 printk(KERN_WARNING "claw: %s() debug_register failed %d\n",
4393 __FUNCTION__,ret); 4393 __func__,ret);
4394 return ret; 4394 return ret;
4395 } 4395 }
4396 CLAW_DBF_TEXT(2,setup,"init_mod"); 4396 CLAW_DBF_TEXT(2,setup,"init_mod");
@@ -4398,10 +4398,10 @@ claw_init(void)
4398 if (ret) { 4398 if (ret) {
4399 claw_unregister_debug_facility(); 4399 claw_unregister_debug_facility();
4400 printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n", 4400 printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n",
4401 __FUNCTION__,ret); 4401 __func__,ret);
4402 } 4402 }
4403#ifdef FUNCTRACE 4403#ifdef FUNCTRACE
4404 printk(KERN_INFO "claw: %s() exit \n",__FUNCTION__); 4404 printk(KERN_INFO "claw: %s() exit \n",__func__);
4405#endif 4405#endif
4406 return ret; 4406 return ret;
4407} 4407}
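The claw.c hunks above and the netiucv.c hunks that follow make a single mechanical substitution: the GCC-specific __FUNCTION__ identifier is replaced by __func__, the predefined identifier guaranteed by C99. The short sketch below is purely illustrative and is not part of the patch; it assumes a GCC or Clang toolchain (needed for the non-standard __FUNCTION__ spelling), and show_caller() is a made-up helper used only for the demonstration.

#include <stdio.h>

/* Illustrative user-space sketch, not kernel code: both identifiers
 * evaluate to the name of the enclosing function, but only __func__
 * is defined by the C standard. */
static void show_caller(void)
{
	printf("%s: standard C99 __func__\n", __func__);
	printf("%s: GCC-specific __FUNCTION__\n", __FUNCTION__);
}

int main(void)
{
	show_caller();	/* prints "show_caller: ..." twice */
	return 0;
}

Because GCC treats the two spellings identically, the conversion changes no runtime behaviour; it only moves these drivers onto the form the C standard defines, in line with the kernel-wide cleanup.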
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 874a19994489..8f876f6ab367 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -670,7 +670,7 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
670 struct netiucv_priv *privptr = netdev_priv(conn->netdev); 670 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
671 int rc; 671 int rc;
672 672
673 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 673 IUCV_DBF_TEXT(trace, 4, __func__);
674 674
675 if (!conn->netdev) { 675 if (!conn->netdev) {
676 iucv_message_reject(conn->path, msg); 676 iucv_message_reject(conn->path, msg);
@@ -718,7 +718,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
718 struct ll_header header; 718 struct ll_header header;
719 int rc; 719 int rc;
720 720
721 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 721 IUCV_DBF_TEXT(trace, 4, __func__);
722 722
723 if (conn && conn->netdev) 723 if (conn && conn->netdev)
724 privptr = netdev_priv(conn->netdev); 724 privptr = netdev_priv(conn->netdev);
@@ -799,7 +799,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
799 struct netiucv_priv *privptr = netdev_priv(netdev); 799 struct netiucv_priv *privptr = netdev_priv(netdev);
800 int rc; 800 int rc;
801 801
802 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 802 IUCV_DBF_TEXT(trace, 3, __func__);
803 803
804 conn->path = path; 804 conn->path = path;
805 path->msglim = NETIUCV_QUEUELEN_DEFAULT; 805 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
@@ -821,7 +821,7 @@ static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
821 struct iucv_event *ev = arg; 821 struct iucv_event *ev = arg;
822 struct iucv_path *path = ev->data; 822 struct iucv_path *path = ev->data;
823 823
824 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 824 IUCV_DBF_TEXT(trace, 3, __func__);
825 iucv_path_sever(path, NULL); 825 iucv_path_sever(path, NULL);
826} 826}
827 827
@@ -831,7 +831,7 @@ static void conn_action_connack(fsm_instance *fi, int event, void *arg)
831 struct net_device *netdev = conn->netdev; 831 struct net_device *netdev = conn->netdev;
832 struct netiucv_priv *privptr = netdev_priv(netdev); 832 struct netiucv_priv *privptr = netdev_priv(netdev);
833 833
834 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 834 IUCV_DBF_TEXT(trace, 3, __func__);
835 fsm_deltimer(&conn->timer); 835 fsm_deltimer(&conn->timer);
836 fsm_newstate(fi, CONN_STATE_IDLE); 836 fsm_newstate(fi, CONN_STATE_IDLE);
837 netdev->tx_queue_len = conn->path->msglim; 837 netdev->tx_queue_len = conn->path->msglim;
@@ -842,7 +842,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
842{ 842{
843 struct iucv_connection *conn = arg; 843 struct iucv_connection *conn = arg;
844 844
845 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 845 IUCV_DBF_TEXT(trace, 3, __func__);
846 fsm_deltimer(&conn->timer); 846 fsm_deltimer(&conn->timer);
847 iucv_path_sever(conn->path, NULL); 847 iucv_path_sever(conn->path, NULL);
848 fsm_newstate(fi, CONN_STATE_STARTWAIT); 848 fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -854,7 +854,7 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
854 struct net_device *netdev = conn->netdev; 854 struct net_device *netdev = conn->netdev;
855 struct netiucv_priv *privptr = netdev_priv(netdev); 855 struct netiucv_priv *privptr = netdev_priv(netdev);
856 856
857 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 857 IUCV_DBF_TEXT(trace, 3, __func__);
858 858
859 fsm_deltimer(&conn->timer); 859 fsm_deltimer(&conn->timer);
860 iucv_path_sever(conn->path, NULL); 860 iucv_path_sever(conn->path, NULL);
@@ -870,7 +870,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
870 struct iucv_connection *conn = arg; 870 struct iucv_connection *conn = arg;
871 int rc; 871 int rc;
872 872
873 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 873 IUCV_DBF_TEXT(trace, 3, __func__);
874 874
875 fsm_newstate(fi, CONN_STATE_STARTWAIT); 875 fsm_newstate(fi, CONN_STATE_STARTWAIT);
876 PRINT_DEBUG("%s('%s'): connecting ...\n", 876 PRINT_DEBUG("%s('%s'): connecting ...\n",
@@ -948,7 +948,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
948 struct net_device *netdev = conn->netdev; 948 struct net_device *netdev = conn->netdev;
949 struct netiucv_priv *privptr = netdev_priv(netdev); 949 struct netiucv_priv *privptr = netdev_priv(netdev);
950 950
951 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 951 IUCV_DBF_TEXT(trace, 3, __func__);
952 952
953 fsm_deltimer(&conn->timer); 953 fsm_deltimer(&conn->timer);
954 fsm_newstate(fi, CONN_STATE_STOPPED); 954 fsm_newstate(fi, CONN_STATE_STOPPED);
@@ -1024,7 +1024,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
1024 struct net_device *dev = arg; 1024 struct net_device *dev = arg;
1025 struct netiucv_priv *privptr = netdev_priv(dev); 1025 struct netiucv_priv *privptr = netdev_priv(dev);
1026 1026
1027 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1027 IUCV_DBF_TEXT(trace, 3, __func__);
1028 1028
1029 fsm_newstate(fi, DEV_STATE_STARTWAIT); 1029 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1030 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn); 1030 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
@@ -1044,7 +1044,7 @@ dev_action_stop(fsm_instance *fi, int event, void *arg)
1044 struct netiucv_priv *privptr = netdev_priv(dev); 1044 struct netiucv_priv *privptr = netdev_priv(dev);
1045 struct iucv_event ev; 1045 struct iucv_event ev;
1046 1046
1047 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1047 IUCV_DBF_TEXT(trace, 3, __func__);
1048 1048
1049 ev.conn = privptr->conn; 1049 ev.conn = privptr->conn;
1050 1050
@@ -1066,7 +1066,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
1066 struct net_device *dev = arg; 1066 struct net_device *dev = arg;
1067 struct netiucv_priv *privptr = netdev_priv(dev); 1067 struct netiucv_priv *privptr = netdev_priv(dev);
1068 1068
1069 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1069 IUCV_DBF_TEXT(trace, 3, __func__);
1070 1070
1071 switch (fsm_getstate(fi)) { 1071 switch (fsm_getstate(fi)) {
1072 case DEV_STATE_STARTWAIT: 1072 case DEV_STATE_STARTWAIT:
@@ -1097,7 +1097,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
1097static void 1097static void
1098dev_action_conndown(fsm_instance *fi, int event, void *arg) 1098dev_action_conndown(fsm_instance *fi, int event, void *arg)
1099{ 1099{
1100 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1100 IUCV_DBF_TEXT(trace, 3, __func__);
1101 1101
1102 switch (fsm_getstate(fi)) { 1102 switch (fsm_getstate(fi)) {
1103 case DEV_STATE_RUNNING: 1103 case DEV_STATE_RUNNING:
@@ -1288,7 +1288,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1288 struct netiucv_priv *privptr = netdev_priv(dev); 1288 struct netiucv_priv *privptr = netdev_priv(dev);
1289 int rc; 1289 int rc;
1290 1290
1291 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1291 IUCV_DBF_TEXT(trace, 4, __func__);
1292 /** 1292 /**
1293 * Some sanity checks ... 1293 * Some sanity checks ...
1294 */ 1294 */
@@ -1344,7 +1344,7 @@ static struct net_device_stats *netiucv_stats (struct net_device * dev)
1344{ 1344{
1345 struct netiucv_priv *priv = netdev_priv(dev); 1345 struct netiucv_priv *priv = netdev_priv(dev);
1346 1346
1347 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1347 IUCV_DBF_TEXT(trace, 5, __func__);
1348 return &priv->stats; 1348 return &priv->stats;
1349} 1349}
1350 1350
@@ -1360,7 +1360,7 @@ static struct net_device_stats *netiucv_stats (struct net_device * dev)
1360 */ 1360 */
1361static int netiucv_change_mtu(struct net_device * dev, int new_mtu) 1361static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1362{ 1362{
1363 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1363 IUCV_DBF_TEXT(trace, 3, __func__);
1364 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) { 1364 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1365 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); 1365 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1366 return -EINVAL; 1366 return -EINVAL;
@@ -1378,7 +1378,7 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1378{ 1378{
1379 struct netiucv_priv *priv = dev->driver_data; 1379 struct netiucv_priv *priv = dev->driver_data;
1380 1380
1381 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1381 IUCV_DBF_TEXT(trace, 5, __func__);
1382 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); 1382 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1383} 1383}
1384 1384
@@ -1393,7 +1393,7 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1393 int i; 1393 int i;
1394 struct iucv_connection *cp; 1394 struct iucv_connection *cp;
1395 1395
1396 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1396 IUCV_DBF_TEXT(trace, 3, __func__);
1397 if (count > 9) { 1397 if (count > 9) {
1398 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count); 1398 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1399 IUCV_DBF_TEXT_(setup, 2, 1399 IUCV_DBF_TEXT_(setup, 2,
@@ -1449,7 +1449,7 @@ static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1449 char *buf) 1449 char *buf)
1450{ struct netiucv_priv *priv = dev->driver_data; 1450{ struct netiucv_priv *priv = dev->driver_data;
1451 1451
1452 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1452 IUCV_DBF_TEXT(trace, 5, __func__);
1453 return sprintf(buf, "%d\n", priv->conn->max_buffsize); 1453 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1454} 1454}
1455 1455
@@ -1461,7 +1461,7 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1461 char *e; 1461 char *e;
1462 int bs1; 1462 int bs1;
1463 1463
1464 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1464 IUCV_DBF_TEXT(trace, 3, __func__);
1465 if (count >= 39) 1465 if (count >= 39)
1466 return -EINVAL; 1466 return -EINVAL;
1467 1467
@@ -1513,7 +1513,7 @@ static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1513{ 1513{
1514 struct netiucv_priv *priv = dev->driver_data; 1514 struct netiucv_priv *priv = dev->driver_data;
1515 1515
1516 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1516 IUCV_DBF_TEXT(trace, 5, __func__);
1517 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm)); 1517 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1518} 1518}
1519 1519
@@ -1524,7 +1524,7 @@ static ssize_t conn_fsm_show (struct device *dev,
1524{ 1524{
1525 struct netiucv_priv *priv = dev->driver_data; 1525 struct netiucv_priv *priv = dev->driver_data;
1526 1526
1527 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1527 IUCV_DBF_TEXT(trace, 5, __func__);
1528 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm)); 1528 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1529} 1529}
1530 1530
@@ -1535,7 +1535,7 @@ static ssize_t maxmulti_show (struct device *dev,
1535{ 1535{
1536 struct netiucv_priv *priv = dev->driver_data; 1536 struct netiucv_priv *priv = dev->driver_data;
1537 1537
1538 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1538 IUCV_DBF_TEXT(trace, 5, __func__);
1539 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); 1539 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1540} 1540}
1541 1541
@@ -1545,7 +1545,7 @@ static ssize_t maxmulti_write (struct device *dev,
1545{ 1545{
1546 struct netiucv_priv *priv = dev->driver_data; 1546 struct netiucv_priv *priv = dev->driver_data;
1547 1547
1548 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1548 IUCV_DBF_TEXT(trace, 4, __func__);
1549 priv->conn->prof.maxmulti = 0; 1549 priv->conn->prof.maxmulti = 0;
1550 return count; 1550 return count;
1551} 1551}
@@ -1557,7 +1557,7 @@ static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1557{ 1557{
1558 struct netiucv_priv *priv = dev->driver_data; 1558 struct netiucv_priv *priv = dev->driver_data;
1559 1559
1560 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1560 IUCV_DBF_TEXT(trace, 5, __func__);
1561 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); 1561 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1562} 1562}
1563 1563
@@ -1566,7 +1566,7 @@ static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1566{ 1566{
1567 struct netiucv_priv *priv = dev->driver_data; 1567 struct netiucv_priv *priv = dev->driver_data;
1568 1568
1569 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1569 IUCV_DBF_TEXT(trace, 4, __func__);
1570 priv->conn->prof.maxcqueue = 0; 1570 priv->conn->prof.maxcqueue = 0;
1571 return count; 1571 return count;
1572} 1572}
@@ -1578,7 +1578,7 @@ static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1578{ 1578{
1579 struct netiucv_priv *priv = dev->driver_data; 1579 struct netiucv_priv *priv = dev->driver_data;
1580 1580
1581 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1581 IUCV_DBF_TEXT(trace, 5, __func__);
1582 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); 1582 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1583} 1583}
1584 1584
@@ -1587,7 +1587,7 @@ static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1587{ 1587{
1588 struct netiucv_priv *priv = dev->driver_data; 1588 struct netiucv_priv *priv = dev->driver_data;
1589 1589
1590 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1590 IUCV_DBF_TEXT(trace, 4, __func__);
1591 priv->conn->prof.doios_single = 0; 1591 priv->conn->prof.doios_single = 0;
1592 return count; 1592 return count;
1593} 1593}
@@ -1599,7 +1599,7 @@ static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1599{ 1599{
1600 struct netiucv_priv *priv = dev->driver_data; 1600 struct netiucv_priv *priv = dev->driver_data;
1601 1601
1602 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1602 IUCV_DBF_TEXT(trace, 5, __func__);
1603 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); 1603 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1604} 1604}
1605 1605
@@ -1608,7 +1608,7 @@ static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1608{ 1608{
1609 struct netiucv_priv *priv = dev->driver_data; 1609 struct netiucv_priv *priv = dev->driver_data;
1610 1610
1611 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1611 IUCV_DBF_TEXT(trace, 5, __func__);
1612 priv->conn->prof.doios_multi = 0; 1612 priv->conn->prof.doios_multi = 0;
1613 return count; 1613 return count;
1614} 1614}
@@ -1620,7 +1620,7 @@ static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1620{ 1620{
1621 struct netiucv_priv *priv = dev->driver_data; 1621 struct netiucv_priv *priv = dev->driver_data;
1622 1622
1623 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1623 IUCV_DBF_TEXT(trace, 5, __func__);
1624 return sprintf(buf, "%ld\n", priv->conn->prof.txlen); 1624 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1625} 1625}
1626 1626
@@ -1629,7 +1629,7 @@ static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1629{ 1629{
1630 struct netiucv_priv *priv = dev->driver_data; 1630 struct netiucv_priv *priv = dev->driver_data;
1631 1631
1632 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1632 IUCV_DBF_TEXT(trace, 4, __func__);
1633 priv->conn->prof.txlen = 0; 1633 priv->conn->prof.txlen = 0;
1634 return count; 1634 return count;
1635} 1635}
@@ -1641,7 +1641,7 @@ static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1641{ 1641{
1642 struct netiucv_priv *priv = dev->driver_data; 1642 struct netiucv_priv *priv = dev->driver_data;
1643 1643
1644 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1644 IUCV_DBF_TEXT(trace, 5, __func__);
1645 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); 1645 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1646} 1646}
1647 1647
@@ -1650,7 +1650,7 @@ static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1650{ 1650{
1651 struct netiucv_priv *priv = dev->driver_data; 1651 struct netiucv_priv *priv = dev->driver_data;
1652 1652
1653 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1653 IUCV_DBF_TEXT(trace, 4, __func__);
1654 priv->conn->prof.tx_time = 0; 1654 priv->conn->prof.tx_time = 0;
1655 return count; 1655 return count;
1656} 1656}
@@ -1662,7 +1662,7 @@ static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1662{ 1662{
1663 struct netiucv_priv *priv = dev->driver_data; 1663 struct netiucv_priv *priv = dev->driver_data;
1664 1664
1665 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1665 IUCV_DBF_TEXT(trace, 5, __func__);
1666 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); 1666 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1667} 1667}
1668 1668
@@ -1671,7 +1671,7 @@ static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1671{ 1671{
1672 struct netiucv_priv *priv = dev->driver_data; 1672 struct netiucv_priv *priv = dev->driver_data;
1673 1673
1674 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1674 IUCV_DBF_TEXT(trace, 4, __func__);
1675 priv->conn->prof.tx_pending = 0; 1675 priv->conn->prof.tx_pending = 0;
1676 return count; 1676 return count;
1677} 1677}
@@ -1683,7 +1683,7 @@ static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1683{ 1683{
1684 struct netiucv_priv *priv = dev->driver_data; 1684 struct netiucv_priv *priv = dev->driver_data;
1685 1685
1686 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1686 IUCV_DBF_TEXT(trace, 5, __func__);
1687 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); 1687 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1688} 1688}
1689 1689
@@ -1692,7 +1692,7 @@ static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1692{ 1692{
1693 struct netiucv_priv *priv = dev->driver_data; 1693 struct netiucv_priv *priv = dev->driver_data;
1694 1694
1695 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1695 IUCV_DBF_TEXT(trace, 4, __func__);
1696 priv->conn->prof.tx_max_pending = 0; 1696 priv->conn->prof.tx_max_pending = 0;
1697 return count; 1697 return count;
1698} 1698}
@@ -1732,7 +1732,7 @@ static int netiucv_add_files(struct device *dev)
1732{ 1732{
1733 int ret; 1733 int ret;
1734 1734
1735 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1735 IUCV_DBF_TEXT(trace, 3, __func__);
1736 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group); 1736 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1737 if (ret) 1737 if (ret)
1738 return ret; 1738 return ret;
@@ -1744,7 +1744,7 @@ static int netiucv_add_files(struct device *dev)
1744 1744
1745static void netiucv_remove_files(struct device *dev) 1745static void netiucv_remove_files(struct device *dev)
1746{ 1746{
1747 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1747 IUCV_DBF_TEXT(trace, 3, __func__);
1748 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); 1748 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1749 sysfs_remove_group(&dev->kobj, &netiucv_attr_group); 1749 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1750} 1750}
@@ -1756,7 +1756,7 @@ static int netiucv_register_device(struct net_device *ndev)
1756 int ret; 1756 int ret;
1757 1757
1758 1758
1759 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1759 IUCV_DBF_TEXT(trace, 3, __func__);
1760 1760
1761 if (dev) { 1761 if (dev) {
1762 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name); 1762 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
@@ -1792,7 +1792,7 @@ out_unreg:
1792 1792
1793static void netiucv_unregister_device(struct device *dev) 1793static void netiucv_unregister_device(struct device *dev)
1794{ 1794{
1795 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1795 IUCV_DBF_TEXT(trace, 3, __func__);
1796 netiucv_remove_files(dev); 1796 netiucv_remove_files(dev);
1797 device_unregister(dev); 1797 device_unregister(dev);
1798} 1798}
@@ -1857,7 +1857,7 @@ out:
1857 */ 1857 */
1858static void netiucv_remove_connection(struct iucv_connection *conn) 1858static void netiucv_remove_connection(struct iucv_connection *conn)
1859{ 1859{
1860 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1860 IUCV_DBF_TEXT(trace, 3, __func__);
1861 write_lock_bh(&iucv_connection_rwlock); 1861 write_lock_bh(&iucv_connection_rwlock);
1862 list_del_init(&conn->list); 1862 list_del_init(&conn->list);
1863 write_unlock_bh(&iucv_connection_rwlock); 1863 write_unlock_bh(&iucv_connection_rwlock);
@@ -1881,7 +1881,7 @@ static void netiucv_free_netdevice(struct net_device *dev)
1881{ 1881{
1882 struct netiucv_priv *privptr = netdev_priv(dev); 1882 struct netiucv_priv *privptr = netdev_priv(dev);
1883 1883
1884 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1884 IUCV_DBF_TEXT(trace, 3, __func__);
1885 1885
1886 if (!dev) 1886 if (!dev)
1887 return; 1887 return;
@@ -1963,7 +1963,7 @@ static ssize_t conn_write(struct device_driver *drv,
1963 struct netiucv_priv *priv; 1963 struct netiucv_priv *priv;
1964 struct iucv_connection *cp; 1964 struct iucv_connection *cp;
1965 1965
1966 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1966 IUCV_DBF_TEXT(trace, 3, __func__);
1967 if (count>9) { 1967 if (count>9) {
1968 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); 1968 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1969 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); 1969 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
@@ -2048,7 +2048,7 @@ static ssize_t remove_write (struct device_driver *drv,
2048 const char *p; 2048 const char *p;
2049 int i; 2049 int i;
2050 2050
2051 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2051 IUCV_DBF_TEXT(trace, 3, __func__);
2052 2052
2053 if (count >= IFNAMSIZ) 2053 if (count >= IFNAMSIZ)
2054 count = IFNAMSIZ - 1;; 2054 count = IFNAMSIZ - 1;;
@@ -2116,7 +2116,7 @@ static void __exit netiucv_exit(void)
2116 struct netiucv_priv *priv; 2116 struct netiucv_priv *priv;
2117 struct device *dev; 2117 struct device *dev;
2118 2118
2119 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2119 IUCV_DBF_TEXT(trace, 3, __func__);
2120 while (!list_empty(&iucv_connection_list)) { 2120 while (!list_empty(&iucv_connection_list)) {
2121 cp = list_entry(iucv_connection_list.next, 2121 cp = list_entry(iucv_connection_list.next,
2122 struct iucv_connection, list); 2122 struct iucv_connection, list);
@@ -2146,8 +2146,7 @@ static int __init netiucv_init(void)
2146 rc = iucv_register(&netiucv_handler, 1); 2146 rc = iucv_register(&netiucv_handler, 1);
2147 if (rc) 2147 if (rc)
2148 goto out_dbf; 2148 goto out_dbf;
2149 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2149 IUCV_DBF_TEXT(trace, 3, __func__);
2150 netiucv_driver.groups = netiucv_drv_attr_groups;
2151 rc = driver_register(&netiucv_driver); 2150 rc = driver_register(&netiucv_driver);
2152 if (rc) { 2151 if (rc) {
2153 PRINT_ERR("NETIUCV: failed to register driver.\n"); 2152 PRINT_ERR("NETIUCV: failed to register driver.\n");
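The netiucv hunks above are a mechanical conversion from the GCC-specific __FUNCTION__ identifier to the C99-standard __func__ predefined identifier; both expand to the name of the enclosing function. A minimal stand-alone sketch (the TRACE macro is made up for illustration and is not part of the driver):

#include <stdio.h>

/* __func__ is defined by C99; __FUNCTION__ is a GCC extension kept only
 * for backwards compatibility. */
#define TRACE() printf("%s entered\n", __func__)

static void demo(void)
{
	TRACE();		/* prints "demo entered" */
}

int main(void)
{
	demo();
	return 0;
}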
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 644a06eba828..4d4b54277c43 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -59,15 +59,15 @@ repeat:
59 59
60 printk(KERN_WARNING"%s: Code does not support more " 60 printk(KERN_WARNING"%s: Code does not support more "
61 "than two chained crws; please report to " 61 "than two chained crws; please report to "
62 "linux390@de.ibm.com!\n", __FUNCTION__); 62 "linux390@de.ibm.com!\n", __func__);
63 ccode = stcrw(&tmp_crw); 63 ccode = stcrw(&tmp_crw);
64 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, " 64 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
65 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 65 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
66 __FUNCTION__, tmp_crw.slct, tmp_crw.oflw, 66 __func__, tmp_crw.slct, tmp_crw.oflw,
67 tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc, 67 tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
68 tmp_crw.erc, tmp_crw.rsid); 68 tmp_crw.erc, tmp_crw.rsid);
69 printk(KERN_WARNING"%s: This was crw number %x in the " 69 printk(KERN_WARNING"%s: This was crw number %x in the "
70 "chain\n", __FUNCTION__, chain); 70 "chain\n", __func__, chain);
71 if (ccode != 0) 71 if (ccode != 0)
72 break; 72 break;
73 chain = tmp_crw.chn ? chain + 1 : 0; 73 chain = tmp_crw.chn ? chain + 1 : 0;
@@ -83,7 +83,7 @@ repeat:
83 crw[chain].rsid); 83 crw[chain].rsid);
84 /* Check for overflows. */ 84 /* Check for overflows. */
85 if (crw[chain].oflw) { 85 if (crw[chain].oflw) {
86 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 86 pr_debug("%s: crw overflow detected!\n", __func__);
87 css_schedule_eval_all(); 87 css_schedule_eval_all();
88 chain = 0; 88 chain = 0;
89 continue; 89 continue;
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index d3ca4281a494..ca681f9b67fc 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -105,4 +105,8 @@ static inline int stcrw(struct crw *pcrw )
105#define ED_ETR_SYNC 12 /* External damage ETR sync check */ 105#define ED_ETR_SYNC 12 /* External damage ETR sync check */
106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ 106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
107 107
108struct pt_regs;
109
110void s390_handle_mcck(void);
111void s390_do_machine_check(struct pt_regs *regs);
108#endif /* __s390mach */ 112#endif /* __s390mach */
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9e9f6c1e4e5d..45a7cd98c140 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -539,7 +539,7 @@ struct zfcp_rc_entry {
539 539
540/* logging routine for zfcp */ 540/* logging routine for zfcp */
541#define _ZFCP_LOG(fmt, args...) \ 541#define _ZFCP_LOG(fmt, args...) \
542 printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __FUNCTION__, \ 542 printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __func__, \
543 __LINE__ , ##args) 543 __LINE__ , ##args)
544 544
545#define ZFCP_LOG(level, fmt, args...) \ 545#define ZFCP_LOG(level, fmt, args...) \
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 291ff6235fe2..c3e4ab07b9cc 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -11,111 +11,13 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <asm/ebcdic.h> 13#include <asm/ebcdic.h>
14#include <asm/sysinfo.h>
14 15
15/* Sigh, math-emu. Don't ask. */ 16/* Sigh, math-emu. Don't ask. */
16#include <asm/sfp-util.h> 17#include <asm/sfp-util.h>
17#include <math-emu/soft-fp.h> 18#include <math-emu/soft-fp.h>
18#include <math-emu/single.h> 19#include <math-emu/single.h>
19 20
20struct sysinfo_1_1_1 {
21 char reserved_0[32];
22 char manufacturer[16];
23 char type[4];
24 char reserved_1[12];
25 char model_capacity[16];
26 char sequence[16];
27 char plant[4];
28 char model[16];
29};
30
31struct sysinfo_1_2_1 {
32 char reserved_0[80];
33 char sequence[16];
34 char plant[4];
35 char reserved_1[2];
36 unsigned short cpu_address;
37};
38
39struct sysinfo_1_2_2 {
40 char format;
41 char reserved_0[1];
42 unsigned short acc_offset;
43 char reserved_1[24];
44 unsigned int secondary_capability;
45 unsigned int capability;
46 unsigned short cpus_total;
47 unsigned short cpus_configured;
48 unsigned short cpus_standby;
49 unsigned short cpus_reserved;
50 unsigned short adjustment[0];
51};
52
53struct sysinfo_1_2_2_extension {
54 unsigned int alt_capability;
55 unsigned short alt_adjustment[0];
56};
57
58struct sysinfo_2_2_1 {
59 char reserved_0[80];
60 char sequence[16];
61 char plant[4];
62 unsigned short cpu_id;
63 unsigned short cpu_address;
64};
65
66struct sysinfo_2_2_2 {
67 char reserved_0[32];
68 unsigned short lpar_number;
69 char reserved_1;
70 unsigned char characteristics;
71 unsigned short cpus_total;
72 unsigned short cpus_configured;
73 unsigned short cpus_standby;
74 unsigned short cpus_reserved;
75 char name[8];
76 unsigned int caf;
77 char reserved_2[16];
78 unsigned short cpus_dedicated;
79 unsigned short cpus_shared;
80};
81
82#define LPAR_CHAR_DEDICATED (1 << 7)
83#define LPAR_CHAR_SHARED (1 << 6)
84#define LPAR_CHAR_LIMITED (1 << 5)
85
86struct sysinfo_3_2_2 {
87 char reserved_0[31];
88 unsigned char count;
89 struct {
90 char reserved_0[4];
91 unsigned short cpus_total;
92 unsigned short cpus_configured;
93 unsigned short cpus_standby;
94 unsigned short cpus_reserved;
95 char name[8];
96 unsigned int caf;
97 char cpi[16];
98 char reserved_1[24];
99
100 } vm[8];
101};
102
103static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
104{
105 register int r0 asm("0") = (fc << 28) | sel1;
106 register int r1 asm("1") = sel2;
107
108 asm volatile(
109 " stsi 0(%2)\n"
110 "0: jz 2f\n"
111 "1: lhi %0,%3\n"
112 "2:\n"
113 EX_TABLE(0b,1b)
114 : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
115 : "cc", "memory" );
116 return r0;
117}
118
119static inline int stsi_0(void) 21static inline int stsi_0(void)
120{ 22{
121 int rc = stsi (NULL, 0, 0, 0); 23 int rc = stsi (NULL, 0, 0, 0);
@@ -133,6 +35,8 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
133 EBCASC(info->sequence, sizeof(info->sequence)); 35 EBCASC(info->sequence, sizeof(info->sequence));
134 EBCASC(info->plant, sizeof(info->plant)); 36 EBCASC(info->plant, sizeof(info->plant));
135 EBCASC(info->model_capacity, sizeof(info->model_capacity)); 37 EBCASC(info->model_capacity, sizeof(info->model_capacity));
38 EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
39 EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
136 len += sprintf(page + len, "Manufacturer: %-16.16s\n", 40 len += sprintf(page + len, "Manufacturer: %-16.16s\n",
137 info->manufacturer); 41 info->manufacturer);
138 len += sprintf(page + len, "Type: %-4.4s\n", 42 len += sprintf(page + len, "Type: %-4.4s\n",
@@ -155,8 +59,18 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
155 info->sequence); 59 info->sequence);
156 len += sprintf(page + len, "Plant: %-4.4s\n", 60 len += sprintf(page + len, "Plant: %-4.4s\n",
157 info->plant); 61 info->plant);
158 len += sprintf(page + len, "Model Capacity: %-16.16s\n", 62 len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n",
159 info->model_capacity); 63 info->model_capacity, *(u32 *) info->model_cap_rating);
64 if (info->model_perm_cap[0] != '\0')
65 len += sprintf(page + len,
66 "Model Perm. Capacity: %-16.16s %08u\n",
67 info->model_perm_cap,
68 *(u32 *) info->model_perm_cap_rating);
69 if (info->model_temp_cap[0] != '\0')
70 len += sprintf(page + len,
71 "Model Temp. Capacity: %-16.16s %08u\n",
72 info->model_temp_cap,
73 *(u32 *) info->model_temp_cap_rating);
160 return len; 74 return len;
161} 75}
162 76
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index 123b557c3ff4..0818ecd30ca6 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -397,6 +397,10 @@ struct cio_iplinfo {
397 397
398extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo); 398extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
399 399
400/* Function from drivers/s390/cio/chsc.c */
401int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
402int chsc_sstpi(void *page, void *result, size_t size);
403
400#endif 404#endif
401 405
402#endif 406#endif
diff --git a/include/asm-s390/cpu.h b/include/asm-s390/cpu.h
index 352dde194f3c..e5a6a9ba3adf 100644
--- a/include/asm-s390/cpu.h
+++ b/include/asm-s390/cpu.h
@@ -22,4 +22,12 @@ struct s390_idle_data {
22 22
23DECLARE_PER_CPU(struct s390_idle_data, s390_idle); 23DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
24 24
25void s390_idle_leave(void);
26
27static inline void s390_idle_check(void)
28{
29 if ((&__get_cpu_var(s390_idle))->in_idle)
30 s390_idle_leave();
31}
32
25#endif /* _ASM_S390_CPU_H_ */ 33#endif /* _ASM_S390_CPU_H_ */
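The s390_idle_check() helper added above lets interrupt paths terminate idle-time accounting before any real work is done. A sketch of a caller, assuming kernel context; the handler name is hypothetical:

#include <asm/cpu.h>

/* Hypothetical interrupt entry path: leave idle accounting, if the CPU
 * was idle, before handling the interrupt. */
static void example_interrupt_entry(void)
{
	s390_idle_check();
	/* ... process the interrupt ... */
}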
diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h
index c00dd2b3dc50..335baf4fc64f 100644
--- a/include/asm-s390/debug.h
+++ b/include/asm-s390/debug.h
@@ -73,6 +73,7 @@ typedef struct debug_info {
73 struct dentry* debugfs_entries[DEBUG_MAX_VIEWS]; 73 struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
74 struct debug_view* views[DEBUG_MAX_VIEWS]; 74 struct debug_view* views[DEBUG_MAX_VIEWS];
75 char name[DEBUG_MAX_NAME_LEN]; 75 char name[DEBUG_MAX_NAME_LEN];
76 mode_t mode;
76} debug_info_t; 77} debug_info_t;
77 78
78typedef int (debug_header_proc_t) (debug_info_t* id, 79typedef int (debug_header_proc_t) (debug_info_t* id,
@@ -122,6 +123,10 @@ debug_entry_t* debug_exception_common(debug_info_t* id, int level,
122debug_info_t* debug_register(char* name, int pages, int nr_areas, 123debug_info_t* debug_register(char* name, int pages, int nr_areas,
123 int buf_size); 124 int buf_size);
124 125
126debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
127 int buf_size, mode_t mode, uid_t uid,
128 gid_t gid);
129
125void debug_unregister(debug_info_t* id); 130void debug_unregister(debug_info_t* id);
126 131
127void debug_set_level(debug_info_t* id, int new_level); 132void debug_set_level(debug_info_t* id, int new_level);
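debug_register_mode() extends debug_register() so the caller can choose the mode, uid and gid of the debugfs files created for a debug feature. A usage sketch based only on the declaration above; the feature name and sizes are example values:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <asm/debug.h>

static debug_info_t *my_dbf;

static int __init my_debug_init(void)
{
	/* 4 pages, 1 area, 16-byte entries, files readable by root only */
	my_dbf = debug_register_mode("my_dbf", 4, 1, 16,
				     S_IRUSR, 0, 0);
	if (!my_dbf)
		return -ENOMEM;
	debug_set_level(my_dbf, 3);
	return 0;
}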
diff --git a/include/asm-s390/extmem.h b/include/asm-s390/extmem.h
index c8802c934b74..33837d756184 100644
--- a/include/asm-s390/extmem.h
+++ b/include/asm-s390/extmem.h
@@ -22,11 +22,12 @@
22#define SEGMENT_SHARED 0 22#define SEGMENT_SHARED 0
23#define SEGMENT_EXCLUSIVE 1 23#define SEGMENT_EXCLUSIVE 1
24 24
25extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length); 25int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length);
26extern void segment_unload(char *name); 26void segment_unload(char *name);
27extern void segment_save(char *name); 27void segment_save(char *name);
28extern int segment_type (char* name); 28int segment_type (char* name);
29extern int segment_modify_shared (char *name, int do_nonshared); 29int segment_modify_shared (char *name, int do_nonshared);
30void segment_warning(int rc, char *seg_name);
30 31
31#endif 32#endif
32#endif 33#endif
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
index 31beb18cb3d1..4b7cb964ff35 100644
--- a/include/asm-s390/hardirq.h
+++ b/include/asm-s390/hardirq.h
@@ -32,6 +32,6 @@ typedef struct {
32 32
33#define HARDIRQ_BITS 8 33#define HARDIRQ_BITS 8
34 34
35extern void account_ticks(u64 time); 35void clock_comparator_work(void);
36 36
37#endif /* __ASM_HARDIRQ_H */ 37#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 801a6fd35b5b..5de3efb31445 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -56,6 +56,8 @@
56#define __LC_IO_INT_WORD 0x0C0 56#define __LC_IO_INT_WORD 0x0C0
57#define __LC_MCCK_CODE 0x0E8 57#define __LC_MCCK_CODE 0x0E8
58 58
59#define __LC_LAST_BREAK 0x110
60
59#define __LC_RETURN_PSW 0x200 61#define __LC_RETURN_PSW 0x200
60 62
61#define __LC_SAVE_AREA 0xC00 63#define __LC_SAVE_AREA 0xC00
@@ -80,7 +82,6 @@
80#define __LC_CPUID 0xC60 82#define __LC_CPUID 0xC60
81#define __LC_CPUADDR 0xC68 83#define __LC_CPUADDR 0xC68
82#define __LC_IPLDEV 0xC7C 84#define __LC_IPLDEV 0xC7C
83#define __LC_JIFFY_TIMER 0xC80
84#define __LC_CURRENT 0xC90 85#define __LC_CURRENT 0xC90
85#define __LC_INT_CLOCK 0xC98 86#define __LC_INT_CLOCK 0xC98
86#else /* __s390x__ */ 87#else /* __s390x__ */
@@ -103,7 +104,6 @@
103#define __LC_CPUID 0xD80 104#define __LC_CPUID 0xD80
104#define __LC_CPUADDR 0xD88 105#define __LC_CPUADDR 0xD88
105#define __LC_IPLDEV 0xDB8 106#define __LC_IPLDEV 0xDB8
106#define __LC_JIFFY_TIMER 0xDC0
107#define __LC_CURRENT 0xDD8 107#define __LC_CURRENT 0xDD8
108#define __LC_INT_CLOCK 0xDE8 108#define __LC_INT_CLOCK 0xDE8
109#endif /* __s390x__ */ 109#endif /* __s390x__ */
@@ -276,7 +276,7 @@ struct _lowcore
276 /* entry.S sensitive area end */ 276 /* entry.S sensitive area end */
277 277
278 /* SMP info area: defined by DJB */ 278 /* SMP info area: defined by DJB */
279 __u64 jiffy_timer; /* 0xc80 */ 279 __u64 clock_comparator; /* 0xc80 */
280 __u32 ext_call_fast; /* 0xc88 */ 280 __u32 ext_call_fast; /* 0xc88 */
281 __u32 percpu_offset; /* 0xc8c */ 281 __u32 percpu_offset; /* 0xc8c */
282 __u32 current_task; /* 0xc90 */ 282 __u32 current_task; /* 0xc90 */
@@ -368,11 +368,12 @@ struct _lowcore
368 /* entry.S sensitive area end */ 368 /* entry.S sensitive area end */
369 369
370 /* SMP info area: defined by DJB */ 370 /* SMP info area: defined by DJB */
371 __u64 jiffy_timer; /* 0xdc0 */ 371 __u64 clock_comparator; /* 0xdc0 */
372 __u64 ext_call_fast; /* 0xdc8 */ 372 __u64 ext_call_fast; /* 0xdc8 */
373 __u64 percpu_offset; /* 0xdd0 */ 373 __u64 percpu_offset; /* 0xdd0 */
374 __u64 current_task; /* 0xdd8 */ 374 __u64 current_task; /* 0xdd8 */
375 __u64 softirq_pending; /* 0xde0 */ 375 __u32 softirq_pending; /* 0xde0 */
376 __u32 pad_0x0de4; /* 0xde4 */
376 __u64 int_clock; /* 0xde8 */ 377 __u64 int_clock; /* 0xde8 */
377 __u8 pad12[0xe00-0xdf0]; /* 0xdf0 */ 378 __u8 pad12[0xe00-0xdf0]; /* 0xdf0 */
378 379
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 51d88912aa20..8eaf343a12a8 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -175,6 +175,13 @@ extern void task_show_regs(struct seq_file *m, struct task_struct *task);
175extern void show_registers(struct pt_regs *regs); 175extern void show_registers(struct pt_regs *regs);
176extern void show_code(struct pt_regs *regs); 176extern void show_code(struct pt_regs *regs);
177extern void show_trace(struct task_struct *task, unsigned long *sp); 177extern void show_trace(struct task_struct *task, unsigned long *sp);
178#ifdef CONFIG_64BIT
179extern void show_last_breaking_event(struct pt_regs *regs);
180#else
181static inline void show_last_breaking_event(struct pt_regs *regs)
182{
183}
184#endif
178 185
179unsigned long get_wchan(struct task_struct *p); 186unsigned long get_wchan(struct task_struct *p);
180#define task_pt_regs(tsk) ((struct pt_regs *) \ 187#define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index c7b74326a527..6f3821a6a902 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -90,6 +90,9 @@ extern void __cpu_die (unsigned int cpu);
90extern void cpu_die (void) __attribute__ ((noreturn)); 90extern void cpu_die (void) __attribute__ ((noreturn));
91extern int __cpu_up (unsigned int cpu); 91extern int __cpu_up (unsigned int cpu);
92 92
93extern struct mutex smp_cpu_state_mutex;
94extern int smp_cpu_polarization[];
95
93extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), 96extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
94 void *info, int wait); 97 void *info, int wait);
95#endif 98#endif
diff --git a/include/asm-s390/sysinfo.h b/include/asm-s390/sysinfo.h
new file mode 100644
index 000000000000..abe10ae15e46
--- /dev/null
+++ b/include/asm-s390/sysinfo.h
@@ -0,0 +1,116 @@
1/*
2 * definition for store system information stsi
3 *
4 * Copyright IBM Corp. 2001,2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Ulrich Weigand <weigand@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */
13
14struct sysinfo_1_1_1 {
15 char reserved_0[32];
16 char manufacturer[16];
17 char type[4];
18 char reserved_1[12];
19 char model_capacity[16];
20 char sequence[16];
21 char plant[4];
22 char model[16];
23 char model_perm_cap[16];
24 char model_temp_cap[16];
25 char model_cap_rating[4];
26 char model_perm_cap_rating[4];
27 char model_temp_cap_rating[4];
28};
29
30struct sysinfo_1_2_1 {
31 char reserved_0[80];
32 char sequence[16];
33 char plant[4];
34 char reserved_1[2];
35 unsigned short cpu_address;
36};
37
38struct sysinfo_1_2_2 {
39 char format;
40 char reserved_0[1];
41 unsigned short acc_offset;
42 char reserved_1[24];
43 unsigned int secondary_capability;
44 unsigned int capability;
45 unsigned short cpus_total;
46 unsigned short cpus_configured;
47 unsigned short cpus_standby;
48 unsigned short cpus_reserved;
49 unsigned short adjustment[0];
50};
51
52struct sysinfo_1_2_2_extension {
53 unsigned int alt_capability;
54 unsigned short alt_adjustment[0];
55};
56
57struct sysinfo_2_2_1 {
58 char reserved_0[80];
59 char sequence[16];
60 char plant[4];
61 unsigned short cpu_id;
62 unsigned short cpu_address;
63};
64
65struct sysinfo_2_2_2 {
66 char reserved_0[32];
67 unsigned short lpar_number;
68 char reserved_1;
69 unsigned char characteristics;
70 unsigned short cpus_total;
71 unsigned short cpus_configured;
72 unsigned short cpus_standby;
73 unsigned short cpus_reserved;
74 char name[8];
75 unsigned int caf;
76 char reserved_2[16];
77 unsigned short cpus_dedicated;
78 unsigned short cpus_shared;
79};
80
81#define LPAR_CHAR_DEDICATED (1 << 7)
82#define LPAR_CHAR_SHARED (1 << 6)
83#define LPAR_CHAR_LIMITED (1 << 5)
84
85struct sysinfo_3_2_2 {
86 char reserved_0[31];
87 unsigned char count;
88 struct {
89 char reserved_0[4];
90 unsigned short cpus_total;
91 unsigned short cpus_configured;
92 unsigned short cpus_standby;
93 unsigned short cpus_reserved;
94 char name[8];
95 unsigned int caf;
96 char cpi[16];
97 char reserved_1[24];
98
99 } vm[8];
100};
101
102static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
103{
104 register int r0 asm("0") = (fc << 28) | sel1;
105 register int r1 asm("1") = sel2;
106
107 asm volatile(
108 " stsi 0(%2)\n"
109 "0: jz 2f\n"
110 "1: lhi %0,%3\n"
111 "2:\n"
112 EX_TABLE(0b, 1b)
113 : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
114 : "cc", "memory");
115 return r0;
116}
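The stsi() wrapper above issues the STORE SYSTEM INFORMATION instruction and, via the exception table, turns an unsupported request into a return value of -ENOSYS. A caller sketch, assuming kernel context; the callers in drivers/s390/sysinfo.c use a whole page for the SYSIB and treat -ENOSYS as "this level is not available":

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>

static int example_stsi_1_1_1(void)
{
	struct sysinfo_1_1_1 *info;
	int rc = 0;

	info = (void *) get_zeroed_page(GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	if (stsi(info, 1, 1, 1) == -ENOSYS) {
		rc = -EOPNOTSUPP;
		goto out;
	}
	/* The text fields are EBCDIC; convert in place before use. */
	EBCASC(info->manufacturer, sizeof(info->manufacturer));
	EBCASC(info->model, sizeof(info->model));
out:
	free_page((unsigned long) info);
	return rc;
}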
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 15aba30601a3..92098df4d6e3 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -406,6 +406,8 @@ __set_psw_mask(unsigned long mask)
406#define local_mcck_enable() __set_psw_mask(psw_kernel_bits) 406#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
407#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) 407#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
408 408
409int stfle(unsigned long long *list, int doublewords);
410
409#ifdef CONFIG_SMP 411#ifdef CONFIG_SMP
410 412
411extern void smp_ctl_set_bit(int cr, int bit); 413extern void smp_ctl_set_bit(int cr, int bit);
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 98229db24314..d744c3d62de5 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -62,16 +62,18 @@ static inline unsigned long long get_clock (void)
62 return clk; 62 return clk;
63} 63}
64 64
65static inline void get_clock_extended(void *dest) 65static inline unsigned long long get_clock_xt(void)
66{ 66{
67 typedef struct { unsigned long long clk[2]; } __clock_t; 67 unsigned char clk[16];
68 68
69#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 69#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
70 asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc"); 70 asm volatile("stcke %0" : "=Q" (clk) : : "cc");
71#else /* __GNUC__ */ 71#else /* __GNUC__ */
72 asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest)) 72 asm volatile("stcke 0(%1)" : "=m" (clk)
73 : "a" ((__clock_t *)dest) : "cc"); 73 : "a" (clk) : "cc");
74#endif /* __GNUC__ */ 74#endif /* __GNUC__ */
75
76 return *((unsigned long long *)&clk[1]);
75} 77}
76 78
77static inline cycles_t get_cycles(void) 79static inline cycles_t get_cycles(void)
@@ -81,5 +83,6 @@ static inline cycles_t get_cycles(void)
81 83
82int get_sync_clock(unsigned long long *clock); 84int get_sync_clock(unsigned long long *clock);
83void init_cpu_timer(void); 85void init_cpu_timer(void);
86unsigned long long monotonic_clock(void);
84 87
85#endif 88#endif
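get_clock() and the reworked get_clock_xt() both return TOD clock values; bit 51 of the TOD clock corresponds to one microsecond, so a difference shifted right by 12 is a duration in microseconds. A small sketch using only get_clock() from this header; the helper name is made up:

#include <asm/timex.h>

/* Returns the elapsed time of fn() in microseconds (TOD units >> 12). */
static unsigned long long time_call_us(void (*fn)(void))
{
	unsigned long long start = get_clock();

	fn();
	return (get_clock() - start) >> 12;
}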
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 35fb4f9127b2..9e57a93d7de1 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -13,12 +13,14 @@ static inline void __tlb_flush_local(void)
13 asm volatile("ptlb" : : : "memory"); 13 asm volatile("ptlb" : : : "memory");
14} 14}
15 15
16#ifdef CONFIG_SMP
16/* 17/*
17 * Flush all tlb entries on all cpus. 18 * Flush all tlb entries on all cpus.
18 */ 19 */
20void smp_ptlb_all(void);
21
19static inline void __tlb_flush_global(void) 22static inline void __tlb_flush_global(void)
20{ 23{
21 extern void smp_ptlb_all(void);
22 register unsigned long reg2 asm("2"); 24 register unsigned long reg2 asm("2");
23 register unsigned long reg3 asm("3"); 25 register unsigned long reg3 asm("3");
24 register unsigned long reg4 asm("4"); 26 register unsigned long reg4 asm("4");
@@ -39,6 +41,25 @@ static inline void __tlb_flush_global(void)
39 : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); 41 : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
40} 42}
41 43
44static inline void __tlb_flush_full(struct mm_struct *mm)
45{
46 cpumask_t local_cpumask;
47
48 preempt_disable();
49 /*
50 * If the process only ran on the local cpu, do a local flush.
51 */
52 local_cpumask = cpumask_of_cpu(smp_processor_id());
53 if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
54 __tlb_flush_local();
55 else
56 __tlb_flush_global();
57 preempt_enable();
58}
59#else
60#define __tlb_flush_full(mm) __tlb_flush_local()
61#endif
62
42/* 63/*
43 * Flush all tlb entries of a page table on all cpus. 64 * Flush all tlb entries of a page table on all cpus.
44 */ 65 */
@@ -51,8 +72,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
51 72
52static inline void __tlb_flush_mm(struct mm_struct * mm) 73static inline void __tlb_flush_mm(struct mm_struct * mm)
53{ 74{
54 cpumask_t local_cpumask;
55
56 if (unlikely(cpus_empty(mm->cpu_vm_mask))) 75 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
57 return; 76 return;
58 /* 77 /*
@@ -69,16 +88,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
69 mm->context.asce_bits); 88 mm->context.asce_bits);
70 return; 89 return;
71 } 90 }
72 preempt_disable(); 91 __tlb_flush_full(mm);
73 /*
74 * If the process only ran on the local cpu, do a local flush.
75 */
76 local_cpumask = cpumask_of_cpu(smp_processor_id());
77 if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
78 __tlb_flush_local();
79 else
80 __tlb_flush_global();
81 preempt_enable();
82} 92}
83 93
84static inline void __tlb_flush_mm_cond(struct mm_struct * mm) 94static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
diff --git a/include/asm-s390/topology.h b/include/asm-s390/topology.h
index 613aa64019da..8e97b06f298a 100644
--- a/include/asm-s390/topology.h
+++ b/include/asm-s390/topology.h
@@ -1,6 +1,29 @@
1#ifndef _ASM_S390_TOPOLOGY_H 1#ifndef _ASM_S390_TOPOLOGY_H
2#define _ASM_S390_TOPOLOGY_H 2#define _ASM_S390_TOPOLOGY_H
3 3
4#include <linux/cpumask.h>
5
6#define mc_capable() (1)
7
8cpumask_t cpu_coregroup_map(unsigned int cpu);
9
10int topology_set_cpu_management(int fc);
11void topology_schedule_update(void);
12
13#define POLARIZATION_UNKNWN (-1)
14#define POLARIZATION_HRZ (0)
15#define POLARIZATION_VL (1)
16#define POLARIZATION_VM (2)
17#define POLARIZATION_VH (3)
18
19#ifdef CONFIG_SMP
20void s390_init_cpu_topology(void);
21#else
22static inline void s390_init_cpu_topology(void)
23{
24};
25#endif
26
4#include <asm-generic/topology.h> 27#include <asm-generic/topology.h>
5 28
6#endif /* _ASM_S390_TOPOLOGY_H */ 29#endif /* _ASM_S390_TOPOLOGY_H */
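cpu_coregroup_map() returns the set of CPUs that share a core group with the given CPU, and the POLARIZATION_* constants describe the horizontal/vertical polarization reported by the topology code. A sketch using the new interface, assuming kernel context; the helper name is made up:

#include <linux/cpumask.h>
#include <asm/topology.h>

/* How many online CPUs share a core group with "cpu"? */
static int coregroup_size(unsigned int cpu)
{
	cpumask_t siblings = cpu_coregroup_map(cpu);

	cpus_and(siblings, siblings, cpu_online_map);
	return cpus_weight(siblings);
}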
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f8ab4ce70564..b5fef13148bd 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -102,6 +102,25 @@ extern void disable_irq_nosync(unsigned int irq);
102extern void disable_irq(unsigned int irq); 102extern void disable_irq(unsigned int irq);
103extern void enable_irq(unsigned int irq); 103extern void enable_irq(unsigned int irq);
104 104
105#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
106
107extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
108extern int irq_can_set_affinity(unsigned int irq);
109
110#else /* CONFIG_SMP */
111
112static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
113{
114 return -EINVAL;
115}
116
117static inline int irq_can_set_affinity(unsigned int irq)
118{
119 return 0;
120}
121
122#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
123
105#ifdef CONFIG_GENERIC_HARDIRQS 124#ifdef CONFIG_GENERIC_HARDIRQS
106/* 125/*
107 * Special lockdep variants of irq disabling/enabling. 126 * Special lockdep variants of irq disabling/enabling.
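irq_set_affinity() and irq_can_set_affinity() now live in <linux/interrupt.h>, with !SMP stubs so drivers can call them unconditionally. A driver-side sketch; the function name is made up and pinning to CPU 0 is only an example policy:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Best-effort: bind the interrupt to the boot CPU.  On !SMP builds, or when
 * the architecture cannot set affinity, this is a no-op. */
static void pin_irq_to_cpu0(unsigned int irq)
{
	if (irq_can_set_affinity(irq))
		irq_set_affinity(irq, cpumask_of_cpu(0));
}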
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 176e5e790a44..1883a85625dd 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -228,21 +228,11 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
228 228
229#endif /* CONFIG_GENERIC_PENDING_IRQ */ 229#endif /* CONFIG_GENERIC_PENDING_IRQ */
230 230
231extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
232extern int irq_can_set_affinity(unsigned int irq);
233
234#else /* CONFIG_SMP */ 231#else /* CONFIG_SMP */
235 232
236#define move_native_irq(x) 233#define move_native_irq(x)
237#define move_masked_irq(x) 234#define move_masked_irq(x)
238 235
239static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
240{
241 return -EINVAL;
242}
243
244static inline int irq_can_set_affinity(unsigned int irq) { return 0; }
245
246#endif /* CONFIG_SMP */ 236#endif /* CONFIG_SMP */
247 237
248#ifdef CONFIG_IRQBALANCE 238#ifdef CONFIG_IRQBALANCE
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index e1bd50cbbf5d..fdfa0c745bb6 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -14,7 +14,7 @@
14#include <linux/cpu.h> 14#include <linux/cpu.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/hrtimer.h> 16#include <linux/hrtimer.h>
17#include <linux/irq.h> 17#include <linux/interrupt.h>
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/profile.h> 19#include <linux/profile.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 1bea399a9ef0..4f3886562b8c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -14,12 +14,14 @@
14#include <linux/cpu.h> 14#include <linux/cpu.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/hrtimer.h> 16#include <linux/hrtimer.h>
17#include <linux/irq.h> 17#include <linux/interrupt.h>
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/profile.h> 19#include <linux/profile.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/tick.h> 21#include <linux/tick.h>
22 22
23#include <asm/irq_regs.h>
24
23#include "tick-internal.h" 25#include "tick-internal.h"
24 26
25/* 27/*
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 0258d3115d54..450c04935b66 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -14,7 +14,7 @@
14#include <linux/cpu.h> 14#include <linux/cpu.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/hrtimer.h> 16#include <linux/hrtimer.h>
17#include <linux/irq.h> 17#include <linux/interrupt.h>
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/profile.h> 19#include <linux/profile.h>
20#include <linux/sched.h> 20#include <linux/sched.h>