author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-03-06 22:52:50 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-03-06 22:52:50 -0500
commit     fe1b4ba400193176213f95be3ea711a53518a609
tree       e60943d4703fca5a291fffc2e0b77a786b6db3f4
parent     ae5dd8e346efc25a5f9cc9d01bc0915c40eb38d9
parent     2470b648e17e0216922bb78c7f05b4668402459a
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] cio: Call cancel_halt_clear even when actl == 0.
  [S390] cio: Use path verification to check for path state.
  [S390] cio: Fix locking when calling notify function.
  [S390] Fixed handling of access register mode faults.
  [S390] dasd: Use default recovery for SNSS requests
  [S390] check_bugs() should be inline.
  [S390] tape: Compression overwrites crypto setting
  [S390] nss: disable kexec.
  [S390] reipl: move dump_prefix_page out of text section.
  [S390] smp: disable preemption in smp_call_function/smp_call_function_on
  [S390] kprobes breaks BUG_ON
-rw-r--r--  arch/s390/Kconfig                |   2
-rw-r--r--  arch/s390/kernel/head31.S        |  11
-rw-r--r--  arch/s390/kernel/head64.S        |  11
-rw-r--r--  arch/s390/kernel/ipl.c           |   4
-rw-r--r--  arch/s390/kernel/kprobes.c       |  21
-rw-r--r--  arch/s390/kernel/machine_kexec.c |   5
-rw-r--r--  arch/s390/kernel/reipl.S         |  13
-rw-r--r--  arch/s390/kernel/reipl64.S       |  13
-rw-r--r--  arch/s390/kernel/smp.c           |  15
-rw-r--r--  arch/s390/mm/fault.c             | 105
-rw-r--r--  drivers/s390/block/dasd_eer.c    |   1
-rw-r--r--  drivers/s390/char/tape_std.c     |   5
-rw-r--r--  drivers/s390/cio/device_fsm.c    | 117
-rw-r--r--  include/asm-s390/bugs.h          |   2
-rw-r--r--  include/asm-s390/ipl.h           |   1
15 files changed, 165 insertions(+), 161 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d9425f59be91..0f293aa7b0fa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -376,6 +376,8 @@ config SHARED_KERNEL
           Select this option, if you want to share the text segment of the
           Linux kernel between different VM guests. This reduces memory
           usage with lots of guests but greatly increases kernel size.
+          Also if a kernel was IPL'ed from a shared segment the kexec system
+          call will not work.
           You should only select this option if you know what you are
           doing and want to exploit this feature.
 
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index da7c8bb80982..dc364c1419af 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -121,7 +121,7 @@ startup_continue:
         .long .Lduct                  # cr2: dispatchable unit control table
         .long 0                       # cr3: instruction authorization
         .long 0                       # cr4: instruction authorization
-        .long 0xffffffff              # cr5: primary-aste origin
+        .long .Lduct                  # cr5: primary-aste origin
         .long 0                       # cr6: I/O interrupts
         .long 0                       # cr7: secondary space segment table
         .long 0                       # cr8: access registers translation
@@ -132,8 +132,6 @@ startup_continue:
         .long 0                       # cr13: home space segment table
         .long 0xc0000000              # cr14: machine check handling off
         .long 0                       # cr15: linkage stack operations
-.Lduct: .long 0,0,0,0,0,0,0,0
-        .long 0,0,0,0,0,0,0,0
 .Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
 .Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
 .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
@@ -147,6 +145,13 @@ startup_continue:
 .Linittu: .long init_thread_union
 .Lstartup_init:
         .long startup_init
+        .align 64
+.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+        .long 0,0,0,0,0,0,0,0
+        .align 128
+.Lduald:.rept 8
+        .long 0x80000000,0,0,0        # invalid access-list entries
+        .endr
 
         .org 0x12000
         .globl _ehead
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index af09e18cc5d0..37010709fe68 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -134,7 +134,7 @@ startup_continue:
         .quad .Lduct                  # cr2: dispatchable unit control table
         .quad 0                       # cr3: instruction authorization
         .quad 0                       # cr4: instruction authorization
-        .quad 0xffffffffffffffff      # cr5: primary-aste origin
+        .quad .Lduct                  # cr5: primary-aste origin
         .quad 0                       # cr6: I/O interrupts
         .quad 0                       # cr7: secondary space segment table
         .quad 0                       # cr8: access registers translation
@@ -145,14 +145,19 @@ startup_continue:
         .quad 0                       # cr13: home space segment table
         .quad 0xc0000000              # cr14: machine check handling off
         .quad 0                       # cr15: linkage stack operations
-.Lduct: .long 0,0,0,0,0,0,0,0
-        .long 0,0,0,0,0,0,0,0
 .Lpcmsk:.quad 0x0000000180000000
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad 0x80000000 + 0x20000 - 8       # 2GB + 128K - 8
 .Lnop:  .long 0x07000700
 .Lparmaddr:
         .quad PARMAREA
+        .align 64
+.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+        .long 0,0,0,0,0,0,0,0
+        .align 128
+.Lduald:.rept 8
+        .long 0x80000000,0,0,0        # invalid access-list entries
+        .endr
 
         .org 0x12000
         .globl _ehead
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5a863a3bf10c..d125a4ead08d 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1066,7 +1066,7 @@ static void do_reset_calls(void)
                 reset->fn();
 }
 
-extern __u32 dump_prefix_page;
+u32 dump_prefix_page;
 
 void s390_reset_system(void)
 {
@@ -1078,7 +1078,7 @@ void s390_reset_system(void)
         lc->panic_stack = S390_lowcore.panic_stack;
 
         /* Save prefix page address for dump case */
-        dump_prefix_page = (unsigned long) lc;
+        dump_prefix_page = (u32)(unsigned long) lc;
 
         /* Disable prefixing */
         set_prefix(0);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index a466bab6677e..8af549e95730 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -337,21 +337,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
         }
 
         p = get_kprobe(addr);
-        if (!p) {
-                if (*addr != BREAKPOINT_INSTRUCTION) {
-                        /*
-                         * The breakpoint instruction was removed right
-                         * after we hit it. Another cpu has removed
-                         * either a probepoint or a debugger breakpoint
-                         * at this address. In either case, no further
-                         * handling of this interrupt is appropriate.
-                         *
-                         */
-                        ret = 1;
-                }
-                /* Not one of ours: let kernel handle it */
+        if (!p)
+                /*
+                 * No kprobe at this address. The fault has not been
+                 * caused by a kprobe breakpoint. The race of breakpoint
+                 * vs. kprobe remove does not exist because on s390 we
+                 * use stop_machine_run to arm/disarm the breakpoints.
+                 */
                 goto no_kprobe;
-        }
 
         kcb->kprobe_status = KPROBE_HIT_ACTIVE;
         set_current_kprobe(p, regs, kcb);
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 52f57af252b4..3c77dd36994c 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -19,6 +19,7 @@
 #include <asm/system.h>
 #include <asm/smp.h>
 #include <asm/reset.h>
+#include <asm/ipl.h>
 
 typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
 
@@ -29,6 +30,10 @@ int machine_kexec_prepare(struct kimage *image)
 {
         void *reboot_code_buffer;
 
+        /* Can't replace kernel image since it is read-only. */
+        if (ipl_flags & IPL_NSS_VALID)
+                return -ENOSYS;
+
         /* We don't support anything but the default image type for now. */
         if (image->type != KEXEC_TYPE_DEFAULT)
                 return -EINVAL;
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index c3f4d9b95083..2f481cc3d1c9 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -8,6 +8,10 @@
 
 #include <asm/lowcore.h>
 
+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
         .globl do_reipl_asm
 do_reipl_asm: basr %r13,0
 .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
@@ -16,12 +20,12 @@ do_reipl_asm: basr %r13,0
         stm %r0,%r15,__LC_GPREGS_SAVE_AREA
         stctl %c0,%c15,__LC_CREGS_SAVE_AREA
         stam %a0,%a15,__LC_AREGS_SAVE_AREA
-        mvc __LC_PREFIX_SAVE_AREA(4),dump_prefix_page-.Lpg0(%r13)
+        l %r10,.Ldump_pfx-.Lpg0(%r13)
+        mvc __LC_PREFIX_SAVE_AREA(4),0(%r10)
         stckc .Lclkcmp-.Lpg0(%r13)
         mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
         stpt __LC_CPU_TIMER_SAVE_AREA
         st %r13, __LC_PSW_SAVE_AREA+4
-
         lctl %c6,%c6,.Lall-.Lpg0(%r13)
         lr %r1,%r2
         mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
@@ -55,6 +59,7 @@ do_reipl_asm: basr %r13,0
         .align 8
 .Lclkcmp: .quad 0x0000000000000000
 .Lall: .long 0xff000000
+.Ldump_pfx: .long dump_prefix_page
         .align 8
 .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
 .Lpcnew: .long 0x00080000,0x80000000+.Lecs
@@ -79,7 +84,3 @@ do_reipl_asm: basr %r13,0
         .long 0x00000000,0x00000000
         .long 0x00000000,0x00000000
         .long 0x00000000,0x00000000
-        .globl dump_prefix_page
-dump_prefix_page:
-        .long 0x00000000
-
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index dbb3eed38865..c41930499a5f 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -8,6 +8,12 @@
  */
 
 #include <asm/lowcore.h>
+
+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
+
         .globl do_reipl_asm
 do_reipl_asm: basr %r13,0
 .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
@@ -20,7 +26,8 @@ do_reipl_asm: basr %r13,0
         stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
         stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
         stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
-        mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),dump_prefix_page-.Lpg0(%r13)
+        lg %r10,.Ldump_pfx-.Lpg0(%r13)
+        mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
         stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
         stckc .Lclkcmp-.Lpg0(%r13)
         mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13)
@@ -64,6 +71,7 @@ do_reipl_asm: basr %r13,0
         .align 8
 .Lclkcmp: .quad 0x0000000000000000
 .Lall: .quad 0x00000000ff000000
+.Ldump_pfx: .quad dump_prefix_page
 .Lregsave: .quad 0x0000000000000000
         .align 16
 /*
@@ -103,6 +111,3 @@ do_reipl_asm: basr %r13,0
         .long 0x00000000,0x00000000
         .long 0x00000000,0x00000000
         .long 0x00000000,0x00000000
-        .globl dump_prefix_page
-dump_prefix_page:
-        .long 0x00000000
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index ecaa432a99f8..97764f710bb7 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -94,10 +94,9 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
         int cpu, local = 0;
 
         /*
-         * Can deadlock when interrupts are disabled or if in wrong context,
-         * caller must disable preemption
+         * Can deadlock when interrupts are disabled or if in wrong context.
          */
-        WARN_ON(irqs_disabled() || in_irq() || preemptible());
+        WARN_ON(irqs_disabled() || in_irq());
 
         /*
          * Check for local function call. We have to have the same call order
@@ -152,17 +151,18 @@ out:
  * Run a function on all other CPUs.
  *
  * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. Must be called with preemption disabled.
- * You may call it from a bottom half.
+ * hardware interrupt handler. You may call it from a bottom half.
  */
 int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
                       int wait)
 {
         cpumask_t map;
 
+        preempt_disable();
         map = cpu_online_map;
         cpu_clear(smp_processor_id(), map);
         __smp_call_function_map(func, info, nonatomic, wait, map);
+        preempt_enable();
         return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
@@ -178,16 +178,17 @@ EXPORT_SYMBOL(smp_call_function);
  * Run a function on one processor.
  *
  * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. Must be called with preemption disabled.
- * You may call it from a bottom half.
+ * hardware interrupt handler. You may call it from a bottom half.
  */
 int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
                          int wait, int cpu)
 {
         cpumask_t map = CPU_MASK_NONE;
 
+        preempt_disable();
         cpu_set(cpu, map);
         __smp_call_function_map(func, info, nonatomic, wait, map);
+        preempt_enable();
         return 0;
 }
 EXPORT_SYMBOL(smp_call_function_on);
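
For orientation, the smp.c hunks above move the preemption guard into the helpers themselves instead of requiring callers to disable preemption: with preemption off, smp_processor_id() and the snapshot of cpu_online_map stay stable while the target CPU mask is built. A minimal sketch of the resulting pattern follows; it is illustrative only, mirrors the 2.6-era API used in this diff, would only link inside arch/s390/kernel/smp.c (where __smp_call_function_map() is defined), and example_call_on_others is a hypothetical name.

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/*
 * Sketch of the pattern introduced above: take the preemption guard
 * inside the function so the caller no longer has to.  With preemption
 * disabled, the current CPU cannot change while we snapshot the online
 * mask and exclude ourselves from the set of CPUs to signal.
 */
static int example_call_on_others(void (*func)(void *), void *info)
{
        cpumask_t map;

        preempt_disable();                  /* pin this CPU */
        map = cpu_online_map;               /* snapshot of online CPUs */
        cpu_clear(smp_processor_id(), map); /* never signal ourselves */
        __smp_call_function_map(func, info, 0, 1, map);
        preempt_enable();
        return 0;
}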
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 641aef36ccc4..7462aebd3eb6 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -108,53 +108,40 @@ void bust_spinlocks(int yes)
 }
 
 /*
- * Check which address space is addressed by the access
- * register in S390_lowcore.exc_access_id.
- * Returns 1 for user space and 0 for kernel space.
+ * Returns the address space associated with the fault.
+ * Returns 0 for kernel space, 1 for user space and
+ * 2 for code execution in user space with noexec=on.
  */
-static int __check_access_register(struct pt_regs *regs, int error_code)
-{
-        int areg = S390_lowcore.exc_access_id;
-
-        if (areg == 0)
-                /* Access via access register 0 -> kernel address */
-                return 0;
-        save_access_regs(current->thread.acrs);
-        if (regs && areg < NUM_ACRS && current->thread.acrs[areg] <= 1)
-                /*
-                 * access register contains 0 -> kernel address,
-                 * access register contains 1 -> user space address
-                 */
-                return current->thread.acrs[areg];
-
-        /* Something unhealthy was done with the access registers... */
-        die("page fault via unknown access register", regs, error_code);
-        do_exit(SIGKILL);
-        return 0;
-}
-
-/*
- * Check which address space the address belongs to.
- * May return 1 or 2 for user space and 0 for kernel space.
- * Returns 2 for user space in primary addressing mode with
- * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
- */
-static inline int check_user_space(struct pt_regs *regs, int error_code)
+static inline int check_space(struct task_struct *tsk)
 {
         /*
-         * The lowest two bits of S390_lowcore.trans_exc_code indicate
-         * which paging table was used:
-         *   0: Primary Segment Table Descriptor
-         *   1: STD determined via access register
-         *   2: Secondary Segment Table Descriptor
-         *   3: Home Segment Table Descriptor
+         * The lowest two bits of S390_lowcore.trans_exc_code
+         * indicate which paging table was used.
          */
-        int descriptor = S390_lowcore.trans_exc_code & 3;
-        if (unlikely(descriptor == 1))
-                return __check_access_register(regs, error_code);
-        if (descriptor == 2)
-                return current->thread.mm_segment.ar4;
-        return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
+        int desc = S390_lowcore.trans_exc_code & 3;
+
+        if (desc == 3)  /* Home Segment Table Descriptor */
+                return switch_amode == 0;
+        if (desc == 2)  /* Secondary Segment Table Descriptor */
+                return tsk->thread.mm_segment.ar4;
+#ifdef CONFIG_S390_SWITCH_AMODE
+        if (unlikely(desc == 1)) { /* STD determined via access register */
+                /* %a0 always indicates primary space. */
+                if (S390_lowcore.exc_access_id != 0) {
+                        save_access_regs(tsk->thread.acrs);
+                        /*
+                         * An alet of 0 indicates primary space.
+                         * An alet of 1 indicates secondary space.
+                         * Any other alet values generate an
+                         * alen-translation exception.
+                         */
+                        if (tsk->thread.acrs[S390_lowcore.exc_access_id])
+                                return tsk->thread.mm_segment.ar4;
+                }
+        }
+#endif
+        /* Primary Segment Table Descriptor */
+        return switch_amode << s390_noexec;
 }
 
 /*
@@ -265,16 +252,16 @@ out_fault:
  *   11 Page translation     -> Not present (nullification)
  *   3b Region third trans.  -> Not present (nullification)
  */
-static inline void __kprobes
+static inline void
 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 {
         struct task_struct *tsk;
         struct mm_struct *mm;
         struct vm_area_struct * vma;
         unsigned long address;
-        int user_address;
         const struct exception_table_entry *fixup;
-        int si_code = SEGV_MAPERR;
+        int si_code;
+        int space;
 
         tsk = current;
         mm = tsk->mm;
@@ -294,7 +281,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
            NULL pointer write access in kernel mode. */
         if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
                 address = 0;
-                user_address = 0;
+                space = 0;
                 goto no_context;
         }
 
@@ -309,15 +296,15 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
          * the address
          */
         address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-        user_address = check_user_space(regs, error_code);
+        space = check_space(tsk);
 
         /*
          * Verify that the fault happened in user space, that
          * we are not in an interrupt and that there is a
          * user context.
          */
-        if (user_address == 0 || in_atomic() || !mm)
+        if (unlikely(space == 0 || in_atomic() || !mm))
                 goto no_context;
 
         /*
          * When we get here, the fault happened in the current
@@ -328,12 +315,13 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 
         down_read(&mm->mmap_sem);
 
-        vma = find_vma(mm, address);
-        if (!vma)
-                goto bad_area;
+        si_code = SEGV_MAPERR;
+        vma = find_vma(mm, address);
+        if (!vma)
+                goto bad_area;
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-        if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
+        if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
                 if (!signal_return(mm, regs, address, error_code))
                         /*
                          * signal_return() has done an up_read(&mm->mmap_sem)
@@ -389,7 +377,7 @@ survive:
          * The instruction that caused the program check will
          * be repeated. Don't signal single step via SIGTRAP.
          */
-        clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+        clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
         return;
 
 /*
@@ -419,7 +407,7 @@ no_context:
          * Oops. The kernel tried to access some bad page. We'll have to
          * terminate things with extreme prejudice.
          */
-        if (user_address == 0)
+        if (space == 0)
                 printk(KERN_ALERT "Unable to handle kernel pointer dereference"
                        " at virtual kernel address %p\n", (void *)address);
         else
@@ -462,13 +450,14 @@ do_sigbus:
                 goto no_context;
 }
 
-void do_protection_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs,
+                                       unsigned long error_code)
 {
         regs->psw.addr -= (error_code >> 16);
         do_exception(regs, 4, 1);
 }
 
-void do_dat_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code)
 {
         do_exception(regs, error_code & 0xff, 0);
 }
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 4b8a95fba1e5..a1dc8c466ec9 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -461,6 +461,7 @@ int dasd_eer_enable(struct dasd_device *device)
         cqr->device = device;
         cqr->retries = 255;
         cqr->expires = 10 * HZ;
+        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 
         cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS;
         cqr->cpaddr->count = SNSS_DATA_SIZE;
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 7a76ec413a3a..2a1af4e60be0 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -647,7 +647,10 @@ tape_std_mtcompression(struct tape_device *device, int mt_count)
                 return PTR_ERR(request);
         request->op = TO_NOP;
         /* setup ccws */
-        *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08;
+        if (mt_count == 0)
+                *device->modeset_byte &= ~0x08;
+        else
+                *device->modeset_byte |= 0x08;
         tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
         tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
         /* execute it */
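
For clarity, the tape_std.c hunk above changes MTCOMPRESSION handling from a whole-byte assignment to a read-modify-write, so toggling the compression bit (0x08) no longer wipes the other mode bits (such as the crypto setting) stored in the same modeset byte. A standalone illustration of the difference, in plain C with hypothetical values:

#include <stdio.h>

#define COMPRESSION_BIT 0x08   /* bit toggled by MTCOMPRESSION */

int main(void)
{
        unsigned char modeset = 0x43;   /* hypothetical: other mode bits already set */

        /* Old behaviour: plain assignment clobbers every other bit. */
        unsigned char old_style = COMPRESSION_BIT;

        /* Fixed behaviour: read-modify-write preserves unrelated bits. */
        unsigned char enabled  = modeset | COMPRESSION_BIT;
        unsigned char disabled = modeset & (unsigned char)~COMPRESSION_BIT;

        printf("old: 0x%02x  enable: 0x%02x  disable: 0x%02x\n",
               old_style, enabled, disabled);
        return 0;
}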
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 51238e7555bb..089a3ddd6265 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -144,8 +144,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
         ret = stsch(sch->schid, &sch->schib);
         if (ret || !sch->schib.pmcw.dnv)
                 return -ENODEV;
-        if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
-                /* Not operational or no activity -> done. */
+        if (!sch->schib.pmcw.ena)
+                /* Not operational -> done. */
                 return 0;
         /* Stage 1: cancel io. */
         if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
@@ -334,20 +334,29 @@ ccw_device_oper_notify(struct work_struct *work)
         struct ccw_device *cdev;
         struct subchannel *sch;
         int ret;
+        unsigned long flags;
 
         priv = container_of(work, struct ccw_device_private, kick_work);
         cdev = priv->cdev;
+        spin_lock_irqsave(cdev->ccwlock, flags);
         sch = to_subchannel(cdev->dev.parent);
-        ret = (sch->driver && sch->driver->notify) ?
-                sch->driver->notify(&sch->dev, CIO_OPER) : 0;
-        if (!ret)
-                /* Driver doesn't want device back. */
-                ccw_device_do_unreg_rereg(work);
-        else {
+        if (sch->driver && sch->driver->notify) {
+                spin_unlock_irqrestore(cdev->ccwlock, flags);
+                ret = sch->driver->notify(&sch->dev, CIO_OPER);
+                spin_lock_irqsave(cdev->ccwlock, flags);
+        } else
+                ret = 0;
+        if (ret) {
                 /* Reenable channel measurements, if needed. */
+                spin_unlock_irqrestore(cdev->ccwlock, flags);
                 cmf_reenable(cdev);
+                spin_lock_irqsave(cdev->ccwlock, flags);
                 wake_up(&cdev->private->wait_q);
         }
+        spin_unlock_irqrestore(cdev->ccwlock, flags);
+        if (!ret)
+                /* Driver doesn't want device back. */
+                ccw_device_do_unreg_rereg(work);
 }
 
 /*
@@ -534,15 +543,21 @@ ccw_device_nopath_notify(struct work_struct *work)
         struct ccw_device *cdev;
         struct subchannel *sch;
         int ret;
+        unsigned long flags;
 
         priv = container_of(work, struct ccw_device_private, kick_work);
         cdev = priv->cdev;
+        spin_lock_irqsave(cdev->ccwlock, flags);
         sch = to_subchannel(cdev->dev.parent);
         /* Extra sanity. */
         if (sch->lpm)
-                return;
-        ret = (sch->driver && sch->driver->notify) ?
-                sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
+                goto out_unlock;
+        if (sch->driver && sch->driver->notify) {
+                spin_unlock_irqrestore(cdev->ccwlock, flags);
+                ret = sch->driver->notify(&sch->dev, CIO_NO_PATH);
+                spin_lock_irqsave(cdev->ccwlock, flags);
+        } else
+                ret = 0;
         if (!ret) {
                 if (get_device(&sch->dev)) {
                         /* Driver doesn't want to keep device. */
@@ -562,6 +577,8 @@ ccw_device_nopath_notify(struct work_struct *work)
                 cdev->private->state = DEV_STATE_DISCONNECTED;
                 wake_up(&cdev->private->wait_q);
         }
+out_unlock:
+        spin_unlock_irqrestore(cdev->ccwlock, flags);
 }
 
 void
@@ -607,10 +624,13 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
         default:
                 /* Reset oper notify indication after verify error. */
                 cdev->private->flags.donotify = 0;
-                PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_nopath_notify);
-                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+                if (cdev->online) {
+                        PREPARE_WORK(&cdev->private->kick_work,
+                                     ccw_device_nopath_notify);
+                        queue_work(ccw_device_notify_work,
+                                   &cdev->private->kick_work);
+                } else
+                        ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                 break;
         }
 }
@@ -756,15 +776,22 @@ static void
 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 {
         struct subchannel *sch;
+        int ret;
 
         sch = to_subchannel(cdev->dev.parent);
-        if (sch->driver->notify &&
-            sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
-                ccw_device_set_timeout(cdev, 0);
-                cdev->private->flags.fake_irb = 0;
-                cdev->private->state = DEV_STATE_DISCONNECTED;
-                wake_up(&cdev->private->wait_q);
-                return;
+        if (sch->driver->notify) {
+                spin_unlock_irq(cdev->ccwlock);
+                ret = sch->driver->notify(&sch->dev,
+                                          sch->lpm ? CIO_GONE : CIO_NO_PATH);
+                spin_lock_irq(cdev->ccwlock);
+        } else
+                ret = 0;
+        if (ret) {
+                ccw_device_set_timeout(cdev, 0);
+                cdev->private->flags.fake_irb = 0;
+                cdev->private->state = DEV_STATE_DISCONNECTED;
+                wake_up(&cdev->private->wait_q);
+                return;
         }
         cdev->private->state = DEV_STATE_NOT_OPER;
         cio_disable_subchannel(sch);
@@ -969,18 +996,12 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
 
         sch = to_subchannel(cdev->dev.parent);
         ccw_device_set_timeout(cdev, 0);
+        /* Start delayed path verification. */
+        ccw_device_online_verify(cdev, 0);
         /* OK, i/o is dead now. Call interrupt handler. */
-        cdev->private->state = DEV_STATE_ONLINE;
         if (cdev->handler)
                 cdev->handler(cdev, cdev->private->intparm,
                               ERR_PTR(-EIO));
-        if (!sch->lpm) {
-                PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_nopath_notify);
-                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-        } else if (cdev->private->flags.doverify)
-                /* Start delayed path verification. */
-                ccw_device_online_verify(cdev, 0);
 }
 
 static void
@@ -993,21 +1014,8 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
                 ccw_device_set_timeout(cdev, 3*HZ);
                 return;
         }
-        if (ret == -ENODEV) {
-                struct subchannel *sch;
-
-                sch = to_subchannel(cdev->dev.parent);
-                if (!sch->lpm) {
-                        PREPARE_WORK(&cdev->private->kick_work,
-                                     ccw_device_nopath_notify);
-                        queue_work(ccw_device_notify_work,
-                                   &cdev->private->kick_work);
-                } else
-                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
-                return;
-        }
-        //FIXME: Can we get here?
-        cdev->private->state = DEV_STATE_ONLINE;
+        /* Start delayed path verification. */
+        ccw_device_online_verify(cdev, 0);
         if (cdev->handler)
                 cdev->handler(cdev, cdev->private->intparm,
                               ERR_PTR(-EIO));
@@ -1025,26 +1033,11 @@ void device_kill_io(struct subchannel *sch)
                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                 return;
         }
-        if (ret == -ENODEV) {
-                if (!sch->lpm) {
-                        PREPARE_WORK(&cdev->private->kick_work,
-                                     ccw_device_nopath_notify);
-                        queue_work(ccw_device_notify_work,
-                                   &cdev->private->kick_work);
-                } else
-                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
-                return;
-        }
+        /* Start delayed path verification. */
+        ccw_device_online_verify(cdev, 0);
         if (cdev->handler)
                 cdev->handler(cdev, cdev->private->intparm,
                               ERR_PTR(-EIO));
-        if (!sch->lpm) {
-                PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_nopath_notify);
-                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-        } else
-                /* Start delayed path verification. */
-                ccw_device_online_verify(cdev, 0);
 }
 
 static void
diff --git a/include/asm-s390/bugs.h b/include/asm-s390/bugs.h
index 2c3659621314..011f1e6a2a6c 100644
--- a/include/asm-s390/bugs.h
+++ b/include/asm-s390/bugs.h
@@ -16,7 +16,7 @@
  * void check_bugs(void);
  */
 
-static void __init check_bugs(void)
+static inline void check_bugs(void)
 {
         /* s390 has no bugs ... */
 }
diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h
index 5650d3d4ae46..660f78271a93 100644
--- a/include/asm-s390/ipl.h
+++ b/include/asm-s390/ipl.h
@@ -74,6 +74,7 @@ struct ipl_parameter_block {
 extern u32 ipl_flags;
 extern u16 ipl_devno;
 
+extern u32 dump_prefix_page;
 extern void do_reipl(void);
 extern void ipl_save_parameters(void);
 