aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r--arch/avr32/Kconfig9
-rw-r--r--arch/avr32/boards/atngw100/setup.c29
-rw-r--r--arch/avr32/boards/atstk1000/atstk1002.c8
-rw-r--r--arch/avr32/boards/atstk1000/atstk1003.c7
-rw-r--r--arch/avr32/boards/atstk1000/atstk1004.c9
-rw-r--r--arch/avr32/kernel/entry-avr32b.S88
-rw-r--r--arch/avr32/kernel/signal.c3
-rw-r--r--arch/avr32/kernel/time.c14
-rw-r--r--arch/avr32/kernel/vmlinux.lds.S12
-rw-r--r--arch/avr32/lib/io-readsb.S2
-rw-r--r--arch/avr32/mach-at32ap/Makefile7
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c252
-rw-r--r--arch/avr32/mach-at32ap/intc.c80
-rw-r--r--arch/avr32/mach-at32ap/pdc.c (renamed from arch/avr32/mach-at32ap/at32ap.c)8
-rw-r--r--arch/avr32/mach-at32ap/pio.c2
-rw-r--r--arch/avr32/mach-at32ap/pio.h2
-rw-r--r--arch/avr32/mach-at32ap/pm-at32ap700x.S108
-rw-r--r--arch/avr32/mach-at32ap/pm.c245
-rw-r--r--arch/avr32/mach-at32ap/sdramc.h76
-rw-r--r--arch/avr32/mm/init.c22
-rw-r--r--arch/avr32/mm/tlb.c175
-rw-r--r--arch/blackfin/mach-bf561/coreb.c1
-rw-r--r--arch/cris/arch-v10/drivers/eeprom.c4
-rw-r--r--arch/cris/arch-v10/drivers/gpio.c3
-rw-r--r--arch/cris/arch-v10/drivers/i2c.c2
-rw-r--r--arch/cris/arch-v10/drivers/sync_serial.c34
-rw-r--r--arch/cris/arch-v32/drivers/cryptocop.c3
-rw-r--r--arch/cris/arch-v32/drivers/i2c.c2
-rw-r--r--arch/cris/arch-v32/drivers/mach-a3/gpio.c4
-rw-r--r--arch/cris/arch-v32/drivers/mach-fs/gpio.c5
-rw-r--r--arch/cris/arch-v32/drivers/sync_serial.c33
-rw-r--r--arch/m68k/bvme6000/rtc.c7
-rw-r--r--arch/m68k/mvme16x/rtc.c4
-rw-r--r--arch/mips/basler/excite/excite_iodev.c9
-rw-r--r--arch/mips/kernel/rtlx.c7
-rw-r--r--arch/mips/kernel/vpe.c12
-rw-r--r--arch/mips/sibyte/common/sb_tbprof.c25
-rw-r--r--arch/parisc/kernel/perf.c4
-rw-r--r--arch/s390/Kconfig21
-rw-r--r--arch/s390/appldata/appldata.h10
-rw-r--r--arch/s390/appldata/appldata_base.c41
-rw-r--r--arch/s390/appldata/appldata_mem.c43
-rw-r--r--arch/s390/appldata/appldata_net_sum.c39
-rw-r--r--arch/s390/appldata/appldata_os.c57
-rw-r--r--arch/s390/crypto/crypt_s390.h4
-rw-r--r--arch/s390/crypto/prng.c7
-rw-r--r--arch/s390/hypfs/inode.c29
-rw-r--r--arch/s390/kernel/Makefile9
-rw-r--r--arch/s390/kernel/binfmt_elf32.c214
-rw-r--r--arch/s390/kernel/compat_ptrace.h4
-rw-r--r--arch/s390/kernel/debug.c9
-rw-r--r--arch/s390/kernel/early.c211
-rw-r--r--arch/s390/kernel/ipl.c462
-rw-r--r--arch/s390/kernel/kprobes.c4
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/mem_detect.c100
-rw-r--r--arch/s390/kernel/process.c32
-rw-r--r--arch/s390/kernel/ptrace.c363
-rw-r--r--arch/s390/kernel/setup.c51
-rw-r--r--arch/s390/kernel/time.c634
-rw-r--r--arch/s390/kernel/topology.c2
-rw-r--r--arch/s390/kernel/vtime.c81
-rw-r--r--arch/s390/mm/init.c19
-rw-r--r--arch/sh/boards/landisk/gio.c10
-rw-r--r--arch/sparc/kernel/apc.c2
-rw-r--r--arch/sparc64/kernel/time.c7
-rw-r--r--arch/um/drivers/harddog_kern.c3
-rw-r--r--arch/um/drivers/mmapper_kern.c2
-rw-r--r--arch/um/drivers/random.c3
-rw-r--r--arch/x86/kernel/apm_32.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_64.c4
-rw-r--r--arch/x86/kernel/cpuid.c25
-rw-r--r--arch/x86/kernel/microcode.c2
-rw-r--r--arch/x86/kernel/msr.c16
-rw-r--r--arch/x86/kernel/traps_64.c25
75 files changed, 2592 insertions, 1275 deletions
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 09ad7995080c..45d63c986015 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -88,6 +88,7 @@ config PLATFORM_AT32AP
88 select MMU 88 select MMU
89 select PERFORMANCE_COUNTERS 89 select PERFORMANCE_COUNTERS
90 select HAVE_GPIO_LIB 90 select HAVE_GPIO_LIB
91 select GENERIC_ALLOCATOR
91 92
92# 93#
93# CPU types 94# CPU types
@@ -147,6 +148,9 @@ config PHYS_OFFSET
147 148
148source "kernel/Kconfig.preempt" 149source "kernel/Kconfig.preempt"
149 150
151config QUICKLIST
152 def_bool y
153
150config HAVE_ARCH_BOOTMEM_NODE 154config HAVE_ARCH_BOOTMEM_NODE
151 def_bool n 155 def_bool n
152 156
@@ -201,6 +205,11 @@ endmenu
201 205
202menu "Power management options" 206menu "Power management options"
203 207
208source "kernel/power/Kconfig"
209
210config ARCH_SUSPEND_POSSIBLE
211 def_bool y
212
204menu "CPU Frequency scaling" 213menu "CPU Frequency scaling"
205 214
206source "drivers/cpufreq/Kconfig" 215source "drivers/cpufreq/Kconfig"
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index a398be284966..a51bb9fb3c89 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -9,6 +9,8 @@
9 */ 9 */
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/irq.h>
13#include <linux/i2c.h>
12#include <linux/i2c-gpio.h> 14#include <linux/i2c-gpio.h>
13#include <linux/init.h> 15#include <linux/init.h>
14#include <linux/linkage.h> 16#include <linux/linkage.h>
@@ -25,6 +27,13 @@
25#include <asm/arch/init.h> 27#include <asm/arch/init.h>
26#include <asm/arch/portmux.h> 28#include <asm/arch/portmux.h>
27 29
30/* Oscillator frequencies. These are board-specific */
31unsigned long at32_board_osc_rates[3] = {
32 [0] = 32768, /* 32.768 kHz on RTC osc */
33 [1] = 20000000, /* 20 MHz on osc0 */
34 [2] = 12000000, /* 12 MHz on osc1 */
35};
36
28/* Initialized by bootloader-specific startup code. */ 37/* Initialized by bootloader-specific startup code. */
29struct tag *bootloader_tags __initdata; 38struct tag *bootloader_tags __initdata;
30 39
@@ -140,6 +149,10 @@ static struct platform_device i2c_gpio_device = {
140 }, 149 },
141}; 150};
142 151
152static struct i2c_board_info __initdata i2c_info[] = {
153 /* NOTE: original ATtiny24 firmware is at address 0x0b */
154};
155
143static int __init atngw100_init(void) 156static int __init atngw100_init(void)
144{ 157{
145 unsigned i; 158 unsigned i;
@@ -165,12 +178,28 @@ static int __init atngw100_init(void)
165 } 178 }
166 platform_device_register(&ngw_gpio_leds); 179 platform_device_register(&ngw_gpio_leds);
167 180
181 /* all these i2c/smbus pins should have external pullups for
182 * open-drain sharing among all I2C devices. SDA and SCL do;
183 * PB28/EXTINT3 doesn't; it should be SMBALERT# (for PMBus),
184 * but it's not available off-board.
185 */
186 at32_select_periph(GPIO_PIN_PB(28), 0, AT32_GPIOF_PULLUP);
168 at32_select_gpio(i2c_gpio_data.sda_pin, 187 at32_select_gpio(i2c_gpio_data.sda_pin,
169 AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH); 188 AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
170 at32_select_gpio(i2c_gpio_data.scl_pin, 189 at32_select_gpio(i2c_gpio_data.scl_pin,
171 AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH); 190 AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
172 platform_device_register(&i2c_gpio_device); 191 platform_device_register(&i2c_gpio_device);
192 i2c_register_board_info(0, i2c_info, ARRAY_SIZE(i2c_info));
173 193
174 return 0; 194 return 0;
175} 195}
176postcore_initcall(atngw100_init); 196postcore_initcall(atngw100_init);
197
198static int __init atngw100_arch_init(void)
199{
200 /* set_irq_type() after the arch_initcall for EIC has run, and
201 * before the I2C subsystem could try using this IRQ.
202 */
203 return set_irq_type(AT32_EXTINT(3), IRQ_TYPE_EDGE_FALLING);
204}
205arch_initcall(atngw100_arch_init);
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 000eb4220a12..86b363c1c25b 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -28,6 +28,12 @@
28 28
29#include "atstk1000.h" 29#include "atstk1000.h"
30 30
31/* Oscillator frequencies. These are board specific */
32unsigned long at32_board_osc_rates[3] = {
33 [0] = 32768, /* 32.768 kHz on RTC osc */
34 [1] = 20000000, /* 20 MHz on osc0 */
35 [2] = 12000000, /* 12 MHz on osc1 */
36};
31 37
32struct eth_addr { 38struct eth_addr {
33 u8 addr[6]; 39 u8 addr[6];
@@ -232,7 +238,7 @@ static int __init atstk1002_init(void)
232 set_hw_addr(at32_add_device_eth(1, &eth_data[1])); 238 set_hw_addr(at32_add_device_eth(1, &eth_data[1]));
233#else 239#else
234 at32_add_device_lcdc(0, &atstk1000_lcdc_data, 240 at32_add_device_lcdc(0, &atstk1000_lcdc_data,
235 fbmem_start, fbmem_size); 241 fbmem_start, fbmem_size, 0);
236#endif 242#endif
237 at32_add_device_usba(0, NULL); 243 at32_add_device_usba(0, NULL);
238#ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM 244#ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM
diff --git a/arch/avr32/boards/atstk1000/atstk1003.c b/arch/avr32/boards/atstk1000/atstk1003.c
index a0b223df35a2..ea109f435a83 100644
--- a/arch/avr32/boards/atstk1000/atstk1003.c
+++ b/arch/avr32/boards/atstk1000/atstk1003.c
@@ -27,6 +27,13 @@
27 27
28#include "atstk1000.h" 28#include "atstk1000.h"
29 29
30/* Oscillator frequencies. These are board specific */
31unsigned long at32_board_osc_rates[3] = {
32 [0] = 32768, /* 32.768 kHz on RTC osc */
33 [1] = 20000000, /* 20 MHz on osc0 */
34 [2] = 12000000, /* 12 MHz on osc1 */
35};
36
30#ifdef CONFIG_BOARD_ATSTK1000_EXTDAC 37#ifdef CONFIG_BOARD_ATSTK1000_EXTDAC
31static struct at73c213_board_info at73c213_data = { 38static struct at73c213_board_info at73c213_data = {
32 .ssc_id = 0, 39 .ssc_id = 0,
diff --git a/arch/avr32/boards/atstk1000/atstk1004.c b/arch/avr32/boards/atstk1000/atstk1004.c
index e765a8652b3e..c7236df74d74 100644
--- a/arch/avr32/boards/atstk1000/atstk1004.c
+++ b/arch/avr32/boards/atstk1000/atstk1004.c
@@ -29,6 +29,13 @@
29 29
30#include "atstk1000.h" 30#include "atstk1000.h"
31 31
32/* Oscillator frequencies. These are board specific */
33unsigned long at32_board_osc_rates[3] = {
34 [0] = 32768, /* 32.768 kHz on RTC osc */
35 [1] = 20000000, /* 20 MHz on osc0 */
36 [2] = 12000000, /* 12 MHz on osc1 */
37};
38
32#ifdef CONFIG_BOARD_ATSTK1000_EXTDAC 39#ifdef CONFIG_BOARD_ATSTK1000_EXTDAC
33static struct at73c213_board_info at73c213_data = { 40static struct at73c213_board_info at73c213_data = {
34 .ssc_id = 0, 41 .ssc_id = 0,
@@ -133,7 +140,7 @@ static int __init atstk1004_init(void)
133 at32_add_device_mci(0); 140 at32_add_device_mci(0);
134#endif 141#endif
135 at32_add_device_lcdc(0, &atstk1000_lcdc_data, 142 at32_add_device_lcdc(0, &atstk1000_lcdc_data,
136 fbmem_start, fbmem_size); 143 fbmem_start, fbmem_size, 0);
137 at32_add_device_usba(0, NULL); 144 at32_add_device_usba(0, NULL);
138#ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM 145#ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM
139 at32_add_device_ssc(0, ATMEL_SSC_TX); 146 at32_add_device_ssc(0, ATMEL_SSC_TX);
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 5f31702d6b1c..2b398cae110c 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -74,50 +74,41 @@ exception_vectors:
74 .align 2 74 .align 2
75 bral do_dtlb_modified 75 bral do_dtlb_modified
76 76
77 /*
78 * r0 : PGD/PT/PTE
79 * r1 : Offending address
80 * r2 : Scratch register
81 * r3 : Cause (5, 12 or 13)
82 */
83#define tlbmiss_save pushm r0-r3 77#define tlbmiss_save pushm r0-r3
84#define tlbmiss_restore popm r0-r3 78#define tlbmiss_restore popm r0-r3
85 79
86 .section .tlbx.ex.text,"ax",@progbits 80 .org 0x50
87 .global itlb_miss 81 .global itlb_miss
88itlb_miss: 82itlb_miss:
89 tlbmiss_save 83 tlbmiss_save
90 rjmp tlb_miss_common 84 rjmp tlb_miss_common
91 85
92 .section .tlbr.ex.text,"ax",@progbits 86 .org 0x60
93dtlb_miss_read: 87dtlb_miss_read:
94 tlbmiss_save 88 tlbmiss_save
95 rjmp tlb_miss_common 89 rjmp tlb_miss_common
96 90
97 .section .tlbw.ex.text,"ax",@progbits 91 .org 0x70
98dtlb_miss_write: 92dtlb_miss_write:
99 tlbmiss_save 93 tlbmiss_save
100 94
101 .global tlb_miss_common 95 .global tlb_miss_common
96 .align 2
102tlb_miss_common: 97tlb_miss_common:
103 mfsr r0, SYSREG_TLBEAR 98 mfsr r0, SYSREG_TLBEAR
104 mfsr r1, SYSREG_PTBR 99 mfsr r1, SYSREG_PTBR
105 100
106 /* Is it the vmalloc space? */ 101 /*
107 bld r0, 31 102 * First level lookup: The PGD contains virtual pointers to
108 brcs handle_vmalloc_miss 103 * the second-level page tables, but they may be NULL if not
109 104 * present.
110 /* First level lookup */ 105 */
111pgtbl_lookup: 106pgtbl_lookup:
112 lsr r2, r0, PGDIR_SHIFT 107 lsr r2, r0, PGDIR_SHIFT
113 ld.w r3, r1[r2 << 2] 108 ld.w r3, r1[r2 << 2]
114 bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT 109 bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
115 bld r3, _PAGE_BIT_PRESENT 110 cp.w r3, 0
116 brcc page_table_not_present 111 breq page_table_not_present
117
118 /* Translate to virtual address in P1. */
119 andl r3, 0xf000
120 sbr r3, 31
121 112
122 /* Second level lookup */ 113 /* Second level lookup */
123 ld.w r2, r3[r1 << 2] 114 ld.w r2, r3[r1 << 2]
@@ -148,16 +139,55 @@ pgtbl_lookup:
148 tlbmiss_restore 139 tlbmiss_restore
149 rete 140 rete
150 141
151handle_vmalloc_miss: 142 /* The slow path of the TLB miss handler */
152 /* Simply do the lookup in init's page table */ 143 .align 2
144page_table_not_present:
145 /* Do we need to synchronize with swapper_pg_dir? */
146 bld r0, 31
147 brcs sync_with_swapper_pg_dir
148
149page_not_present:
150 tlbmiss_restore
151 sub sp, 4
152 stmts --sp, r0-lr
153 rcall save_full_context_ex
154 mfsr r12, SYSREG_ECR
155 mov r11, sp
156 rcall do_page_fault
157 rjmp ret_from_exception
158
159 .align 2
160sync_with_swapper_pg_dir:
161 /*
162 * If swapper_pg_dir contains a non-NULL second-level page
163 * table pointer, copy it into the current PGD. If not, we
164 * must handle it as a full-blown page fault.
165 *
166 * Jumping back to pgtbl_lookup causes an unnecessary lookup,
167 * but it is guaranteed to be a cache hit, it won't happen
168 * very often, and we absolutely do not want to sacrifice any
169 * performance in the fast path in order to improve this.
170 */
153 mov r1, lo(swapper_pg_dir) 171 mov r1, lo(swapper_pg_dir)
154 orh r1, hi(swapper_pg_dir) 172 orh r1, hi(swapper_pg_dir)
173 ld.w r3, r1[r2 << 2]
174 cp.w r3, 0
175 breq page_not_present
176 mfsr r1, SYSREG_PTBR
177 st.w r1[r2 << 2], r3
155 rjmp pgtbl_lookup 178 rjmp pgtbl_lookup
156 179
180 /*
181 * We currently have two bytes left at this point until we
182 * crash into the system call handler...
183 *
184 * Don't worry, the assembler will let us know.
185 */
186
157 187
158 /* --- System Call --- */ 188 /* --- System Call --- */
159 189
160 .section .scall.text,"ax",@progbits 190 .org 0x100
161system_call: 191system_call:
162#ifdef CONFIG_PREEMPT 192#ifdef CONFIG_PREEMPT
163 mask_interrupts 193 mask_interrupts
@@ -266,18 +296,6 @@ syscall_exit_work:
266 brcc syscall_exit_cont 296 brcc syscall_exit_cont
267 rjmp enter_monitor_mode 297 rjmp enter_monitor_mode
268 298
269 /* The slow path of the TLB miss handler */
270page_table_not_present:
271page_not_present:
272 tlbmiss_restore
273 sub sp, 4
274 stmts --sp, r0-lr
275 rcall save_full_context_ex
276 mfsr r12, SYSREG_ECR
277 mov r11, sp
278 rcall do_page_fault
279 rjmp ret_from_exception
280
281 /* This function expects to find offending PC in SYSREG_RAR_EX */ 299 /* This function expects to find offending PC in SYSREG_RAR_EX */
282 .type save_full_context_ex, @function 300 .type save_full_context_ex, @function
283 .align 2 301 .align 2
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index 5616a00c10ba..c5b11f9067f1 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -93,6 +93,9 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
93 if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 93 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
94 goto badframe; 94 goto badframe;
95 95
96 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
97 goto badframe;
98
96 pr_debug("Context restored: pc = %08lx, lr = %08lx, sp = %08lx\n", 99 pr_debug("Context restored: pc = %08lx, lr = %08lx, sp = %08lx\n",
97 regs->pc, regs->lr, regs->sp); 100 regs->pc, regs->lr, regs->sp);
98 101
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 00a9862380ff..abd954fb7ba0 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -7,21 +7,13 @@
7 */ 7 */
8#include <linux/clk.h> 8#include <linux/clk.h>
9#include <linux/clockchips.h> 9#include <linux/clockchips.h>
10#include <linux/time.h> 10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/interrupt.h> 11#include <linux/interrupt.h>
13#include <linux/irq.h> 12#include <linux/irq.h>
14#include <linux/kernel_stat.h> 13#include <linux/kernel.h>
15#include <linux/errno.h> 14#include <linux/time.h>
16#include <linux/init.h>
17#include <linux/profile.h>
18#include <linux/sysdev.h>
19#include <linux/err.h>
20 15
21#include <asm/div64.h>
22#include <asm/sysreg.h> 16#include <asm/sysreg.h>
23#include <asm/io.h>
24#include <asm/sections.h>
25 17
26#include <asm/arch/pm.h> 18#include <asm/arch/pm.h>
27 19
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index 481cfd40c053..5d25d8eeb750 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -68,14 +68,6 @@ SECTIONS
68 _evba = .; 68 _evba = .;
69 _text = .; 69 _text = .;
70 *(.ex.text) 70 *(.ex.text)
71 . = 0x50;
72 *(.tlbx.ex.text)
73 . = 0x60;
74 *(.tlbr.ex.text)
75 . = 0x70;
76 *(.tlbw.ex.text)
77 . = 0x100;
78 *(.scall.text)
79 *(.irq.text) 71 *(.irq.text)
80 KPROBES_TEXT 72 KPROBES_TEXT
81 TEXT_TEXT 73 TEXT_TEXT
@@ -107,6 +99,10 @@ SECTIONS
107 */ 99 */
108 *(.data.init_task) 100 *(.data.init_task)
109 101
102 /* Then, the page-aligned data */
103 . = ALIGN(PAGE_SIZE);
104 *(.data.page_aligned)
105
110 /* Then, the cacheline aligned data */ 106 /* Then, the cacheline aligned data */
111 . = ALIGN(L1_CACHE_BYTES); 107 . = ALIGN(L1_CACHE_BYTES);
112 *(.data.cacheline_aligned) 108 *(.data.cacheline_aligned)
diff --git a/arch/avr32/lib/io-readsb.S b/arch/avr32/lib/io-readsb.S
index 2be5da7ed26b..cb2d86945559 100644
--- a/arch/avr32/lib/io-readsb.S
+++ b/arch/avr32/lib/io-readsb.S
@@ -41,7 +41,7 @@ __raw_readsb:
412: sub r10, -4 412: sub r10, -4
42 reteq r12 42 reteq r12
43 43
443: ld.uh r8, r12[0] 443: ld.ub r8, r12[0]
45 sub r10, 1 45 sub r10, 1
46 st.b r11++, r8 46 st.b r11++, r8
47 brne 3b 47 brne 3b
diff --git a/arch/avr32/mach-at32ap/Makefile b/arch/avr32/mach-at32ap/Makefile
index e89009439e4a..d5018e2eed25 100644
--- a/arch/avr32/mach-at32ap/Makefile
+++ b/arch/avr32/mach-at32ap/Makefile
@@ -1,3 +1,8 @@
1obj-y += at32ap.o clock.o intc.o extint.o pio.o hsmc.o 1obj-y += pdc.o clock.o intc.o extint.o pio.o hsmc.o
2obj-$(CONFIG_CPU_AT32AP700X) += at32ap700x.o pm-at32ap700x.o 2obj-$(CONFIG_CPU_AT32AP700X) += at32ap700x.o pm-at32ap700x.o
3obj-$(CONFIG_CPU_FREQ_AT32AP) += cpufreq.o 3obj-$(CONFIG_CPU_FREQ_AT32AP) += cpufreq.o
4obj-$(CONFIG_PM) += pm.o
5
6ifeq ($(CONFIG_PM_DEBUG),y)
7CFLAGS_pm.o += -DDEBUG
8endif
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 0f24b4f85c17..07b21b121eef 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -20,6 +20,7 @@
20#include <asm/arch/at32ap700x.h> 20#include <asm/arch/at32ap700x.h>
21#include <asm/arch/board.h> 21#include <asm/arch/board.h>
22#include <asm/arch/portmux.h> 22#include <asm/arch/portmux.h>
23#include <asm/arch/sram.h>
23 24
24#include <video/atmel_lcdc.h> 25#include <video/atmel_lcdc.h>
25 26
@@ -93,19 +94,12 @@ static struct clk devname##_##_name = { \
93 94
94static DEFINE_SPINLOCK(pm_lock); 95static DEFINE_SPINLOCK(pm_lock);
95 96
96unsigned long at32ap7000_osc_rates[3] = {
97 [0] = 32768,
98 /* FIXME: these are ATSTK1002-specific */
99 [1] = 20000000,
100 [2] = 12000000,
101};
102
103static struct clk osc0; 97static struct clk osc0;
104static struct clk osc1; 98static struct clk osc1;
105 99
106static unsigned long osc_get_rate(struct clk *clk) 100static unsigned long osc_get_rate(struct clk *clk)
107{ 101{
108 return at32ap7000_osc_rates[clk->index]; 102 return at32_board_osc_rates[clk->index];
109} 103}
110 104
111static unsigned long pll_get_rate(struct clk *clk, unsigned long control) 105static unsigned long pll_get_rate(struct clk *clk, unsigned long control)
@@ -682,6 +676,14 @@ static struct clk hramc_clk = {
682 .users = 1, 676 .users = 1,
683 .index = 3, 677 .index = 3,
684}; 678};
679static struct clk sdramc_clk = {
680 .name = "sdramc_clk",
681 .parent = &pbb_clk,
682 .mode = pbb_clk_mode,
683 .get_rate = pbb_clk_get_rate,
684 .users = 1,
685 .index = 14,
686};
685 687
686static struct resource smc0_resource[] = { 688static struct resource smc0_resource[] = {
687 PBMEM(0xfff03400), 689 PBMEM(0xfff03400),
@@ -841,6 +843,81 @@ void __init at32_add_system_devices(void)
841} 843}
842 844
843/* -------------------------------------------------------------------- 845/* --------------------------------------------------------------------
846 * PSIF
847 * -------------------------------------------------------------------- */
848static struct resource atmel_psif0_resource[] __initdata = {
849 {
850 .start = 0xffe03c00,
851 .end = 0xffe03cff,
852 .flags = IORESOURCE_MEM,
853 },
854 IRQ(18),
855};
856static struct clk atmel_psif0_pclk = {
857 .name = "pclk",
858 .parent = &pba_clk,
859 .mode = pba_clk_mode,
860 .get_rate = pba_clk_get_rate,
861 .index = 15,
862};
863
864static struct resource atmel_psif1_resource[] __initdata = {
865 {
866 .start = 0xffe03d00,
867 .end = 0xffe03dff,
868 .flags = IORESOURCE_MEM,
869 },
870 IRQ(18),
871};
872static struct clk atmel_psif1_pclk = {
873 .name = "pclk",
874 .parent = &pba_clk,
875 .mode = pba_clk_mode,
876 .get_rate = pba_clk_get_rate,
877 .index = 15,
878};
879
880struct platform_device *__init at32_add_device_psif(unsigned int id)
881{
882 struct platform_device *pdev;
883
884 if (!(id == 0 || id == 1))
885 return NULL;
886
887 pdev = platform_device_alloc("atmel_psif", id);
888 if (!pdev)
889 return NULL;
890
891 switch (id) {
892 case 0:
893 if (platform_device_add_resources(pdev, atmel_psif0_resource,
894 ARRAY_SIZE(atmel_psif0_resource)))
895 goto err_add_resources;
896 atmel_psif0_pclk.dev = &pdev->dev;
897 select_peripheral(PA(8), PERIPH_A, 0); /* CLOCK */
898 select_peripheral(PA(9), PERIPH_A, 0); /* DATA */
899 break;
900 case 1:
901 if (platform_device_add_resources(pdev, atmel_psif1_resource,
902 ARRAY_SIZE(atmel_psif1_resource)))
903 goto err_add_resources;
904 atmel_psif1_pclk.dev = &pdev->dev;
905 select_peripheral(PB(11), PERIPH_A, 0); /* CLOCK */
906 select_peripheral(PB(12), PERIPH_A, 0); /* DATA */
907 break;
908 default:
909 return NULL;
910 }
911
912 platform_device_add(pdev);
913 return pdev;
914
915err_add_resources:
916 platform_device_put(pdev);
917 return NULL;
918}
919
920/* --------------------------------------------------------------------
844 * USART 921 * USART
845 * -------------------------------------------------------------------- */ 922 * -------------------------------------------------------------------- */
846 923
@@ -1113,7 +1190,8 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
1113 switch (id) { 1190 switch (id) {
1114 case 0: 1191 case 0:
1115 pdev = &atmel_spi0_device; 1192 pdev = &atmel_spi0_device;
1116 select_peripheral(PA(0), PERIPH_A, 0); /* MISO */ 1193 /* pullup MISO so a level is always defined */
1194 select_peripheral(PA(0), PERIPH_A, AT32_GPIOF_PULLUP);
1117 select_peripheral(PA(1), PERIPH_A, 0); /* MOSI */ 1195 select_peripheral(PA(1), PERIPH_A, 0); /* MOSI */
1118 select_peripheral(PA(2), PERIPH_A, 0); /* SCK */ 1196 select_peripheral(PA(2), PERIPH_A, 0); /* SCK */
1119 at32_spi_setup_slaves(0, b, n, spi0_pins); 1197 at32_spi_setup_slaves(0, b, n, spi0_pins);
@@ -1121,7 +1199,8 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
1121 1199
1122 case 1: 1200 case 1:
1123 pdev = &atmel_spi1_device; 1201 pdev = &atmel_spi1_device;
1124 select_peripheral(PB(0), PERIPH_B, 0); /* MISO */ 1202 /* pullup MISO so a level is always defined */
1203 select_peripheral(PB(0), PERIPH_B, AT32_GPIOF_PULLUP);
1125 select_peripheral(PB(1), PERIPH_B, 0); /* MOSI */ 1204 select_peripheral(PB(1), PERIPH_B, 0); /* MOSI */
1126 select_peripheral(PB(5), PERIPH_B, 0); /* SCK */ 1205 select_peripheral(PB(5), PERIPH_B, 0); /* SCK */
1127 at32_spi_setup_slaves(1, b, n, spi1_pins); 1206 at32_spi_setup_slaves(1, b, n, spi1_pins);
@@ -1264,7 +1343,8 @@ static struct clk atmel_lcdfb0_pixclk = {
1264 1343
1265struct platform_device *__init 1344struct platform_device *__init
1266at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data, 1345at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
1267 unsigned long fbmem_start, unsigned long fbmem_len) 1346 unsigned long fbmem_start, unsigned long fbmem_len,
1347 unsigned int pin_config)
1268{ 1348{
1269 struct platform_device *pdev; 1349 struct platform_device *pdev;
1270 struct atmel_lcdfb_info *info; 1350 struct atmel_lcdfb_info *info;
@@ -1291,37 +1371,77 @@ at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
1291 switch (id) { 1371 switch (id) {
1292 case 0: 1372 case 0:
1293 pdev = &atmel_lcdfb0_device; 1373 pdev = &atmel_lcdfb0_device;
1294 select_peripheral(PC(19), PERIPH_A, 0); /* CC */ 1374
1295 select_peripheral(PC(20), PERIPH_A, 0); /* HSYNC */ 1375 switch (pin_config) {
1296 select_peripheral(PC(21), PERIPH_A, 0); /* PCLK */ 1376 case 0:
1297 select_peripheral(PC(22), PERIPH_A, 0); /* VSYNC */ 1377 select_peripheral(PC(19), PERIPH_A, 0); /* CC */
1298 select_peripheral(PC(23), PERIPH_A, 0); /* DVAL */ 1378 select_peripheral(PC(20), PERIPH_A, 0); /* HSYNC */
1299 select_peripheral(PC(24), PERIPH_A, 0); /* MODE */ 1379 select_peripheral(PC(21), PERIPH_A, 0); /* PCLK */
1300 select_peripheral(PC(25), PERIPH_A, 0); /* PWR */ 1380 select_peripheral(PC(22), PERIPH_A, 0); /* VSYNC */
1301 select_peripheral(PC(26), PERIPH_A, 0); /* DATA0 */ 1381 select_peripheral(PC(23), PERIPH_A, 0); /* DVAL */
1302 select_peripheral(PC(27), PERIPH_A, 0); /* DATA1 */ 1382 select_peripheral(PC(24), PERIPH_A, 0); /* MODE */
1303 select_peripheral(PC(28), PERIPH_A, 0); /* DATA2 */ 1383 select_peripheral(PC(25), PERIPH_A, 0); /* PWR */
1304 select_peripheral(PC(29), PERIPH_A, 0); /* DATA3 */ 1384 select_peripheral(PC(26), PERIPH_A, 0); /* DATA0 */
1305 select_peripheral(PC(30), PERIPH_A, 0); /* DATA4 */ 1385 select_peripheral(PC(27), PERIPH_A, 0); /* DATA1 */
1306 select_peripheral(PC(31), PERIPH_A, 0); /* DATA5 */ 1386 select_peripheral(PC(28), PERIPH_A, 0); /* DATA2 */
1307 select_peripheral(PD(0), PERIPH_A, 0); /* DATA6 */ 1387 select_peripheral(PC(29), PERIPH_A, 0); /* DATA3 */
1308 select_peripheral(PD(1), PERIPH_A, 0); /* DATA7 */ 1388 select_peripheral(PC(30), PERIPH_A, 0); /* DATA4 */
1309 select_peripheral(PD(2), PERIPH_A, 0); /* DATA8 */ 1389 select_peripheral(PC(31), PERIPH_A, 0); /* DATA5 */
1310 select_peripheral(PD(3), PERIPH_A, 0); /* DATA9 */ 1390 select_peripheral(PD(0), PERIPH_A, 0); /* DATA6 */
1311 select_peripheral(PD(4), PERIPH_A, 0); /* DATA10 */ 1391 select_peripheral(PD(1), PERIPH_A, 0); /* DATA7 */
1312 select_peripheral(PD(5), PERIPH_A, 0); /* DATA11 */ 1392 select_peripheral(PD(2), PERIPH_A, 0); /* DATA8 */
1313 select_peripheral(PD(6), PERIPH_A, 0); /* DATA12 */ 1393 select_peripheral(PD(3), PERIPH_A, 0); /* DATA9 */
1314 select_peripheral(PD(7), PERIPH_A, 0); /* DATA13 */ 1394 select_peripheral(PD(4), PERIPH_A, 0); /* DATA10 */
1315 select_peripheral(PD(8), PERIPH_A, 0); /* DATA14 */ 1395 select_peripheral(PD(5), PERIPH_A, 0); /* DATA11 */
1316 select_peripheral(PD(9), PERIPH_A, 0); /* DATA15 */ 1396 select_peripheral(PD(6), PERIPH_A, 0); /* DATA12 */
1317 select_peripheral(PD(10), PERIPH_A, 0); /* DATA16 */ 1397 select_peripheral(PD(7), PERIPH_A, 0); /* DATA13 */
1318 select_peripheral(PD(11), PERIPH_A, 0); /* DATA17 */ 1398 select_peripheral(PD(8), PERIPH_A, 0); /* DATA14 */
1319 select_peripheral(PD(12), PERIPH_A, 0); /* DATA18 */ 1399 select_peripheral(PD(9), PERIPH_A, 0); /* DATA15 */
1320 select_peripheral(PD(13), PERIPH_A, 0); /* DATA19 */ 1400 select_peripheral(PD(10), PERIPH_A, 0); /* DATA16 */
1321 select_peripheral(PD(14), PERIPH_A, 0); /* DATA20 */ 1401 select_peripheral(PD(11), PERIPH_A, 0); /* DATA17 */
1322 select_peripheral(PD(15), PERIPH_A, 0); /* DATA21 */ 1402 select_peripheral(PD(12), PERIPH_A, 0); /* DATA18 */
1323 select_peripheral(PD(16), PERIPH_A, 0); /* DATA22 */ 1403 select_peripheral(PD(13), PERIPH_A, 0); /* DATA19 */
1324 select_peripheral(PD(17), PERIPH_A, 0); /* DATA23 */ 1404 select_peripheral(PD(14), PERIPH_A, 0); /* DATA20 */
1405 select_peripheral(PD(15), PERIPH_A, 0); /* DATA21 */
1406 select_peripheral(PD(16), PERIPH_A, 0); /* DATA22 */
1407 select_peripheral(PD(17), PERIPH_A, 0); /* DATA23 */
1408 break;
1409 case 1:
1410 select_peripheral(PE(0), PERIPH_B, 0); /* CC */
1411 select_peripheral(PC(20), PERIPH_A, 0); /* HSYNC */
1412 select_peripheral(PC(21), PERIPH_A, 0); /* PCLK */
1413 select_peripheral(PC(22), PERIPH_A, 0); /* VSYNC */
1414 select_peripheral(PE(1), PERIPH_B, 0); /* DVAL */
1415 select_peripheral(PE(2), PERIPH_B, 0); /* MODE */
1416 select_peripheral(PC(25), PERIPH_A, 0); /* PWR */
1417 select_peripheral(PE(3), PERIPH_B, 0); /* DATA0 */
1418 select_peripheral(PE(4), PERIPH_B, 0); /* DATA1 */
1419 select_peripheral(PE(5), PERIPH_B, 0); /* DATA2 */
1420 select_peripheral(PE(6), PERIPH_B, 0); /* DATA3 */
1421 select_peripheral(PE(7), PERIPH_B, 0); /* DATA4 */
1422 select_peripheral(PC(31), PERIPH_A, 0); /* DATA5 */
1423 select_peripheral(PD(0), PERIPH_A, 0); /* DATA6 */
1424 select_peripheral(PD(1), PERIPH_A, 0); /* DATA7 */
1425 select_peripheral(PE(8), PERIPH_B, 0); /* DATA8 */
1426 select_peripheral(PE(9), PERIPH_B, 0); /* DATA9 */
1427 select_peripheral(PE(10), PERIPH_B, 0); /* DATA10 */
1428 select_peripheral(PE(11), PERIPH_B, 0); /* DATA11 */
1429 select_peripheral(PE(12), PERIPH_B, 0); /* DATA12 */
1430 select_peripheral(PD(7), PERIPH_A, 0); /* DATA13 */
1431 select_peripheral(PD(8), PERIPH_A, 0); /* DATA14 */
1432 select_peripheral(PD(9), PERIPH_A, 0); /* DATA15 */
1433 select_peripheral(PE(13), PERIPH_B, 0); /* DATA16 */
1434 select_peripheral(PE(14), PERIPH_B, 0); /* DATA17 */
1435 select_peripheral(PE(15), PERIPH_B, 0); /* DATA18 */
1436 select_peripheral(PE(16), PERIPH_B, 0); /* DATA19 */
1437 select_peripheral(PE(17), PERIPH_B, 0); /* DATA20 */
1438 select_peripheral(PE(18), PERIPH_B, 0); /* DATA21 */
1439 select_peripheral(PD(16), PERIPH_A, 0); /* DATA22 */
1440 select_peripheral(PD(17), PERIPH_A, 0); /* DATA23 */
1441 break;
1442 default:
1443 goto err_invalid_id;
1444 }
1325 1445
1326 clk_set_parent(&atmel_lcdfb0_pixclk, &pll0); 1446 clk_set_parent(&atmel_lcdfb0_pixclk, &pll0);
1327 clk_set_rate(&atmel_lcdfb0_pixclk, clk_get_rate(&pll0)); 1447 clk_set_rate(&atmel_lcdfb0_pixclk, clk_get_rate(&pll0));
@@ -1360,7 +1480,7 @@ static struct resource atmel_pwm0_resource[] __initdata = {
1360 IRQ(24), 1480 IRQ(24),
1361}; 1481};
1362static struct clk atmel_pwm0_mck = { 1482static struct clk atmel_pwm0_mck = {
1363 .name = "mck", 1483 .name = "pwm_clk",
1364 .parent = &pbb_clk, 1484 .parent = &pbb_clk,
1365 .mode = pbb_clk_mode, 1485 .mode = pbb_clk_mode,
1366 .get_rate = pbb_clk_get_rate, 1486 .get_rate = pbb_clk_get_rate,
@@ -1887,6 +2007,7 @@ struct clk *at32_clock_list[] = {
1887 &hmatrix_clk, 2007 &hmatrix_clk,
1888 &ebi_clk, 2008 &ebi_clk,
1889 &hramc_clk, 2009 &hramc_clk,
2010 &sdramc_clk,
1890 &smc0_pclk, 2011 &smc0_pclk,
1891 &smc0_mck, 2012 &smc0_mck,
1892 &pdc_hclk, 2013 &pdc_hclk,
@@ -1900,6 +2021,8 @@ struct clk *at32_clock_list[] = {
1900 &pio4_mck, 2021 &pio4_mck,
1901 &at32_tcb0_t0_clk, 2022 &at32_tcb0_t0_clk,
1902 &at32_tcb1_t0_clk, 2023 &at32_tcb1_t0_clk,
2024 &atmel_psif0_pclk,
2025 &atmel_psif1_pclk,
1903 &atmel_usart0_usart, 2026 &atmel_usart0_usart,
1904 &atmel_usart1_usart, 2027 &atmel_usart1_usart,
1905 &atmel_usart2_usart, 2028 &atmel_usart2_usart,
@@ -1935,16 +2058,7 @@ struct clk *at32_clock_list[] = {
1935}; 2058};
1936unsigned int at32_nr_clocks = ARRAY_SIZE(at32_clock_list); 2059unsigned int at32_nr_clocks = ARRAY_SIZE(at32_clock_list);
1937 2060
1938void __init at32_portmux_init(void) 2061void __init setup_platform(void)
1939{
1940 at32_init_pio(&pio0_device);
1941 at32_init_pio(&pio1_device);
1942 at32_init_pio(&pio2_device);
1943 at32_init_pio(&pio3_device);
1944 at32_init_pio(&pio4_device);
1945}
1946
1947void __init at32_clock_init(void)
1948{ 2062{
1949 u32 cpu_mask = 0, hsb_mask = 0, pba_mask = 0, pbb_mask = 0; 2063 u32 cpu_mask = 0, hsb_mask = 0, pba_mask = 0, pbb_mask = 0;
1950 int i; 2064 int i;
@@ -1999,4 +2113,36 @@ void __init at32_clock_init(void)
1999 pm_writel(HSB_MASK, hsb_mask); 2113 pm_writel(HSB_MASK, hsb_mask);
2000 pm_writel(PBA_MASK, pba_mask); 2114 pm_writel(PBA_MASK, pba_mask);
2001 pm_writel(PBB_MASK, pbb_mask); 2115 pm_writel(PBB_MASK, pbb_mask);
2116
2117 /* Initialize the port muxes */
2118 at32_init_pio(&pio0_device);
2119 at32_init_pio(&pio1_device);
2120 at32_init_pio(&pio2_device);
2121 at32_init_pio(&pio3_device);
2122 at32_init_pio(&pio4_device);
2123}
2124
2125struct gen_pool *sram_pool;
2126
2127static int __init sram_init(void)
2128{
2129 struct gen_pool *pool;
2130
2131 /* 1KiB granularity */
2132 pool = gen_pool_create(10, -1);
2133 if (!pool)
2134 goto fail;
2135
2136 if (gen_pool_add(pool, 0x24000000, 0x8000, -1))
2137 goto err_pool_add;
2138
2139 sram_pool = pool;
2140 return 0;
2141
2142err_pool_add:
2143 gen_pool_destroy(pool);
2144fail:
2145 pr_err("Failed to create SRAM pool\n");
2146 return -ENOMEM;
2002} 2147}
2148core_initcall(sram_init);
diff --git a/arch/avr32/mach-at32ap/intc.c b/arch/avr32/mach-at32ap/intc.c
index 097cf4e84052..994c4545e2b7 100644
--- a/arch/avr32/mach-at32ap/intc.c
+++ b/arch/avr32/mach-at32ap/intc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006 Atmel Corporation 2 * Copyright (C) 2006, 2008 Atmel Corporation
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
@@ -12,14 +12,20 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/sysdev.h>
15 16
16#include <asm/io.h> 17#include <asm/io.h>
17 18
18#include "intc.h" 19#include "intc.h"
19 20
20struct intc { 21struct intc {
21 void __iomem *regs; 22 void __iomem *regs;
22 struct irq_chip chip; 23 struct irq_chip chip;
24 struct sys_device sysdev;
25#ifdef CONFIG_PM
26 unsigned long suspend_ipr;
27 unsigned long saved_ipr[64];
28#endif
23}; 29};
24 30
25extern struct platform_device at32_intc0_device; 31extern struct platform_device at32_intc0_device;
@@ -136,6 +142,74 @@ fail:
136 panic("Interrupt controller initialization failed!\n"); 142 panic("Interrupt controller initialization failed!\n");
137} 143}
138 144
145#ifdef CONFIG_PM
146void intc_set_suspend_handler(unsigned long offset)
147{
148 intc0.suspend_ipr = offset;
149}
150
151static int intc_suspend(struct sys_device *sdev, pm_message_t state)
152{
153 struct intc *intc = container_of(sdev, struct intc, sysdev);
154 int i;
155
156 if (unlikely(!irqs_disabled())) {
157 pr_err("intc_suspend: called with interrupts enabled\n");
158 return -EINVAL;
159 }
160
161 if (unlikely(!intc->suspend_ipr)) {
162 pr_err("intc_suspend: suspend_ipr not initialized\n");
163 return -EINVAL;
164 }
165
166 for (i = 0; i < 64; i++) {
167 intc->saved_ipr[i] = intc_readl(intc, INTPR0 + 4 * i);
168 intc_writel(intc, INTPR0 + 4 * i, intc->suspend_ipr);
169 }
170
171 return 0;
172}
173
174static int intc_resume(struct sys_device *sdev)
175{
176 struct intc *intc = container_of(sdev, struct intc, sysdev);
177 int i;
178
179 WARN_ON(!irqs_disabled());
180
181 for (i = 0; i < 64; i++)
182 intc_writel(intc, INTPR0 + 4 * i, intc->saved_ipr[i]);
183
184 return 0;
185}
186#else
187#define intc_suspend NULL
188#define intc_resume NULL
189#endif
190
191static struct sysdev_class intc_class = {
192 .name = "intc",
193 .suspend = intc_suspend,
194 .resume = intc_resume,
195};
196
197static int __init intc_init_sysdev(void)
198{
199 int ret;
200
201 ret = sysdev_class_register(&intc_class);
202 if (ret)
203 return ret;
204
205 intc0.sysdev.id = 0;
206 intc0.sysdev.cls = &intc_class;
207 ret = sysdev_register(&intc0.sysdev);
208
209 return ret;
210}
211device_initcall(intc_init_sysdev);
212
139unsigned long intc_get_pending(unsigned int group) 213unsigned long intc_get_pending(unsigned int group)
140{ 214{
141 return intc_readl(&intc0, INTREQ0 + 4 * group); 215 return intc_readl(&intc0, INTREQ0 + 4 * group);
diff --git a/arch/avr32/mach-at32ap/at32ap.c b/arch/avr32/mach-at32ap/pdc.c
index 7c4987f3287a..1040bda4fda7 100644
--- a/arch/avr32/mach-at32ap/at32ap.c
+++ b/arch/avr32/mach-at32ap/pdc.c
@@ -11,14 +11,6 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13 13
14#include <asm/arch/init.h>
15
16void __init setup_platform(void)
17{
18 at32_clock_init();
19 at32_portmux_init();
20}
21
22static int __init pdc_probe(struct platform_device *pdev) 14static int __init pdc_probe(struct platform_device *pdev)
23{ 15{
24 struct clk *pclk, *hclk; 16 struct clk *pclk, *hclk;
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index 38a8fa31c0b5..60da03ba7117 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -318,6 +318,8 @@ static void pio_bank_show(struct seq_file *s, struct gpio_chip *chip)
318 const char *label; 318 const char *label;
319 319
320 label = gpiochip_is_requested(chip, i); 320 label = gpiochip_is_requested(chip, i);
321 if (!label && (imr & mask))
322 label = "[irq]";
321 if (!label) 323 if (!label)
322 continue; 324 continue;
323 325
diff --git a/arch/avr32/mach-at32ap/pio.h b/arch/avr32/mach-at32ap/pio.h
index 7795116a483a..9484dfcc08f2 100644
--- a/arch/avr32/mach-at32ap/pio.h
+++ b/arch/avr32/mach-at32ap/pio.h
@@ -57,7 +57,7 @@
57 57
58/* Bitfields in IFDR */ 58/* Bitfields in IFDR */
59 59
60/* Bitfields in ISFR */ 60/* Bitfields in IFSR */
61 61
62/* Bitfields in SODR */ 62/* Bitfields in SODR */
63 63
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index 949e2485e278..0a53ad314ff4 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -12,6 +12,12 @@
12#include <asm/thread_info.h> 12#include <asm/thread_info.h>
13#include <asm/arch/pm.h> 13#include <asm/arch/pm.h>
14 14
15#include "pm.h"
16#include "sdramc.h"
17
18/* Same as 0xfff00000 but fits in a 21 bit signed immediate */
19#define PM_BASE -0x100000
20
15 .section .bss, "wa", @nobits 21 .section .bss, "wa", @nobits
16 .global disable_idle_sleep 22 .global disable_idle_sleep
17 .type disable_idle_sleep, @object 23 .type disable_idle_sleep, @object
@@ -64,3 +70,105 @@ cpu_idle_skip_sleep:
64 unmask_interrupts 70 unmask_interrupts
65 retal r12 71 retal r12
66 .size cpu_idle_skip_sleep, . - cpu_idle_skip_sleep 72 .size cpu_idle_skip_sleep, . - cpu_idle_skip_sleep
73
74#ifdef CONFIG_PM
75 .section .init.text, "ax", @progbits
76
77 .global pm_exception
78 .type pm_exception, @function
79pm_exception:
80 /*
81 * Exceptions are masked when we switch to this handler, so
82 * we'll only get "unrecoverable" exceptions (offset 0.)
83 */
84 sub r12, pc, . - .Lpanic_msg
85 lddpc pc, .Lpanic_addr
86
87 .align 2
88.Lpanic_addr:
89 .long panic
90.Lpanic_msg:
91 .asciz "Unrecoverable exception during suspend\n"
92 .size pm_exception, . - pm_exception
93
94 .global pm_irq0
95 .type pm_irq0, @function
96pm_irq0:
97 /* Disable interrupts and return after the sleep instruction */
98 mfsr r9, SYSREG_RSR_INT0
99 mtsr SYSREG_RAR_INT0, r8
100 sbr r9, SYSREG_GM_OFFSET
101 mtsr SYSREG_RSR_INT0, r9
102 rete
103
104 /*
105 * void cpu_enter_standby(unsigned long sdramc_base)
106 *
107 * Enter PM_SUSPEND_STANDBY mode. At this point, all drivers
108 * are suspended and interrupts are disabled. Interrupts
109 * marked as 'wakeup' event sources may still come along and
110 * get us out of here.
111 *
112 * The SDRAM will be put into self-refresh mode (which does
113 * not require a clock from the CPU), and the CPU will be put
114 * into "frozen" mode (HSB bus stopped). The SDRAM controller
115 * will automatically bring the SDRAM into normal mode on the
116 * first access, and the power manager will automatically
117 * start the HSB and CPU clocks upon a wakeup event.
118 *
119 * This code uses the same "skip sleep" technique as above.
120 * It is very important that we jump directly to
121 * cpu_after_sleep after the sleep instruction since that's
122 * where we'll end up if the interrupt handler decides that we
123 * need to skip the sleep instruction.
124 */
125 .global pm_standby
126 .type pm_standby, @function
127pm_standby:
128 /*
129 * interrupts are already masked at this point, and EVBA
130 * points to pm_exception above.
131 */
132 ld.w r10, r12[SDRAMC_LPR]
133 sub r8, pc, . - 1f /* return address for irq handler */
134 mov r11, SDRAMC_LPR_LPCB_SELF_RFR
135 bfins r10, r11, 0, 2 /* LPCB <- self Refresh */
136 sync 0 /* flush write buffer */
137 st.w r12[SDRAMC_LPR], r11 /* put SDRAM in self-refresh mode */
138 ld.w r11, r12[SDRAMC_LPR]
139 unmask_interrupts
140 sleep CPU_SLEEP_FROZEN
1411: mask_interrupts
142 retal r12
143 .size pm_standby, . - pm_standby
144
145 .global pm_suspend_to_ram
146 .type pm_suspend_to_ram, @function
147pm_suspend_to_ram:
148 /*
149 * interrupts are already masked at this point, and EVBA
150 * points to pm_exception above.
151 */
152 mov r11, 0
153 cache r11[2], 8 /* clean all dcache lines */
154 sync 0 /* flush write buffer */
155 ld.w r10, r12[SDRAMC_LPR]
156 sub r8, pc, . - 1f /* return address for irq handler */
157 mov r11, SDRAMC_LPR_LPCB_SELF_RFR
158 bfins r10, r11, 0, 2 /* LPCB <- self refresh */
159 st.w r12[SDRAMC_LPR], r10 /* put SDRAM in self-refresh mode */
160 ld.w r11, r12[SDRAMC_LPR]
161
162 unmask_interrupts
163 sleep CPU_SLEEP_STOP
1641: mask_interrupts
165
166 retal r12
167 .size pm_suspend_to_ram, . - pm_suspend_to_ram
168
169 .global pm_sram_end
170 .type pm_sram_end, @function
171pm_sram_end:
172 .size pm_sram_end, 0
173
174#endif /* CONFIG_PM */
diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
new file mode 100644
index 000000000000..0b764320135d
--- /dev/null
+++ b/arch/avr32/mach-at32ap/pm.c
@@ -0,0 +1,245 @@
1/*
2 * AVR32 AP Power Management
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10#include <linux/io.h>
11#include <linux/suspend.h>
12#include <linux/vmalloc.h>
13
14#include <asm/cacheflush.h>
15#include <asm/sysreg.h>
16
17#include <asm/arch/pm.h>
18#include <asm/arch/sram.h>
19
20/* FIXME: This is only valid for AP7000 */
21#define SDRAMC_BASE 0xfff03800
22
23#include "sdramc.h"
24
25#define SRAM_PAGE_FLAGS (SYSREG_BIT(TLBELO_D) | SYSREG_BF(SZ, 1) \
26 | SYSREG_BF(AP, 3) | SYSREG_BIT(G))
27
28
29static unsigned long pm_sram_start;
30static size_t pm_sram_size;
31static struct vm_struct *pm_sram_area;
32
33static void (*avr32_pm_enter_standby)(unsigned long sdramc_base);
34static void (*avr32_pm_enter_str)(unsigned long sdramc_base);
35
36/*
37 * Must be called with interrupts disabled. Exceptions will be masked
38 * on return (i.e. all exceptions will be "unrecoverable".)
39 */
40static void *avr32_pm_map_sram(void)
41{
42 unsigned long vaddr;
43 unsigned long page_addr;
44 u32 tlbehi;
45 u32 mmucr;
46
47 vaddr = (unsigned long)pm_sram_area->addr;
48 page_addr = pm_sram_start & PAGE_MASK;
49
50 /*
51 * Mask exceptions and grab the first TLB entry. We won't be
52 * needing it while sleeping.
53 */
54 asm volatile("ssrf %0" : : "i"(SYSREG_EM_OFFSET) : "memory");
55
56 mmucr = sysreg_read(MMUCR);
57 tlbehi = sysreg_read(TLBEHI);
58 sysreg_write(MMUCR, SYSREG_BFINS(DRP, 0, mmucr));
59
60 tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
61 tlbehi |= vaddr & PAGE_MASK;
62 tlbehi |= SYSREG_BIT(TLBEHI_V);
63
64 sysreg_write(TLBELO, page_addr | SRAM_PAGE_FLAGS);
65 sysreg_write(TLBEHI, tlbehi);
66 __builtin_tlbw();
67
68 return (void *)(vaddr + pm_sram_start - page_addr);
69}
70
71/*
72 * Must be called with interrupts disabled. Exceptions will be
73 * unmasked on return.
74 */
75static void avr32_pm_unmap_sram(void)
76{
77 u32 mmucr;
78 u32 tlbehi;
79 u32 tlbarlo;
80
81 /* Going to update TLB entry at index 0 */
82 mmucr = sysreg_read(MMUCR);
83 tlbehi = sysreg_read(TLBEHI);
84 sysreg_write(MMUCR, SYSREG_BFINS(DRP, 0, mmucr));
85
86 /* Clear the "valid" bit */
87 tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
88 sysreg_write(TLBEHI, tlbehi);
89
90 /* Mark it as "not accessed" */
91 tlbarlo = sysreg_read(TLBARLO);
92 sysreg_write(TLBARLO, tlbarlo | 0x80000000U);
93
94 /* Update the TLB */
95 __builtin_tlbw();
96
97 /* Unmask exceptions */
98 asm volatile("csrf %0" : : "i"(SYSREG_EM_OFFSET) : "memory");
99}
100
101static int avr32_pm_valid_state(suspend_state_t state)
102{
103 switch (state) {
104 case PM_SUSPEND_ON:
105 case PM_SUSPEND_STANDBY:
106 case PM_SUSPEND_MEM:
107 return 1;
108
109 default:
110 return 0;
111 }
112}
113
114static int avr32_pm_enter(suspend_state_t state)
115{
116 u32 lpr_saved;
117 u32 evba_saved;
118 void *sram;
119
120 switch (state) {
121 case PM_SUSPEND_STANDBY:
122 sram = avr32_pm_map_sram();
123
124 /* Switch to in-sram exception handlers */
125 evba_saved = sysreg_read(EVBA);
126 sysreg_write(EVBA, (unsigned long)sram);
127
128 /*
129 * Save the LPR register so that we can re-enable
130 * SDRAM Low Power mode on resume.
131 */
132 lpr_saved = sdramc_readl(LPR);
133 pr_debug("%s: Entering standby...\n", __func__);
134 avr32_pm_enter_standby(SDRAMC_BASE);
135 sdramc_writel(LPR, lpr_saved);
136
137 /* Switch back to regular exception handlers */
138 sysreg_write(EVBA, evba_saved);
139
140 avr32_pm_unmap_sram();
141 break;
142
143 case PM_SUSPEND_MEM:
144 sram = avr32_pm_map_sram();
145
146 /* Switch to in-sram exception handlers */
147 evba_saved = sysreg_read(EVBA);
148 sysreg_write(EVBA, (unsigned long)sram);
149
150 /*
151 * Save the LPR register so that we can re-enable
152 * SDRAM Low Power mode on resume.
153 */
154 lpr_saved = sdramc_readl(LPR);
155 pr_debug("%s: Entering suspend-to-ram...\n", __func__);
156 avr32_pm_enter_str(SDRAMC_BASE);
157 sdramc_writel(LPR, lpr_saved);
158
159 /* Switch back to regular exception handlers */
160 sysreg_write(EVBA, evba_saved);
161
162 avr32_pm_unmap_sram();
163 break;
164
165 case PM_SUSPEND_ON:
166 pr_debug("%s: Entering idle...\n", __func__);
167 cpu_enter_idle();
168 break;
169
170 default:
171 pr_debug("%s: Invalid suspend state %d\n", __func__, state);
172 goto out;
173 }
174
175 pr_debug("%s: wakeup\n", __func__);
176
177out:
178 return 0;
179}
180
181static struct platform_suspend_ops avr32_pm_ops = {
182 .valid = avr32_pm_valid_state,
183 .enter = avr32_pm_enter,
184};
185
186static unsigned long avr32_pm_offset(void *symbol)
187{
188 extern u8 pm_exception[];
189
190 return (unsigned long)symbol - (unsigned long)pm_exception;
191}
192
193static int __init avr32_pm_init(void)
194{
195 extern u8 pm_exception[];
196 extern u8 pm_irq0[];
197 extern u8 pm_standby[];
198 extern u8 pm_suspend_to_ram[];
199 extern u8 pm_sram_end[];
200 void *dst;
201
202 /*
203 * To keep things simple, we depend on not needing more than a
204 * single page.
205 */
206 pm_sram_size = avr32_pm_offset(pm_sram_end);
207 if (pm_sram_size > PAGE_SIZE)
208 goto err;
209
210 pm_sram_start = sram_alloc(pm_sram_size);
211 if (!pm_sram_start)
212 goto err_alloc_sram;
213
214 /* Grab a virtual area we can use later on. */
215 pm_sram_area = get_vm_area(pm_sram_size, VM_IOREMAP);
216 if (!pm_sram_area)
217 goto err_vm_area;
218 pm_sram_area->phys_addr = pm_sram_start;
219
220 local_irq_disable();
221 dst = avr32_pm_map_sram();
222 memcpy(dst, pm_exception, pm_sram_size);
223 flush_dcache_region(dst, pm_sram_size);
224 invalidate_icache_region(dst, pm_sram_size);
225 avr32_pm_unmap_sram();
226 local_irq_enable();
227
228 avr32_pm_enter_standby = dst + avr32_pm_offset(pm_standby);
229 avr32_pm_enter_str = dst + avr32_pm_offset(pm_suspend_to_ram);
230 intc_set_suspend_handler(avr32_pm_offset(pm_irq0));
231
232 suspend_set_ops(&avr32_pm_ops);
233
234 printk("AVR32 AP Power Management enabled\n");
235
236 return 0;
237
238err_vm_area:
239 sram_free(pm_sram_start, pm_sram_size);
240err_alloc_sram:
241err:
242 pr_err("AVR32 Power Management initialization failed\n");
243 return -ENOMEM;
244}
245arch_initcall(avr32_pm_init);
diff --git a/arch/avr32/mach-at32ap/sdramc.h b/arch/avr32/mach-at32ap/sdramc.h
new file mode 100644
index 000000000000..66eeaed49073
--- /dev/null
+++ b/arch/avr32/mach-at32ap/sdramc.h
@@ -0,0 +1,76 @@
1/*
2 * Register definitions for the AT32AP SDRAM Controller
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10
11/* Register offsets */
12#define SDRAMC_MR 0x0000
13#define SDRAMC_TR 0x0004
14#define SDRAMC_CR 0x0008
15#define SDRAMC_HSR 0x000c
16#define SDRAMC_LPR 0x0010
17#define SDRAMC_IER 0x0014
18#define SDRAMC_IDR 0x0018
19#define SDRAMC_IMR 0x001c
20#define SDRAMC_ISR 0x0020
21#define SDRAMC_MDR 0x0024
22
23/* MR - Mode Register */
24#define SDRAMC_MR_MODE_NORMAL ( 0 << 0)
25#define SDRAMC_MR_MODE_NOP ( 1 << 0)
26#define SDRAMC_MR_MODE_BANKS_PRECHARGE ( 2 << 0)
27#define SDRAMC_MR_MODE_LOAD_MODE ( 3 << 0)
28#define SDRAMC_MR_MODE_AUTO_REFRESH ( 4 << 0)
29#define SDRAMC_MR_MODE_EXT_LOAD_MODE ( 5 << 0)
30#define SDRAMC_MR_MODE_POWER_DOWN ( 6 << 0)
31
32/* CR - Configuration Register */
33#define SDRAMC_CR_NC_8_BITS ( 0 << 0)
34#define SDRAMC_CR_NC_9_BITS ( 1 << 0)
35#define SDRAMC_CR_NC_10_BITS ( 2 << 0)
36#define SDRAMC_CR_NC_11_BITS ( 3 << 0)
37#define SDRAMC_CR_NR_11_BITS ( 0 << 2)
38#define SDRAMC_CR_NR_12_BITS ( 1 << 2)
39#define SDRAMC_CR_NR_13_BITS ( 2 << 2)
40#define SDRAMC_CR_NB_2_BANKS ( 0 << 4)
41#define SDRAMC_CR_NB_4_BANKS ( 1 << 4)
42#define SDRAMC_CR_CAS(x) ((x) << 5)
43#define SDRAMC_CR_DBW_32_BITS ( 0 << 7)
44#define SDRAMC_CR_DBW_16_BITS ( 1 << 7)
45#define SDRAMC_CR_TWR(x) ((x) << 8)
46#define SDRAMC_CR_TRC(x) ((x) << 12)
47#define SDRAMC_CR_TRP(x) ((x) << 16)
48#define SDRAMC_CR_TRCD(x) ((x) << 20)
49#define SDRAMC_CR_TRAS(x) ((x) << 24)
50#define SDRAMC_CR_TXSR(x) ((x) << 28)
51
52/* HSR - High Speed Register */
53#define SDRAMC_HSR_DA ( 1 << 0)
54
55/* LPR - Low Power Register */
56#define SDRAMC_LPR_LPCB_INHIBIT ( 0 << 0)
57#define SDRAMC_LPR_LPCB_SELF_RFR ( 1 << 0)
58#define SDRAMC_LPR_LPCB_PDOWN ( 2 << 0)
59#define SDRAMC_LPR_LPCB_DEEP_PDOWN ( 3 << 0)
60#define SDRAMC_LPR_PASR(x) ((x) << 4)
61#define SDRAMC_LPR_TCSR(x) ((x) << 8)
62#define SDRAMC_LPR_DS(x) ((x) << 10)
63#define SDRAMC_LPR_TIMEOUT(x) ((x) << 12)
64
65/* IER/IDR/IMR/ISR - Interrupt Enable/Disable/Mask/Status Register */
66#define SDRAMC_ISR_RES ( 1 << 0)
67
68/* MDR - Memory Device Register */
69#define SDRAMC_MDR_MD_SDRAM ( 0 << 0)
70#define SDRAMC_MDR_MD_LOW_PWR_SDRAM ( 1 << 0)
71
72/* Register access macros */
73#define sdramc_readl(reg) \
74 __raw_readl((void __iomem __force *)SDRAMC_BASE + SDRAMC_##reg)
75#define sdramc_writel(reg, value) \
76 __raw_writel(value, (void __iomem __force *)SDRAMC_BASE + SDRAMC_##reg)
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index 0e64ddc45e37..3f90a87527bb 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -11,6 +11,7 @@
11#include <linux/swap.h> 11#include <linux/swap.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/mmzone.h> 13#include <linux/mmzone.h>
14#include <linux/module.h>
14#include <linux/bootmem.h> 15#include <linux/bootmem.h>
15#include <linux/pagemap.h> 16#include <linux/pagemap.h>
16#include <linux/nodemask.h> 17#include <linux/nodemask.h>
@@ -23,11 +24,14 @@
23#include <asm/setup.h> 24#include <asm/setup.h>
24#include <asm/sections.h> 25#include <asm/sections.h>
25 26
27#define __page_aligned __attribute__((section(".data.page_aligned")))
28
26DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 29DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
27 30
28pgd_t swapper_pg_dir[PTRS_PER_PGD]; 31pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned;
29 32
30struct page *empty_zero_page; 33struct page *empty_zero_page;
34EXPORT_SYMBOL(empty_zero_page);
31 35
32/* 36/*
33 * Cache of MMU context last used. 37 * Cache of MMU context last used.
@@ -106,19 +110,9 @@ void __init paging_init(void)
106 zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0), 110 zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0),
107 PAGE_SIZE); 111 PAGE_SIZE);
108 112
109 { 113 sysreg_write(PTBR, (unsigned long)swapper_pg_dir);
110 pgd_t *pg_dir; 114 enable_mmu();
111 int i; 115 printk ("CPU: Paging enabled\n");
112
113 pg_dir = swapper_pg_dir;
114 sysreg_write(PTBR, (unsigned long)pg_dir);
115
116 for (i = 0; i < PTRS_PER_PGD; i++)
117 pgd_val(pg_dir[i]) = 0;
118
119 enable_mmu();
120 printk ("CPU: Paging enabled\n");
121 }
122 116
123 for_each_online_node(nid) { 117 for_each_online_node(nid) {
124 pg_data_t *pgdat = NODE_DATA(nid); 118 pg_data_t *pgdat = NODE_DATA(nid);
diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c
index cd12edbea9f2..06677be98ffb 100644
--- a/arch/avr32/mm/tlb.c
+++ b/arch/avr32/mm/tlb.c
@@ -11,21 +11,21 @@
11 11
12#include <asm/mmu_context.h> 12#include <asm/mmu_context.h>
13 13
14#define _TLBEHI_I 0x100 14/* TODO: Get the correct number from the CONFIG1 system register */
15#define NR_TLB_ENTRIES 32
15 16
16void show_dtlb_entry(unsigned int index) 17static void show_dtlb_entry(unsigned int index)
17{ 18{
18 unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save; 19 u32 tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
19 unsigned long flags; 20 unsigned long flags;
20 21
21 local_irq_save(flags); 22 local_irq_save(flags);
22 mmucr_save = sysreg_read(MMUCR); 23 mmucr_save = sysreg_read(MMUCR);
23 tlbehi_save = sysreg_read(TLBEHI); 24 tlbehi_save = sysreg_read(TLBEHI);
24 mmucr = mmucr_save & 0x13; 25 mmucr = SYSREG_BFINS(DRP, index, mmucr_save);
25 mmucr |= index << 14;
26 sysreg_write(MMUCR, mmucr); 26 sysreg_write(MMUCR, mmucr);
27 27
28 asm volatile("tlbr" : : : "memory"); 28 __builtin_tlbr();
29 cpu_sync_pipeline(); 29 cpu_sync_pipeline();
30 30
31 tlbehi = sysreg_read(TLBEHI); 31 tlbehi = sysreg_read(TLBEHI);
@@ -33,15 +33,17 @@ void show_dtlb_entry(unsigned int index)
33 33
34 printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n", 34 printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
35 index, 35 index,
36 (tlbehi & 0x200)?'1':'0', 36 SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
37 (tlbelo & 0x100)?'1':'0', 37 SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
38 (tlbehi & 0xff), 38 SYSREG_BFEXT(ASID, tlbehi),
39 (tlbehi >> 12), (tlbelo >> 12), 39 SYSREG_BFEXT(VPN, tlbehi) >> 2,
40 (tlbelo >> 4) & 7, (tlbelo >> 2) & 3, 40 SYSREG_BFEXT(PFN, tlbelo) >> 2,
41 (tlbelo & 0x200)?'1':'0', 41 SYSREG_BFEXT(AP, tlbelo),
42 (tlbelo & 0x080)?'1':'0', 42 SYSREG_BFEXT(SZ, tlbelo),
43 (tlbelo & 0x001)?'1':'0', 43 SYSREG_BFEXT(TLBELO_C, tlbelo) ? 'C' : ' ',
44 (tlbelo & 0x002)?'1':'0'); 44 SYSREG_BFEXT(B, tlbelo) ? 'B' : ' ',
45 SYSREG_BFEXT(W, tlbelo) ? 'W' : ' ',
46 SYSREG_BFEXT(TLBELO_D, tlbelo) ? 'D' : ' ');
45 47
46 sysreg_write(MMUCR, mmucr_save); 48 sysreg_write(MMUCR, mmucr_save);
47 sysreg_write(TLBEHI, tlbehi_save); 49 sysreg_write(TLBEHI, tlbehi_save);
@@ -54,29 +56,33 @@ void dump_dtlb(void)
54 unsigned int i; 56 unsigned int i;
55 57
56 printk("ID V G ASID VPN PFN AP SZ C B W D\n"); 58 printk("ID V G ASID VPN PFN AP SZ C B W D\n");
57 for (i = 0; i < 32; i++) 59 for (i = 0; i < NR_TLB_ENTRIES; i++)
58 show_dtlb_entry(i); 60 show_dtlb_entry(i);
59} 61}
60 62
61static unsigned long last_mmucr; 63static void update_dtlb(unsigned long address, pte_t pte)
62
63static inline void set_replacement_pointer(unsigned shift)
64{ 64{
65 unsigned long mmucr, mmucr_save; 65 u32 tlbehi;
66 u32 mmucr;
66 67
67 mmucr = mmucr_save = sysreg_read(MMUCR); 68 /*
69 * We're not changing the ASID here, so no need to flush the
70 * pipeline.
71 */
72 tlbehi = sysreg_read(TLBEHI);
73 tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
74 tlbehi |= address & MMU_VPN_MASK;
75 tlbehi |= SYSREG_BIT(TLBEHI_V);
76 sysreg_write(TLBEHI, tlbehi);
68 77
69 /* Does this mapping already exist? */ 78 /* Does this mapping already exist? */
70 __asm__ __volatile__( 79 __builtin_tlbs();
71 " tlbs\n" 80 mmucr = sysreg_read(MMUCR);
72 " mfsr %0, %1"
73 : "=r"(mmucr)
74 : "i"(SYSREG_MMUCR));
75 81
76 if (mmucr & SYSREG_BIT(MMUCR_N)) { 82 if (mmucr & SYSREG_BIT(MMUCR_N)) {
77 /* Not found -- pick a not-recently-accessed entry */ 83 /* Not found -- pick a not-recently-accessed entry */
78 unsigned long rp; 84 unsigned int rp;
79 unsigned long tlbar = sysreg_read(TLBARLO); 85 u32 tlbar = sysreg_read(TLBARLO);
80 86
81 rp = 32 - fls(tlbar); 87 rp = 32 - fls(tlbar);
82 if (rp == 32) { 88 if (rp == 32) {
@@ -84,30 +90,14 @@ static inline void set_replacement_pointer(unsigned shift)
84 sysreg_write(TLBARLO, -1L); 90 sysreg_write(TLBARLO, -1L);
85 } 91 }
86 92
87 mmucr &= 0x13; 93 mmucr = SYSREG_BFINS(DRP, rp, mmucr);
88 mmucr |= (rp << shift);
89
90 sysreg_write(MMUCR, mmucr); 94 sysreg_write(MMUCR, mmucr);
91 } 95 }
92 96
93 last_mmucr = mmucr;
94}
95
96static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid)
97{
98 unsigned long vpn;
99
100 vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid;
101 sysreg_write(TLBEHI, vpn);
102 cpu_sync_pipeline();
103
104 set_replacement_pointer(14);
105
106 sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK); 97 sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);
107 98
108 /* Let's go */ 99 /* Let's go */
109 asm volatile("nop\n\ttlbw" : : : "memory"); 100 __builtin_tlbw();
110 cpu_sync_pipeline();
111} 101}
112 102
113void update_mmu_cache(struct vm_area_struct *vma, 103void update_mmu_cache(struct vm_area_struct *vma,
@@ -120,39 +110,40 @@ void update_mmu_cache(struct vm_area_struct *vma,
120 return; 110 return;
121 111
122 local_irq_save(flags); 112 local_irq_save(flags);
123 update_dtlb(address, pte, get_asid()); 113 update_dtlb(address, pte);
124 local_irq_restore(flags); 114 local_irq_restore(flags);
125} 115}
126 116
127void __flush_tlb_page(unsigned long asid, unsigned long page) 117static void __flush_tlb_page(unsigned long asid, unsigned long page)
128{ 118{
129 unsigned long mmucr, tlbehi; 119 u32 mmucr, tlbehi;
130 120
131 page |= asid; 121 /*
132 sysreg_write(TLBEHI, page); 122 * Caller is responsible for masking out non-PFN bits in page
133 cpu_sync_pipeline(); 123 * and changing the current ASID if necessary. This means that
134 asm volatile("tlbs"); 124 * we don't need to flush the pipeline after writing TLBEHI.
125 */
126 tlbehi = page | asid;
127 sysreg_write(TLBEHI, tlbehi);
128
129 __builtin_tlbs();
135 mmucr = sysreg_read(MMUCR); 130 mmucr = sysreg_read(MMUCR);
136 131
137 if (!(mmucr & SYSREG_BIT(MMUCR_N))) { 132 if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
138 unsigned long tlbarlo; 133 unsigned int entry;
139 unsigned long entry; 134 u32 tlbarlo;
140 135
141 /* Clear the "valid" bit */ 136 /* Clear the "valid" bit */
142 tlbehi = sysreg_read(TLBEHI);
143 tlbehi &= ~_TLBEHI_VALID;
144 sysreg_write(TLBEHI, tlbehi); 137 sysreg_write(TLBEHI, tlbehi);
145 cpu_sync_pipeline();
146 138
147 /* mark the entry as "not accessed" */ 139 /* mark the entry as "not accessed" */
148 entry = (mmucr >> 14) & 0x3f; 140 entry = SYSREG_BFEXT(DRP, mmucr);
149 tlbarlo = sysreg_read(TLBARLO); 141 tlbarlo = sysreg_read(TLBARLO);
150 tlbarlo |= (0x80000000 >> entry); 142 tlbarlo |= (0x80000000UL >> entry);
151 sysreg_write(TLBARLO, tlbarlo); 143 sysreg_write(TLBARLO, tlbarlo);
152 144
153 /* update the entry with valid bit clear */ 145 /* update the entry with valid bit clear */
154 asm volatile("tlbw"); 146 __builtin_tlbw();
155 cpu_sync_pipeline();
156 } 147 }
157} 148}
158 149
@@ -190,17 +181,22 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
190 181
191 local_irq_save(flags); 182 local_irq_save(flags);
192 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 183 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
184
193 if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */ 185 if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
194 mm->context = NO_CONTEXT; 186 mm->context = NO_CONTEXT;
195 if (mm == current->mm) 187 if (mm == current->mm)
196 activate_context(mm); 188 activate_context(mm);
197 } else { 189 } else {
198 unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK; 190 unsigned long asid;
199 unsigned long saved_asid = MMU_NO_ASID; 191 unsigned long saved_asid;
192
193 asid = mm->context & MMU_CONTEXT_ASID_MASK;
194 saved_asid = MMU_NO_ASID;
200 195
201 start &= PAGE_MASK; 196 start &= PAGE_MASK;
202 end += (PAGE_SIZE - 1); 197 end += (PAGE_SIZE - 1);
203 end &= PAGE_MASK; 198 end &= PAGE_MASK;
199
204 if (mm != current->mm) { 200 if (mm != current->mm) {
205 saved_asid = get_asid(); 201 saved_asid = get_asid();
206 set_asid(asid); 202 set_asid(asid);
@@ -218,33 +214,34 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
218} 214}
219 215
220/* 216/*
221 * TODO: If this is only called for addresses > TASK_SIZE, we can probably 217 * This function depends on the pages to be flushed having the G
222 * skip the ASID stuff and just use the Global bit... 218 * (global) bit set in their pte. This is true for all
219 * PAGE_KERNEL(_RO) pages.
223 */ 220 */
224void flush_tlb_kernel_range(unsigned long start, unsigned long end) 221void flush_tlb_kernel_range(unsigned long start, unsigned long end)
225{ 222{
226 unsigned long flags; 223 unsigned long flags;
227 int size; 224 int size;
228 225
229 local_irq_save(flags);
230 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 226 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
231 if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */ 227 if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
232 flush_tlb_all(); 228 flush_tlb_all();
233 } else { 229 } else {
234 unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK; 230 unsigned long asid;
235 unsigned long saved_asid = get_asid(); 231
232 local_irq_save(flags);
233 asid = get_asid();
236 234
237 start &= PAGE_MASK; 235 start &= PAGE_MASK;
238 end += (PAGE_SIZE - 1); 236 end += (PAGE_SIZE - 1);
239 end &= PAGE_MASK; 237 end &= PAGE_MASK;
240 set_asid(asid); 238
241 while (start < end) { 239 while (start < end) {
242 __flush_tlb_page(asid, start); 240 __flush_tlb_page(asid, start);
243 start += PAGE_SIZE; 241 start += PAGE_SIZE;
244 } 242 }
245 set_asid(saved_asid); 243 local_irq_restore(flags);
246 } 244 }
247 local_irq_restore(flags);
248} 245}
249 246
250void flush_tlb_mm(struct mm_struct *mm) 247void flush_tlb_mm(struct mm_struct *mm)
@@ -280,7 +277,7 @@ static void *tlb_start(struct seq_file *tlb, loff_t *pos)
280{ 277{
281 static unsigned long tlb_index; 278 static unsigned long tlb_index;
282 279
283 if (*pos >= 32) 280 if (*pos >= NR_TLB_ENTRIES)
284 return NULL; 281 return NULL;
285 282
286 tlb_index = 0; 283 tlb_index = 0;
@@ -291,7 +288,7 @@ static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
291{ 288{
292 unsigned long *index = v; 289 unsigned long *index = v;
293 290
294 if (*index >= 31) 291 if (*index >= NR_TLB_ENTRIES - 1)
295 return NULL; 292 return NULL;
296 293
297 ++*pos; 294 ++*pos;
@@ -313,16 +310,16 @@ static int tlb_show(struct seq_file *tlb, void *v)
313 if (*index == 0) 310 if (*index == 0)
314 seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n"); 311 seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n");
315 312
316 BUG_ON(*index >= 32); 313 BUG_ON(*index >= NR_TLB_ENTRIES);
317 314
318 local_irq_save(flags); 315 local_irq_save(flags);
319 mmucr_save = sysreg_read(MMUCR); 316 mmucr_save = sysreg_read(MMUCR);
320 tlbehi_save = sysreg_read(TLBEHI); 317 tlbehi_save = sysreg_read(TLBEHI);
321 mmucr = mmucr_save & 0x13; 318 mmucr = SYSREG_BFINS(DRP, *index, mmucr_save);
322 mmucr |= *index << 14;
323 sysreg_write(MMUCR, mmucr); 319 sysreg_write(MMUCR, mmucr);
324 320
325 asm volatile("tlbr" : : : "memory"); 321 /* TLBR might change the ASID */
322 __builtin_tlbr();
326 cpu_sync_pipeline(); 323 cpu_sync_pipeline();
327 324
328 tlbehi = sysreg_read(TLBEHI); 325 tlbehi = sysreg_read(TLBEHI);
@@ -334,16 +331,18 @@ static int tlb_show(struct seq_file *tlb, void *v)
334 local_irq_restore(flags); 331 local_irq_restore(flags);
335 332
336 seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n", 333 seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
337 *index, 334 *index,
338 (tlbehi & 0x200)?'1':'0', 335 SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
339 (tlbelo & 0x100)?'1':'0', 336 SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
340 (tlbehi & 0xff), 337 SYSREG_BFEXT(ASID, tlbehi),
341 (tlbehi >> 12), (tlbelo >> 12), 338 SYSREG_BFEXT(VPN, tlbehi) >> 2,
342 (tlbelo >> 4) & 7, (tlbelo >> 2) & 3, 339 SYSREG_BFEXT(PFN, tlbelo) >> 2,
343 (tlbelo & 0x200)?'1':'0', 340 SYSREG_BFEXT(AP, tlbelo),
344 (tlbelo & 0x080)?'1':'0', 341 SYSREG_BFEXT(SZ, tlbelo),
345 (tlbelo & 0x001)?'1':'0', 342 SYSREG_BFEXT(TLBELO_C, tlbelo) ? '1' : '0',
346 (tlbelo & 0x002)?'1':'0'); 343 SYSREG_BFEXT(B, tlbelo) ? '1' : '0',
344 SYSREG_BFEXT(W, tlbelo) ? '1' : '0',
345 SYSREG_BFEXT(TLBELO_D, tlbelo) ? '1' : '0');
347 346
348 return 0; 347 return 0;
349} 348}
diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c
index 1b44e9e6dc3b..8598098c0840 100644
--- a/arch/blackfin/mach-bf561/coreb.c
+++ b/arch/blackfin/mach-bf561/coreb.c
@@ -194,6 +194,7 @@ static loff_t coreb_lseek(struct file *file, loff_t offset, int origin)
194 return ret; 194 return ret;
195} 195}
196 196
197/* No BKL needed here */
197static int coreb_open(struct inode *inode, struct file *file) 198static int coreb_open(struct inode *inode, struct file *file)
198{ 199{
199 spin_lock_irq(&coreb_lock); 200 spin_lock_irq(&coreb_lock);
diff --git a/arch/cris/arch-v10/drivers/eeprom.c b/arch/cris/arch-v10/drivers/eeprom.c
index f1cac9dc75b8..1f2ae909d3e6 100644
--- a/arch/cris/arch-v10/drivers/eeprom.c
+++ b/arch/cris/arch-v10/drivers/eeprom.c
@@ -28,6 +28,7 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/smp_lock.h>
31#include <linux/wait.h> 32#include <linux/wait.h>
32#include <asm/uaccess.h> 33#include <asm/uaccess.h>
33#include "i2c.h" 34#include "i2c.h"
@@ -375,10 +376,9 @@ int __init eeprom_init(void)
375} 376}
376 377
377/* Opens the device. */ 378/* Opens the device. */
378
379static int eeprom_open(struct inode * inode, struct file * file) 379static int eeprom_open(struct inode * inode, struct file * file)
380{ 380{
381 381 cycle_kernel_lock();
382 if(iminor(inode) != EEPROM_MINOR_NR) 382 if(iminor(inode) != EEPROM_MINOR_NR)
383 return -ENXIO; 383 return -ENXIO;
384 if(imajor(inode) != EEPROM_MAJOR_NR) 384 if(imajor(inode) != EEPROM_MAJOR_NR)
diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c
index 68a998bd1069..86048e697eb5 100644
--- a/arch/cris/arch-v10/drivers/gpio.c
+++ b/arch/cris/arch-v10/drivers/gpio.c
@@ -16,6 +16,7 @@
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/smp_lock.h>
19#include <linux/string.h> 20#include <linux/string.h>
20#include <linux/poll.h> 21#include <linux/poll.h>
21#include <linux/init.h> 22#include <linux/init.h>
@@ -323,6 +324,7 @@ gpio_open(struct inode *inode, struct file *filp)
323 if (!priv) 324 if (!priv)
324 return -ENOMEM; 325 return -ENOMEM;
325 326
327 lock_kernel();
326 priv->minor = p; 328 priv->minor = p;
327 329
328 /* initialize the io/alarm struct */ 330 /* initialize the io/alarm struct */
@@ -357,6 +359,7 @@ gpio_open(struct inode *inode, struct file *filp)
357 alarmlist = priv; 359 alarmlist = priv;
358 spin_unlock_irqrestore(&gpio_lock, flags); 360 spin_unlock_irqrestore(&gpio_lock, flags);
359 361
362 unlock_kernel();
360 return 0; 363 return 0;
361} 364}
362 365
diff --git a/arch/cris/arch-v10/drivers/i2c.c b/arch/cris/arch-v10/drivers/i2c.c
index d6d22067d0c8..2797e67ce4f4 100644
--- a/arch/cris/arch-v10/drivers/i2c.c
+++ b/arch/cris/arch-v10/drivers/i2c.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/smp_lock.h>
18#include <linux/errno.h> 19#include <linux/errno.h>
19#include <linux/kernel.h> 20#include <linux/kernel.h>
20#include <linux/fs.h> 21#include <linux/fs.h>
@@ -566,6 +567,7 @@ i2c_readreg(unsigned char theSlave, unsigned char theReg)
566static int 567static int
567i2c_open(struct inode *inode, struct file *filp) 568i2c_open(struct inode *inode, struct file *filp)
568{ 569{
570 cycle_kernel_lock();
569 return 0; 571 return 0;
570} 572}
571 573
diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c
index 069546e342c5..91fea623c7c9 100644
--- a/arch/cris/arch-v10/drivers/sync_serial.c
+++ b/arch/cris/arch-v10/drivers/sync_serial.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/poll.h> 22#include <linux/poll.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/smp_lock.h>
24#include <linux/timer.h> 25#include <linux/timer.h>
25#include <asm/irq.h> 26#include <asm/irq.h>
26#include <asm/dma.h> 27#include <asm/dma.h>
@@ -443,18 +444,21 @@ static int sync_serial_open(struct inode *inode, struct file *file)
443 int dev = MINOR(inode->i_rdev); 444 int dev = MINOR(inode->i_rdev);
444 struct sync_port *port; 445 struct sync_port *port;
445 int mode; 446 int mode;
447 int err = -EBUSY;
446 448
449 lock_kernel();
447 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev)); 450 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
448 451
449 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) { 452 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) {
450 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev)); 453 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
451 return -ENODEV; 454 err = -ENODEV;
455 goto out;
452 } 456 }
453 port = &ports[dev]; 457 port = &ports[dev];
454 /* Allow open this device twice (assuming one reader and one writer) */ 458 /* Allow open this device twice (assuming one reader and one writer) */
455 if (port->busy == 2) { 459 if (port->busy == 2) {
456 DEBUG(printk(KERN_DEBUG "Device is busy.. \n")); 460 DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
457 return -EBUSY; 461 goto out;
458 } 462 }
459 if (port->init_irqs) { 463 if (port->init_irqs) {
460 if (port->use_dma) { 464 if (port->use_dma) {
@@ -465,14 +469,14 @@ static int sync_serial_open(struct inode *inode, struct file *file)
465 &ports[0])) { 469 &ports[0])) {
466 printk(KERN_CRIT "Can't alloc " 470 printk(KERN_CRIT "Can't alloc "
467 "sync serial port 1 IRQ"); 471 "sync serial port 1 IRQ");
468 return -EBUSY; 472 goto out;
469 } else if (request_irq(25, rx_interrupt, 0, 473 } else if (request_irq(25, rx_interrupt, 0,
470 "synchronous serial 1 dma rx", 474 "synchronous serial 1 dma rx",
471 &ports[0])) { 475 &ports[0])) {
472 free_irq(24, &port[0]); 476 free_irq(24, &port[0]);
473 printk(KERN_CRIT "Can't alloc " 477 printk(KERN_CRIT "Can't alloc "
474 "sync serial port 1 IRQ"); 478 "sync serial port 1 IRQ");
475 return -EBUSY; 479 goto out;
476 } else if (cris_request_dma(8, 480 } else if (cris_request_dma(8,
477 "synchronous serial 1 dma tr", 481 "synchronous serial 1 dma tr",
478 DMA_VERBOSE_ON_ERROR, 482 DMA_VERBOSE_ON_ERROR,
@@ -482,7 +486,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
482 printk(KERN_CRIT "Can't alloc " 486 printk(KERN_CRIT "Can't alloc "
483 "sync serial port 1 " 487 "sync serial port 1 "
484 "TX DMA channel"); 488 "TX DMA channel");
485 return -EBUSY; 489 goto out;
486 } else if (cris_request_dma(9, 490 } else if (cris_request_dma(9,
487 "synchronous serial 1 dma rec", 491 "synchronous serial 1 dma rec",
488 DMA_VERBOSE_ON_ERROR, 492 DMA_VERBOSE_ON_ERROR,
@@ -493,7 +497,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
493 printk(KERN_CRIT "Can't alloc " 497 printk(KERN_CRIT "Can't alloc "
494 "sync serial port 1 " 498 "sync serial port 1 "
495 "RX DMA channel"); 499 "RX DMA channel");
496 return -EBUSY; 500 goto out;
497 } 501 }
498#endif 502#endif
499 RESET_DMA(8); WAIT_DMA(8); 503 RESET_DMA(8); WAIT_DMA(8);
@@ -520,14 +524,14 @@ static int sync_serial_open(struct inode *inode, struct file *file)
520 &ports[1])) { 524 &ports[1])) {
521 printk(KERN_CRIT "Can't alloc " 525 printk(KERN_CRIT "Can't alloc "
522 "sync serial port 3 IRQ"); 526 "sync serial port 3 IRQ");
523 return -EBUSY; 527 goto out;
524 } else if (request_irq(21, rx_interrupt, 0, 528 } else if (request_irq(21, rx_interrupt, 0,
525 "synchronous serial 3 dma rx", 529 "synchronous serial 3 dma rx",
526 &ports[1])) { 530 &ports[1])) {
527 free_irq(20, &ports[1]); 531 free_irq(20, &ports[1]);
528 printk(KERN_CRIT "Can't alloc " 532 printk(KERN_CRIT "Can't alloc "
529 "sync serial port 3 IRQ"); 533 "sync serial port 3 IRQ");
530 return -EBUSY; 534 goto out;
531 } else if (cris_request_dma(4, 535 } else if (cris_request_dma(4,
532 "synchronous serial 3 dma tr", 536 "synchronous serial 3 dma tr",
533 DMA_VERBOSE_ON_ERROR, 537 DMA_VERBOSE_ON_ERROR,
@@ -537,7 +541,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
537 printk(KERN_CRIT "Can't alloc " 541 printk(KERN_CRIT "Can't alloc "
538 "sync serial port 3 " 542 "sync serial port 3 "
539 "TX DMA channel"); 543 "TX DMA channel");
540 return -EBUSY; 544 goto out;
541 } else if (cris_request_dma(5, 545 } else if (cris_request_dma(5,
542 "synchronous serial 3 dma rec", 546 "synchronous serial 3 dma rec",
543 DMA_VERBOSE_ON_ERROR, 547 DMA_VERBOSE_ON_ERROR,
@@ -548,7 +552,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
548 printk(KERN_CRIT "Can't alloc " 552 printk(KERN_CRIT "Can't alloc "
549 "sync serial port 3 " 553 "sync serial port 3 "
550 "RX DMA channel"); 554 "RX DMA channel");
551 return -EBUSY; 555 goto out;
552 } 556 }
553#endif 557#endif
554 RESET_DMA(4); WAIT_DMA(4); 558 RESET_DMA(4); WAIT_DMA(4);
@@ -581,7 +585,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
581 &ports[0])) { 585 &ports[0])) {
582 printk(KERN_CRIT "Can't alloc " 586 printk(KERN_CRIT "Can't alloc "
583 "sync serial manual irq"); 587 "sync serial manual irq");
584 return -EBUSY; 588 goto out;
585 } 589 }
586 } else if (port == &ports[1]) { 590 } else if (port == &ports[1]) {
587 if (request_irq(8, 591 if (request_irq(8,
@@ -591,7 +595,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
591 &ports[1])) { 595 &ports[1])) {
592 printk(KERN_CRIT "Can't alloc " 596 printk(KERN_CRIT "Can't alloc "
593 "sync serial manual irq"); 597 "sync serial manual irq");
594 return -EBUSY; 598 goto out;
595 } 599 }
596 } 600 }
597 port->init_irqs = 0; 601 port->init_irqs = 0;
@@ -620,7 +624,11 @@ static int sync_serial_open(struct inode *inode, struct file *file)
620 *R_IRQ_MASK1_SET = 1 << port->data_avail_bit; 624 *R_IRQ_MASK1_SET = 1 << port->data_avail_bit;
621 DEBUG(printk(KERN_DEBUG "sser%d rec started\n", dev)); 625 DEBUG(printk(KERN_DEBUG "sser%d rec started\n", dev));
622 } 626 }
623 return 0; 627 ret = 0;
628
629out:
630 unlock_kernel();
631 return ret;
624} 632}
625 633
626static int sync_serial_release(struct inode *inode, struct file *file) 634static int sync_serial_release(struct inode *inode, struct file *file)
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 9fb58202be99..67c61ea86813 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -11,6 +11,7 @@
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/smp_lock.h>
14#include <linux/spinlock.h> 15#include <linux/spinlock.h>
15#include <linux/stddef.h> 16#include <linux/stddef.h>
16 17
@@ -2302,11 +2303,11 @@ static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_
2302 return 0; 2303 return 0;
2303} 2304}
2304 2305
2305
2306static int cryptocop_open(struct inode *inode, struct file *filp) 2306static int cryptocop_open(struct inode *inode, struct file *filp)
2307{ 2307{
2308 int p = iminor(inode); 2308 int p = iminor(inode);
2309 2309
2310 cycle_kernel_lock();
2310 if (p != CRYPTOCOP_MINOR) return -EINVAL; 2311 if (p != CRYPTOCOP_MINOR) return -EINVAL;
2311 2312
2312 filp->private_data = NULL; 2313 filp->private_data = NULL;
diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c
index c2fb7a5c1396..179e7b804331 100644
--- a/arch/cris/arch-v32/drivers/i2c.c
+++ b/arch/cris/arch-v32/drivers/i2c.c
@@ -33,6 +33,7 @@
33#include <linux/fs.h> 33#include <linux/fs.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/smp_lock.h>
36 37
37#include <asm/etraxi2c.h> 38#include <asm/etraxi2c.h>
38 39
@@ -636,6 +637,7 @@ i2c_readreg(unsigned char theSlave, unsigned char theReg)
636static int 637static int
637i2c_open(struct inode *inode, struct file *filp) 638i2c_open(struct inode *inode, struct file *filp)
638{ 639{
640 cycle_kernel_lock();
639 return 0; 641 return 0;
640} 642}
641 643
diff --git a/arch/cris/arch-v32/drivers/mach-a3/gpio.c b/arch/cris/arch-v32/drivers/mach-a3/gpio.c
index de107dad9f4f..ef98608e5067 100644
--- a/arch/cris/arch-v32/drivers/mach-a3/gpio.c
+++ b/arch/cris/arch-v32/drivers/mach-a3/gpio.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <linux/smp_lock.h>
26 27
27#include <asm/etraxgpio.h> 28#include <asm/etraxgpio.h>
28#include <hwregs/reg_map.h> 29#include <hwregs/reg_map.h>
@@ -390,6 +391,8 @@ static int gpio_open(struct inode *inode, struct file *filp)
390 391
391 if (!priv) 392 if (!priv)
392 return -ENOMEM; 393 return -ENOMEM;
394
395 lock_kernel();
393 memset(priv, 0, sizeof(*priv)); 396 memset(priv, 0, sizeof(*priv));
394 397
395 priv->minor = p; 398 priv->minor = p;
@@ -412,6 +415,7 @@ static int gpio_open(struct inode *inode, struct file *filp)
412 spin_unlock_irq(&gpio_lock); 415 spin_unlock_irq(&gpio_lock);
413 } 416 }
414 417
418 unlock_kernel();
415 return 0; 419 return 0;
416} 420}
417 421
diff --git a/arch/cris/arch-v32/drivers/mach-fs/gpio.c b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
index 7863fd4efc2b..fe1fde893887 100644
--- a/arch/cris/arch-v32/drivers/mach-fs/gpio.c
+++ b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
@@ -22,6 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/smp_lock.h>
25 26
26#include <asm/etraxgpio.h> 27#include <asm/etraxgpio.h>
27#include <hwregs/reg_map.h> 28#include <hwregs/reg_map.h>
@@ -426,9 +427,10 @@ gpio_open(struct inode *inode, struct file *filp)
426 return -EINVAL; 427 return -EINVAL;
427 428
428 priv = kmalloc(sizeof(struct gpio_private), GFP_KERNEL); 429 priv = kmalloc(sizeof(struct gpio_private), GFP_KERNEL);
429
430 if (!priv) 430 if (!priv)
431 return -ENOMEM; 431 return -ENOMEM;
432
433 lock_kernel();
432 memset(priv, 0, sizeof(*priv)); 434 memset(priv, 0, sizeof(*priv));
433 435
434 priv->minor = p; 436 priv->minor = p;
@@ -449,6 +451,7 @@ gpio_open(struct inode *inode, struct file *filp)
449 alarmlist = priv; 451 alarmlist = priv;
450 spin_unlock_irq(&alarm_lock); 452 spin_unlock_irq(&alarm_lock);
451 453
454 unlock_kernel();
452 return 0; 455 return 0;
453} 456}
454 457
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 47c377df6fb3..d2a0fbf5341f 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -14,6 +14,7 @@
14#include <linux/major.h> 14#include <linux/major.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/smp_lock.h>
17#include <linux/interrupt.h> 18#include <linux/interrupt.h>
18#include <linux/poll.h> 19#include <linux/poll.h>
19#include <linux/init.h> 20#include <linux/init.h>
@@ -429,23 +430,26 @@ static inline int sync_data_avail_to_end(struct sync_port *port)
429static int sync_serial_open(struct inode *inode, struct file *file) 430static int sync_serial_open(struct inode *inode, struct file *file)
430{ 431{
431 int dev = iminor(inode); 432 int dev = iminor(inode);
433 int ret = -EBUSY;
432 sync_port *port; 434 sync_port *port;
433 reg_dma_rw_cfg cfg = {.en = regk_dma_yes}; 435 reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
434 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes}; 436 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
435 437
438 lock_kernel();
436 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev)); 439 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
437 440
438 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) 441 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
439 { 442 {
440 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev)); 443 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
441 return -ENODEV; 444 ret = -ENODEV;
445 goto out;
442 } 446 }
443 port = &ports[dev]; 447 port = &ports[dev];
444 /* Allow open this device twice (assuming one reader and one writer) */ 448 /* Allow open this device twice (assuming one reader and one writer) */
445 if (port->busy == 2) 449 if (port->busy == 2)
446 { 450 {
447 DEBUG(printk(KERN_DEBUG "Device is busy.. \n")); 451 DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
448 return -EBUSY; 452 goto out;
449 } 453 }
450 454
451 455
@@ -459,7 +463,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
459 "synchronous serial 0 dma tr", 463 "synchronous serial 0 dma tr",
460 &ports[0])) { 464 &ports[0])) {
461 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); 465 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
462 return -EBUSY; 466 goto out;
463 } else if (request_irq(DMA_IN_INTR_VECT, 467 } else if (request_irq(DMA_IN_INTR_VECT,
464 rx_interrupt, 468 rx_interrupt,
465 0, 469 0,
@@ -467,7 +471,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
467 &ports[0])) { 471 &ports[0])) {
468 free_irq(DMA_OUT_INTR_VECT, &port[0]); 472 free_irq(DMA_OUT_INTR_VECT, &port[0]);
469 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); 473 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
470 return -EBUSY; 474 goto out;
471 } else if (crisv32_request_dma(OUT_DMA_NBR, 475 } else if (crisv32_request_dma(OUT_DMA_NBR,
472 "synchronous serial 0 dma tr", 476 "synchronous serial 0 dma tr",
473 DMA_VERBOSE_ON_ERROR, 477 DMA_VERBOSE_ON_ERROR,
@@ -476,7 +480,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
476 free_irq(DMA_OUT_INTR_VECT, &port[0]); 480 free_irq(DMA_OUT_INTR_VECT, &port[0]);
477 free_irq(DMA_IN_INTR_VECT, &port[0]); 481 free_irq(DMA_IN_INTR_VECT, &port[0]);
478 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel"); 482 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
479 return -EBUSY; 483 goto out;
480 } else if (crisv32_request_dma(IN_DMA_NBR, 484 } else if (crisv32_request_dma(IN_DMA_NBR,
481 "synchronous serial 0 dma rec", 485 "synchronous serial 0 dma rec",
482 DMA_VERBOSE_ON_ERROR, 486 DMA_VERBOSE_ON_ERROR,
@@ -486,7 +490,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
486 free_irq(DMA_OUT_INTR_VECT, &port[0]); 490 free_irq(DMA_OUT_INTR_VECT, &port[0]);
487 free_irq(DMA_IN_INTR_VECT, &port[0]); 491 free_irq(DMA_IN_INTR_VECT, &port[0]);
488 printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel"); 492 printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
489 return -EBUSY; 493 goto out;
490 } 494 }
491#endif 495#endif
492 } 496 }
@@ -499,7 +503,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
499 "synchronous serial 1 dma tr", 503 "synchronous serial 1 dma tr",
500 &ports[1])) { 504 &ports[1])) {
501 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ"); 505 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
502 return -EBUSY; 506 goto out;
503 } else if (request_irq(DMA7_INTR_VECT, 507 } else if (request_irq(DMA7_INTR_VECT,
504 rx_interrupt, 508 rx_interrupt,
505 0, 509 0,
@@ -507,7 +511,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
507 &ports[1])) { 511 &ports[1])) {
508 free_irq(DMA6_INTR_VECT, &ports[1]); 512 free_irq(DMA6_INTR_VECT, &ports[1]);
509 printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ"); 513 printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ");
510 return -EBUSY; 514 goto out;
511 } else if (crisv32_request_dma( 515 } else if (crisv32_request_dma(
512 SYNC_SER1_TX_DMA_NBR, 516 SYNC_SER1_TX_DMA_NBR,
513 "synchronous serial 1 dma tr", 517 "synchronous serial 1 dma tr",
@@ -517,7 +521,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
517 free_irq(DMA6_INTR_VECT, &ports[1]); 521 free_irq(DMA6_INTR_VECT, &ports[1]);
518 free_irq(DMA7_INTR_VECT, &ports[1]); 522 free_irq(DMA7_INTR_VECT, &ports[1]);
519 printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel"); 523 printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel");
520 return -EBUSY; 524 goto out;
521 } else if (crisv32_request_dma( 525 } else if (crisv32_request_dma(
522 SYNC_SER1_RX_DMA_NBR, 526 SYNC_SER1_RX_DMA_NBR,
523 "synchronous serial 3 dma rec", 527 "synchronous serial 3 dma rec",
@@ -528,7 +532,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
528 free_irq(DMA6_INTR_VECT, &ports[1]); 532 free_irq(DMA6_INTR_VECT, &ports[1]);
529 free_irq(DMA7_INTR_VECT, &ports[1]); 533 free_irq(DMA7_INTR_VECT, &ports[1]);
530 printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel"); 534 printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel");
531 return -EBUSY; 535 goto out;
532 } 536 }
533#endif 537#endif
534 } 538 }
@@ -554,7 +558,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
554 "synchronous serial manual irq", 558 "synchronous serial manual irq",
555 &ports[0])) { 559 &ports[0])) {
556 printk("Can't allocate sync serial manual irq"); 560 printk("Can't allocate sync serial manual irq");
557 return -EBUSY; 561 goto out;
558 } 562 }
559 } 563 }
560#ifdef CONFIG_ETRAXFS 564#ifdef CONFIG_ETRAXFS
@@ -565,7 +569,7 @@ static int sync_serial_open(struct inode *inode, struct file *file)
565 "synchronous serial manual irq", 569 "synchronous serial manual irq",
566 &ports[1])) { 570 &ports[1])) {
567 printk(KERN_CRIT "Can't allocate sync serial manual irq"); 571 printk(KERN_CRIT "Can't allocate sync serial manual irq");
568 return -EBUSY; 572 goto out;
569 } 573 }
570 } 574 }
571#endif 575#endif
@@ -578,7 +582,10 @@ static int sync_serial_open(struct inode *inode, struct file *file)
578 } /* port->init_irqs */ 582 } /* port->init_irqs */
579 583
580 port->busy++; 584 port->busy++;
581 return 0; 585 ret = 0;
586out:
587 unlock_kernel();
588 return ret;
582} 589}
583 590
584static int sync_serial_release(struct inode *inode, struct file *file) 591static int sync_serial_release(struct inode *inode, struct file *file)
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index a812d03879f8..e8ac3f7d72df 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -10,6 +10,7 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/miscdevice.h> 11#include <linux/miscdevice.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/smp_lock.h>
13#include <linux/ioport.h> 14#include <linux/ioport.h>
14#include <linux/capability.h> 15#include <linux/capability.h>
15#include <linux/fcntl.h> 16#include <linux/fcntl.h>
@@ -140,10 +141,14 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
140 141
141static int rtc_open(struct inode *inode, struct file *file) 142static int rtc_open(struct inode *inode, struct file *file)
142{ 143{
143 if(rtc_status) 144 lock_kernel();
145 if(rtc_status) {
146 unlock_kernel();
144 return -EBUSY; 147 return -EBUSY;
148 }
145 149
146 rtc_status = 1; 150 rtc_status = 1;
151 unlock_kernel();
147 return 0; 152 return 0;
148} 153}
149 154
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index e341387787ab..432a9f13b2ed 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -10,6 +10,7 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/miscdevice.h> 11#include <linux/miscdevice.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/smp_lock.h>
13#include <linux/ioport.h> 14#include <linux/ioport.h>
14#include <linux/capability.h> 15#include <linux/capability.h>
15#include <linux/fcntl.h> 16#include <linux/fcntl.h>
@@ -127,11 +128,14 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
127 128
128static int rtc_open(struct inode *inode, struct file *file) 129static int rtc_open(struct inode *inode, struct file *file)
129{ 130{
131 lock_kernel();
130 if( !atomic_dec_and_test(&rtc_ready) ) 132 if( !atomic_dec_and_test(&rtc_ready) )
131 { 133 {
132 atomic_inc( &rtc_ready ); 134 atomic_inc( &rtc_ready );
135 unlock_kernel();
133 return -EBUSY; 136 return -EBUSY;
134 } 137 }
138 unlock_kernel();
135 139
136 return 0; 140 return 0;
137} 141}
diff --git a/arch/mips/basler/excite/excite_iodev.c b/arch/mips/basler/excite/excite_iodev.c
index 476d20e08d0e..a1e3526b4a94 100644
--- a/arch/mips/basler/excite/excite_iodev.c
+++ b/arch/mips/basler/excite/excite_iodev.c
@@ -26,6 +26,7 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/miscdevice.h> 28#include <linux/miscdevice.h>
29#include <linux/smp_lock.h>
29 30
30#include "excite_iodev.h" 31#include "excite_iodev.h"
31 32
@@ -110,8 +111,14 @@ static int __exit iodev_remove(struct device *dev)
110 111
111static int iodev_open(struct inode *i, struct file *f) 112static int iodev_open(struct inode *i, struct file *f)
112{ 113{
113 return request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED, 114 int ret;
115
116 lock_kernel();
117 ret = request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED,
114 iodev_name, &miscdev); 118 iodev_name, &miscdev);
119 unlock_kernel();
120
121 return ret;
115} 122}
116 123
117static int iodev_release(struct inode *i, struct file *f) 124static int iodev_release(struct inode *i, struct file *f)
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index b88f1c18ff4d..b55641961232 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -28,6 +28,7 @@
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <linux/elf.h> 29#include <linux/elf.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/smp_lock.h>
31#include <linux/syscalls.h> 32#include <linux/syscalls.h>
32#include <linux/moduleloader.h> 33#include <linux/moduleloader.h>
33#include <linux/interrupt.h> 34#include <linux/interrupt.h>
@@ -392,8 +393,12 @@ out:
392static int file_open(struct inode *inode, struct file *filp) 393static int file_open(struct inode *inode, struct file *filp)
393{ 394{
394 int minor = iminor(inode); 395 int minor = iminor(inode);
396 int err;
395 397
396 return rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1); 398 lock_kernel();
399 err = rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
400 unlock_kernel();
401 return err;
397} 402}
398 403
399static int file_release(struct inode *inode, struct file *filp) 404static int file_release(struct inode *inode, struct file *filp)
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2794501ff302..972b2d2b8401 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -38,6 +38,7 @@
38#include <linux/vmalloc.h> 38#include <linux/vmalloc.h>
39#include <linux/elf.h> 39#include <linux/elf.h>
40#include <linux/seq_file.h> 40#include <linux/seq_file.h>
41#include <linux/smp_lock.h>
41#include <linux/syscalls.h> 42#include <linux/syscalls.h>
42#include <linux/moduleloader.h> 43#include <linux/moduleloader.h>
43#include <linux/interrupt.h> 44#include <linux/interrupt.h>
@@ -1050,17 +1051,20 @@ static int vpe_open(struct inode *inode, struct file *filp)
1050 enum vpe_state state; 1051 enum vpe_state state;
1051 struct vpe_notifications *not; 1052 struct vpe_notifications *not;
1052 struct vpe *v; 1053 struct vpe *v;
1053 int ret; 1054 int ret, err = 0;
1054 1055
1056 lock_kernel();
1055 if (minor != iminor(inode)) { 1057 if (minor != iminor(inode)) {
1056 /* assume only 1 device at the moment. */ 1058 /* assume only 1 device at the moment. */
1057 printk(KERN_WARNING "VPE loader: only vpe1 is supported\n"); 1059 printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
1058 return -ENODEV; 1060 err = -ENODEV;
1061 goto out;
1059 } 1062 }
1060 1063
1061 if ((v = get_vpe(tclimit)) == NULL) { 1064 if ((v = get_vpe(tclimit)) == NULL) {
1062 printk(KERN_WARNING "VPE loader: unable to get vpe\n"); 1065 printk(KERN_WARNING "VPE loader: unable to get vpe\n");
1063 return -ENODEV; 1066 err = -ENODEV;
1067 goto out;
1064 } 1068 }
1065 1069
1066 state = xchg(&v->state, VPE_STATE_INUSE); 1070 state = xchg(&v->state, VPE_STATE_INUSE);
@@ -1100,6 +1104,8 @@ static int vpe_open(struct inode *inode, struct file *filp)
1100 v->shared_ptr = NULL; 1104 v->shared_ptr = NULL;
1101 v->__start = 0; 1105 v->__start = 0;
1102 1106
1107out:
1108 unlock_kernel();
1103 return 0; 1109 return 0;
1104} 1110}
1105 1111
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index 63b444eaf01e..28b012ab8dcb 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -28,6 +28,7 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/smp_lock.h>
31#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
32#include <linux/fs.h> 33#include <linux/fs.h>
33#include <linux/errno.h> 34#include <linux/errno.h>
@@ -402,18 +403,26 @@ static int sbprof_zbprof_stop(void)
402static int sbprof_tb_open(struct inode *inode, struct file *filp) 403static int sbprof_tb_open(struct inode *inode, struct file *filp)
403{ 404{
404 int minor; 405 int minor;
406 int err = 0;
405 407
408 lock_kernel();
406 minor = iminor(inode); 409 minor = iminor(inode);
407 if (minor != 0) 410 if (minor != 0) {
408 return -ENODEV; 411 err = -ENODEV;
412 goto out;
413 }
409 414
410 if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED) 415 if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED) {
411 return -EBUSY; 416 err = -EBUSY;
417 goto out;
418 }
412 419
413 memset(&sbp, 0, sizeof(struct sbprof_tb)); 420 memset(&sbp, 0, sizeof(struct sbprof_tb));
414 sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES); 421 sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
415 if (!sbp.sbprof_tbbuf) 422 if (!sbp.sbprof_tbbuf) {
416 return -ENOMEM; 423 err = -ENOMEM;
424 goto out;
425 }
417 memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES); 426 memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
418 init_waitqueue_head(&sbp.tb_sync); 427 init_waitqueue_head(&sbp.tb_sync);
419 init_waitqueue_head(&sbp.tb_read); 428 init_waitqueue_head(&sbp.tb_read);
@@ -421,7 +430,9 @@ static int sbprof_tb_open(struct inode *inode, struct file *filp)
421 430
422 sbp.open = SB_OPEN; 431 sbp.open = SB_OPEN;
423 432
424 return 0; 433 out:
434 unlock_kernel();
435 return err;
425} 436}
426 437
427static int sbprof_tb_release(struct inode *inode, struct file *filp) 438static int sbprof_tb_release(struct inode *inode, struct file *filp)
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 89d6d5ad44b5..f696f57faa15 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -46,6 +46,7 @@
46#include <linux/init.h> 46#include <linux/init.h>
47#include <linux/proc_fs.h> 47#include <linux/proc_fs.h>
48#include <linux/miscdevice.h> 48#include <linux/miscdevice.h>
49#include <linux/smp_lock.h>
49#include <linux/spinlock.h> 50#include <linux/spinlock.h>
50 51
51#include <asm/uaccess.h> 52#include <asm/uaccess.h>
@@ -260,13 +261,16 @@ printk("Preparing to start counters\n");
260 */ 261 */
261static int perf_open(struct inode *inode, struct file *file) 262static int perf_open(struct inode *inode, struct file *file)
262{ 263{
264 lock_kernel();
263 spin_lock(&perf_lock); 265 spin_lock(&perf_lock);
264 if (perf_enabled) { 266 if (perf_enabled) {
265 spin_unlock(&perf_lock); 267 spin_unlock(&perf_lock);
268 unlock_kernel();
266 return -EBUSY; 269 return -EBUSY;
267 } 270 }
268 perf_enabled = 1; 271 perf_enabled = 1;
269 spin_unlock(&perf_lock); 272 spin_unlock(&perf_lock);
273 unlock_kernel();
270 274
271 return 0; 275 return 0;
272} 276}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 107e492cb47e..5dc8f8028d52 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -146,6 +146,7 @@ config MATHEMU
146config COMPAT 146config COMPAT
147 bool "Kernel support for 31 bit emulation" 147 bool "Kernel support for 31 bit emulation"
148 depends on 64BIT 148 depends on 64BIT
149 select COMPAT_BINFMT_ELF
149 help 150 help
150 Select this option if you want to enable your system kernel to 151 Select this option if you want to enable your system kernel to
151 handle system-calls from ELF binaries for 31 bit ESA. This option 152 handle system-calls from ELF binaries for 31 bit ESA. This option
@@ -312,6 +313,10 @@ config ARCH_SPARSEMEM_DEFAULT
312config ARCH_SELECT_MEMORY_MODEL 313config ARCH_SELECT_MEMORY_MODEL
313 def_bool y 314 def_bool y
314 315
316config ARCH_ENABLE_MEMORY_HOTPLUG
317 def_bool y
318 depends on SPARSEMEM
319
315source "mm/Kconfig" 320source "mm/Kconfig"
316 321
317comment "I/O subsystem configuration" 322comment "I/O subsystem configuration"
@@ -344,6 +349,22 @@ config QDIO_DEBUG
344 349
345 If unsure, say N. 350 If unsure, say N.
346 351
352config CHSC_SCH
353 tristate "Support for CHSC subchannels"
354 help
355 This driver allows usage of CHSC subchannels. A CHSC subchannel
356 is usually present on LPAR only.
357 The driver creates a device /dev/chsc, which may be used to
358 obtain I/O configuration information about the machine and
359 to issue asynchronous chsc commands (DANGEROUS).
360 You will usually only want to use this interface on a special
361 LPAR designated for system management.
362
363 To compile this driver as a module, choose M here: the
364 module will be called chsc_sch.
365
366 If unsure, say N.
367
347comment "Misc" 368comment "Misc"
348 369
349config IPL 370config IPL
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index db3ae8505103..17a2636fec0a 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
@@ -3,13 +3,11 @@
3 * 3 *
4 * Definitions and interface for Linux - z/VM Monitor Stream. 4 * Definitions and interface for Linux - z/VM Monitor Stream.
5 * 5 *
6 * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH. 6 * Copyright IBM Corp. 2003, 2008
7 * 7 *
8 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> 8 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
9 */ 9 */
10 10
11//#define APPLDATA_DEBUG /* Debug messages on/off */
12
13#define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */ 11#define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */
14 /* data buffer */ 12 /* data buffer */
15#define APPLDATA_MAX_PROCS 100 13#define APPLDATA_MAX_PROCS 100
@@ -32,12 +30,6 @@
32#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x) 30#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x)
33#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x) 31#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x)
34 32
35#ifdef APPLDATA_DEBUG
36#define P_DEBUG(x...) printk(KERN_DEBUG MY_PRINT_NAME " debug: " x)
37#else
38#define P_DEBUG(x...) do {} while (0)
39#endif
40
41struct appldata_ops { 33struct appldata_ops {
42 struct list_head list; 34 struct list_head list;
43 struct ctl_table_header *sysctl_header; 35 struct ctl_table_header *sysctl_header;
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index ad40729bec3d..9cb3d92447a3 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -5,7 +5,7 @@
5 * Exports appldata_register_ops() and appldata_unregister_ops() for the 5 * Exports appldata_register_ops() and appldata_unregister_ops() for the
6 * data gathering modules. 6 * data gathering modules.
7 * 7 *
8 * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH. 8 * Copyright IBM Corp. 2003, 2008
9 * 9 *
10 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> 10 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
11 */ 11 */
@@ -108,9 +108,6 @@ static LIST_HEAD(appldata_ops_list);
108 */ 108 */
109static void appldata_timer_function(unsigned long data) 109static void appldata_timer_function(unsigned long data)
110{ 110{
111 P_DEBUG(" -= Timer =-\n");
112 P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
113 atomic_read(&appldata_expire_count));
114 if (atomic_dec_and_test(&appldata_expire_count)) { 111 if (atomic_dec_and_test(&appldata_expire_count)) {
115 atomic_set(&appldata_expire_count, num_online_cpus()); 112 atomic_set(&appldata_expire_count, num_online_cpus());
116 queue_work(appldata_wq, (struct work_struct *) data); 113 queue_work(appldata_wq, (struct work_struct *) data);
@@ -128,14 +125,11 @@ static void appldata_work_fn(struct work_struct *work)
128 struct appldata_ops *ops; 125 struct appldata_ops *ops;
129 int i; 126 int i;
130 127
131 P_DEBUG(" -= Work Queue =-\n");
132 i = 0; 128 i = 0;
133 get_online_cpus(); 129 get_online_cpus();
134 spin_lock(&appldata_ops_lock); 130 spin_lock(&appldata_ops_lock);
135 list_for_each(lh, &appldata_ops_list) { 131 list_for_each(lh, &appldata_ops_list) {
136 ops = list_entry(lh, struct appldata_ops, list); 132 ops = list_entry(lh, struct appldata_ops, list);
137 P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n",
138 ++i, ops->active, ops->name);
139 if (ops->active == 1) { 133 if (ops->active == 1) {
140 ops->callback(ops->data); 134 ops->callback(ops->data);
141 } 135 }
@@ -212,7 +206,6 @@ __appldata_vtimer_setup(int cmd)
212 0, 1); 206 0, 1);
213 } 207 }
214 appldata_timer_active = 1; 208 appldata_timer_active = 1;
215 P_INFO("Monitoring timer started.\n");
216 break; 209 break;
217 case APPLDATA_DEL_TIMER: 210 case APPLDATA_DEL_TIMER:
218 for_each_online_cpu(i) 211 for_each_online_cpu(i)
@@ -221,7 +214,6 @@ __appldata_vtimer_setup(int cmd)
221 break; 214 break;
222 appldata_timer_active = 0; 215 appldata_timer_active = 0;
223 atomic_set(&appldata_expire_count, num_online_cpus()); 216 atomic_set(&appldata_expire_count, num_online_cpus());
224 P_INFO("Monitoring timer stopped.\n");
225 break; 217 break;
226 case APPLDATA_MOD_TIMER: 218 case APPLDATA_MOD_TIMER:
227 per_cpu_interval = (u64) (appldata_interval*1000 / 219 per_cpu_interval = (u64) (appldata_interval*1000 /
@@ -313,10 +305,8 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
313 } 305 }
314 interval = 0; 306 interval = 0;
315 sscanf(buf, "%i", &interval); 307 sscanf(buf, "%i", &interval);
316 if (interval <= 0) { 308 if (interval <= 0)
317 P_ERROR("Timer CPU interval has to be > 0!\n");
318 return -EINVAL; 309 return -EINVAL;
319 }
320 310
321 get_online_cpus(); 311 get_online_cpus();
322 spin_lock(&appldata_timer_lock); 312 spin_lock(&appldata_timer_lock);
@@ -324,9 +314,6 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
324 __appldata_vtimer_setup(APPLDATA_MOD_TIMER); 314 __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
325 spin_unlock(&appldata_timer_lock); 315 spin_unlock(&appldata_timer_lock);
326 put_online_cpus(); 316 put_online_cpus();
327
328 P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
329 interval);
330out: 317out:
331 *lenp = len; 318 *lenp = len;
332 *ppos += len; 319 *ppos += len;
@@ -406,23 +393,16 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
406 P_ERROR("START DIAG 0xDC for %s failed, " 393 P_ERROR("START DIAG 0xDC for %s failed, "
407 "return code: %d\n", ops->name, rc); 394 "return code: %d\n", ops->name, rc);
408 module_put(ops->owner); 395 module_put(ops->owner);
409 } else { 396 } else
410 P_INFO("Monitoring %s data enabled, "
411 "DIAG 0xDC started.\n", ops->name);
412 ops->active = 1; 397 ops->active = 1;
413 }
414 } else if ((buf[0] == '0') && (ops->active == 1)) { 398 } else if ((buf[0] == '0') && (ops->active == 1)) {
415 ops->active = 0; 399 ops->active = 0;
416 rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, 400 rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
417 (unsigned long) ops->data, ops->size, 401 (unsigned long) ops->data, ops->size,
418 ops->mod_lvl); 402 ops->mod_lvl);
419 if (rc != 0) { 403 if (rc != 0)
420 P_ERROR("STOP DIAG 0xDC for %s failed, " 404 P_ERROR("STOP DIAG 0xDC for %s failed, "
421 "return code: %d\n", ops->name, rc); 405 "return code: %d\n", ops->name, rc);
422 } else {
423 P_INFO("Monitoring %s data disabled, "
424 "DIAG 0xDC stopped.\n", ops->name);
425 }
426 module_put(ops->owner); 406 module_put(ops->owner);
427 } 407 }
428 spin_unlock(&appldata_ops_lock); 408 spin_unlock(&appldata_ops_lock);
@@ -468,7 +448,6 @@ int appldata_register_ops(struct appldata_ops *ops)
468 ops->sysctl_header = register_sysctl_table(ops->ctl_table); 448 ops->sysctl_header = register_sysctl_table(ops->ctl_table);
469 if (!ops->sysctl_header) 449 if (!ops->sysctl_header)
470 goto out; 450 goto out;
471 P_INFO("%s-ops registered!\n", ops->name);
472 return 0; 451 return 0;
473out: 452out:
474 spin_lock(&appldata_ops_lock); 453 spin_lock(&appldata_ops_lock);
@@ -490,7 +469,6 @@ void appldata_unregister_ops(struct appldata_ops *ops)
490 spin_unlock(&appldata_ops_lock); 469 spin_unlock(&appldata_ops_lock);
491 unregister_sysctl_table(ops->sysctl_header); 470 unregister_sysctl_table(ops->sysctl_header);
492 kfree(ops->ctl_table); 471 kfree(ops->ctl_table);
493 P_INFO("%s-ops unregistered!\n", ops->name);
494} 472}
495/********************** module-ops management <END> **************************/ 473/********************** module-ops management <END> **************************/
496 474
@@ -553,14 +531,9 @@ static int __init appldata_init(void)
553{ 531{
554 int i; 532 int i;
555 533
556 P_DEBUG("sizeof(parameter_list) = %lu\n",
557 sizeof(struct appldata_parameter_list));
558
559 appldata_wq = create_singlethread_workqueue("appldata"); 534 appldata_wq = create_singlethread_workqueue("appldata");
560 if (!appldata_wq) { 535 if (!appldata_wq)
561 P_ERROR("Could not create work queue\n");
562 return -ENOMEM; 536 return -ENOMEM;
563 }
564 537
565 get_online_cpus(); 538 get_online_cpus();
566 for_each_online_cpu(i) 539 for_each_online_cpu(i)
@@ -571,8 +544,6 @@ static int __init appldata_init(void)
571 register_hotcpu_notifier(&appldata_nb); 544 register_hotcpu_notifier(&appldata_nb);
572 545
573 appldata_sysctl_header = register_sysctl_table(appldata_dir_table); 546 appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
574
575 P_DEBUG("Base interface initialized.\n");
576 return 0; 547 return 0;
577} 548}
578 549
@@ -584,7 +555,9 @@ EXPORT_SYMBOL_GPL(appldata_register_ops);
584EXPORT_SYMBOL_GPL(appldata_unregister_ops); 555EXPORT_SYMBOL_GPL(appldata_unregister_ops);
585EXPORT_SYMBOL_GPL(appldata_diag); 556EXPORT_SYMBOL_GPL(appldata_diag);
586 557
558#ifdef CONFIG_SWAP
587EXPORT_SYMBOL_GPL(si_swapinfo); 559EXPORT_SYMBOL_GPL(si_swapinfo);
560#endif
588EXPORT_SYMBOL_GPL(nr_threads); 561EXPORT_SYMBOL_GPL(nr_threads);
589EXPORT_SYMBOL_GPL(nr_running); 562EXPORT_SYMBOL_GPL(nr_running);
590EXPORT_SYMBOL_GPL(nr_iowait); 563EXPORT_SYMBOL_GPL(nr_iowait);
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 51181ccdb87b..3ed56b7d1b2f 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -14,14 +14,13 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/kernel_stat.h> 16#include <linux/kernel_stat.h>
17#include <asm/io.h>
18#include <linux/pagemap.h> 17#include <linux/pagemap.h>
19#include <linux/swap.h> 18#include <linux/swap.h>
19#include <asm/io.h>
20 20
21#include "appldata.h" 21#include "appldata.h"
22 22
23 23
24#define MY_PRINT_NAME "appldata_mem" /* for debug messages, etc. */
25#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */ 24#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
26 25
27/* 26/*
@@ -70,30 +69,6 @@ static struct appldata_mem_data {
70} __attribute__((packed)) appldata_mem_data; 69} __attribute__((packed)) appldata_mem_data;
71 70
72 71
73static inline void appldata_debug_print(struct appldata_mem_data *mem_data)
74{
75 P_DEBUG("--- MEM - RECORD ---\n");
76 P_DEBUG("pgpgin = %8lu KB\n", mem_data->pgpgin);
77 P_DEBUG("pgpgout = %8lu KB\n", mem_data->pgpgout);
78 P_DEBUG("pswpin = %8lu Pages\n", mem_data->pswpin);
79 P_DEBUG("pswpout = %8lu Pages\n", mem_data->pswpout);
80 P_DEBUG("pgalloc = %8lu \n", mem_data->pgalloc);
81 P_DEBUG("pgfault = %8lu \n", mem_data->pgfault);
82 P_DEBUG("pgmajfault = %8lu \n", mem_data->pgmajfault);
83 P_DEBUG("sharedram = %8lu KB\n", mem_data->sharedram);
84 P_DEBUG("totalram = %8lu KB\n", mem_data->totalram);
85 P_DEBUG("freeram = %8lu KB\n", mem_data->freeram);
86 P_DEBUG("totalhigh = %8lu KB\n", mem_data->totalhigh);
87 P_DEBUG("freehigh = %8lu KB\n", mem_data->freehigh);
88 P_DEBUG("bufferram = %8lu KB\n", mem_data->bufferram);
89 P_DEBUG("cached = %8lu KB\n", mem_data->cached);
90 P_DEBUG("totalswap = %8lu KB\n", mem_data->totalswap);
91 P_DEBUG("freeswap = %8lu KB\n", mem_data->freeswap);
92 P_DEBUG("sync_count_1 = %u\n", mem_data->sync_count_1);
93 P_DEBUG("sync_count_2 = %u\n", mem_data->sync_count_2);
94 P_DEBUG("timestamp = %lX\n", mem_data->timestamp);
95}
96
97/* 72/*
98 * appldata_get_mem_data() 73 * appldata_get_mem_data()
99 * 74 *
@@ -140,9 +115,6 @@ static void appldata_get_mem_data(void *data)
140 115
141 mem_data->timestamp = get_clock(); 116 mem_data->timestamp = get_clock();
142 mem_data->sync_count_2++; 117 mem_data->sync_count_2++;
143#ifdef APPLDATA_DEBUG
144 appldata_debug_print(mem_data);
145#endif
146} 118}
147 119
148 120
@@ -164,17 +136,7 @@ static struct appldata_ops ops = {
164 */ 136 */
165static int __init appldata_mem_init(void) 137static int __init appldata_mem_init(void)
166{ 138{
167 int rc; 139 return appldata_register_ops(&ops);
168
169 P_DEBUG("sizeof(mem) = %lu\n", sizeof(struct appldata_mem_data));
170
171 rc = appldata_register_ops(&ops);
172 if (rc != 0) {
173 P_ERROR("Error registering ops, rc = %i\n", rc);
174 } else {
175 P_DEBUG("%s-ops registered!\n", ops.name);
176 }
177 return rc;
178} 140}
179 141
180/* 142/*
@@ -185,7 +147,6 @@ static int __init appldata_mem_init(void)
185static void __exit appldata_mem_exit(void) 147static void __exit appldata_mem_exit(void)
186{ 148{
187 appldata_unregister_ops(&ops); 149 appldata_unregister_ops(&ops);
188 P_DEBUG("%s-ops unregistered!\n", ops.name);
189} 150}
190 151
191 152
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 4d8344336001..3b746556e1a3 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -21,9 +21,6 @@
21#include "appldata.h" 21#include "appldata.h"
22 22
23 23
24#define MY_PRINT_NAME "appldata_net_sum" /* for debug messages, etc. */
25
26
27/* 24/*
28 * Network data 25 * Network data
29 * 26 *
@@ -60,26 +57,6 @@ static struct appldata_net_sum_data {
60} __attribute__((packed)) appldata_net_sum_data; 57} __attribute__((packed)) appldata_net_sum_data;
61 58
62 59
63static inline void appldata_print_debug(struct appldata_net_sum_data *net_data)
64{
65 P_DEBUG("--- NET - RECORD ---\n");
66
67 P_DEBUG("nr_interfaces = %u\n", net_data->nr_interfaces);
68 P_DEBUG("rx_packets = %8lu\n", net_data->rx_packets);
69 P_DEBUG("tx_packets = %8lu\n", net_data->tx_packets);
70 P_DEBUG("rx_bytes = %8lu\n", net_data->rx_bytes);
71 P_DEBUG("tx_bytes = %8lu\n", net_data->tx_bytes);
72 P_DEBUG("rx_errors = %8lu\n", net_data->rx_errors);
73 P_DEBUG("tx_errors = %8lu\n", net_data->tx_errors);
74 P_DEBUG("rx_dropped = %8lu\n", net_data->rx_dropped);
75 P_DEBUG("tx_dropped = %8lu\n", net_data->tx_dropped);
76 P_DEBUG("collisions = %8lu\n", net_data->collisions);
77
78 P_DEBUG("sync_count_1 = %u\n", net_data->sync_count_1);
79 P_DEBUG("sync_count_2 = %u\n", net_data->sync_count_2);
80 P_DEBUG("timestamp = %lX\n", net_data->timestamp);
81}
82
83/* 60/*
84 * appldata_get_net_sum_data() 61 * appldata_get_net_sum_data()
85 * 62 *
@@ -135,9 +112,6 @@ static void appldata_get_net_sum_data(void *data)
135 112
136 net_data->timestamp = get_clock(); 113 net_data->timestamp = get_clock();
137 net_data->sync_count_2++; 114 net_data->sync_count_2++;
138#ifdef APPLDATA_DEBUG
139 appldata_print_debug(net_data);
140#endif
141} 115}
142 116
143 117
@@ -159,17 +133,7 @@ static struct appldata_ops ops = {
159 */ 133 */
160static int __init appldata_net_init(void) 134static int __init appldata_net_init(void)
161{ 135{
162 int rc; 136 return appldata_register_ops(&ops);
163
164 P_DEBUG("sizeof(net) = %lu\n", sizeof(struct appldata_net_sum_data));
165
166 rc = appldata_register_ops(&ops);
167 if (rc != 0) {
168 P_ERROR("Error registering ops, rc = %i\n", rc);
169 } else {
170 P_DEBUG("%s-ops registered!\n", ops.name);
171 }
172 return rc;
173} 137}
174 138
175/* 139/*
@@ -180,7 +144,6 @@ static int __init appldata_net_init(void)
180static void __exit appldata_net_exit(void) 144static void __exit appldata_net_exit(void)
181{ 145{
182 appldata_unregister_ops(&ops); 146 appldata_unregister_ops(&ops);
183 P_DEBUG("%s-ops unregistered!\n", ops.name);
184} 147}
185 148
186 149
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 6b3eafe10453..eb44f9f8ab91 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -89,44 +89,6 @@ static struct appldata_ops ops = {
89}; 89};
90 90
91 91
92static inline void appldata_print_debug(struct appldata_os_data *os_data)
93{
94 int a0, a1, a2, i;
95
96 P_DEBUG("--- OS - RECORD ---\n");
97 P_DEBUG("nr_threads = %u\n", os_data->nr_threads);
98 P_DEBUG("nr_running = %u\n", os_data->nr_running);
99 P_DEBUG("nr_iowait = %u\n", os_data->nr_iowait);
100 P_DEBUG("avenrun(int) = %8x / %8x / %8x\n", os_data->avenrun[0],
101 os_data->avenrun[1], os_data->avenrun[2]);
102 a0 = os_data->avenrun[0];
103 a1 = os_data->avenrun[1];
104 a2 = os_data->avenrun[2];
105 P_DEBUG("avenrun(float) = %d.%02d / %d.%02d / %d.%02d\n",
106 LOAD_INT(a0), LOAD_FRAC(a0), LOAD_INT(a1), LOAD_FRAC(a1),
107 LOAD_INT(a2), LOAD_FRAC(a2));
108
109 P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
110 for (i = 0; i < os_data->nr_cpus; i++) {
111 P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
112 "idle = %u, irq = %u, softirq = %u, iowait = %u, "
113 "steal = %u\n",
114 os_data->os_cpu[i].cpu_id,
115 os_data->os_cpu[i].per_cpu_user,
116 os_data->os_cpu[i].per_cpu_nice,
117 os_data->os_cpu[i].per_cpu_system,
118 os_data->os_cpu[i].per_cpu_idle,
119 os_data->os_cpu[i].per_cpu_irq,
120 os_data->os_cpu[i].per_cpu_softirq,
121 os_data->os_cpu[i].per_cpu_iowait,
122 os_data->os_cpu[i].per_cpu_steal);
123 }
124
125 P_DEBUG("sync_count_1 = %u\n", os_data->sync_count_1);
126 P_DEBUG("sync_count_2 = %u\n", os_data->sync_count_2);
127 P_DEBUG("timestamp = %lX\n", os_data->timestamp);
128}
129
130/* 92/*
131 * appldata_get_os_data() 93 * appldata_get_os_data()
132 * 94 *
@@ -180,13 +142,10 @@ static void appldata_get_os_data(void *data)
180 APPLDATA_START_INTERVAL_REC, 142 APPLDATA_START_INTERVAL_REC,
181 (unsigned long) ops.data, new_size, 143 (unsigned long) ops.data, new_size,
182 ops.mod_lvl); 144 ops.mod_lvl);
183 if (rc != 0) { 145 if (rc != 0)
184 P_ERROR("os: START NEW DIAG 0xDC failed, " 146 P_ERROR("os: START NEW DIAG 0xDC failed, "
185 "return code: %d, new size = %i\n", rc, 147 "return code: %d, new size = %i\n", rc,
186 new_size); 148 new_size);
187 P_INFO("os: stopping old record now\n");
188 } else
189 P_INFO("os: new record size = %i\n", new_size);
190 149
191 rc = appldata_diag(APPLDATA_RECORD_OS_ID, 150 rc = appldata_diag(APPLDATA_RECORD_OS_ID,
192 APPLDATA_STOP_REC, 151 APPLDATA_STOP_REC,
@@ -204,9 +163,6 @@ static void appldata_get_os_data(void *data)
204 } 163 }
205 os_data->timestamp = get_clock(); 164 os_data->timestamp = get_clock();
206 os_data->sync_count_2++; 165 os_data->sync_count_2++;
207#ifdef APPLDATA_DEBUG
208 appldata_print_debug(os_data);
209#endif
210} 166}
211 167
212 168
@@ -227,12 +183,9 @@ static int __init appldata_os_init(void)
227 rc = -ENOMEM; 183 rc = -ENOMEM;
228 goto out; 184 goto out;
229 } 185 }
230 P_DEBUG("max. sizeof(os) = %i, sizeof(os_cpu) = %lu\n", max_size,
231 sizeof(struct appldata_os_per_cpu));
232 186
233 appldata_os_data = kzalloc(max_size, GFP_DMA); 187 appldata_os_data = kzalloc(max_size, GFP_DMA);
234 if (appldata_os_data == NULL) { 188 if (appldata_os_data == NULL) {
235 P_ERROR("No memory for %s!\n", ops.name);
236 rc = -ENOMEM; 189 rc = -ENOMEM;
237 goto out; 190 goto out;
238 } 191 }
@@ -240,17 +193,12 @@ static int __init appldata_os_init(void)
240 appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu); 193 appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
241 appldata_os_data->cpu_offset = offsetof(struct appldata_os_data, 194 appldata_os_data->cpu_offset = offsetof(struct appldata_os_data,
242 os_cpu); 195 os_cpu);
243 P_DEBUG("cpu offset = %u\n", appldata_os_data->cpu_offset);
244 196
245 ops.data = appldata_os_data; 197 ops.data = appldata_os_data;
246 ops.callback = &appldata_get_os_data; 198 ops.callback = &appldata_get_os_data;
247 rc = appldata_register_ops(&ops); 199 rc = appldata_register_ops(&ops);
248 if (rc != 0) { 200 if (rc != 0)
249 P_ERROR("Error registering ops, rc = %i\n", rc);
250 kfree(appldata_os_data); 201 kfree(appldata_os_data);
251 } else {
252 P_DEBUG("%s-ops registered!\n", ops.name);
253 }
254out: 202out:
255 return rc; 203 return rc;
256} 204}
@@ -264,7 +212,6 @@ static void __exit appldata_os_exit(void)
264{ 212{
265 appldata_unregister_ops(&ops); 213 appldata_unregister_ops(&ops);
266 kfree(appldata_os_data); 214 kfree(appldata_os_data);
267 P_DEBUG("%s-ops unregistered!\n", ops.name);
268} 215}
269 216
270 217
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 9992f95ef992..0ef9829f2ad6 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -296,6 +296,10 @@ static inline int crypt_s390_func_available(int func)
296 unsigned char status[16]; 296 unsigned char status[16];
297 int ret; 297 int ret;
298 298
299 /* check if CPACF facility (bit 17) is available */
300 if (!(stfl() & 1ULL << (31 - 17)))
301 return 0;
302
299 switch (func & CRYPT_S390_OP_MASK) { 303 switch (func & CRYPT_S390_OP_MASK) {
300 case CRYPT_S390_KM: 304 case CRYPT_S390_KM:
301 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); 305 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 0cfefddd8375..eca724d229ec 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -6,6 +6,7 @@
6#include <linux/fs.h> 6#include <linux/fs.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/smp_lock.h>
9#include <linux/miscdevice.h> 10#include <linux/miscdevice.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/moduleparam.h> 12#include <linux/moduleparam.h>
@@ -48,6 +49,7 @@ static unsigned char parm_block[32] = {
48 49
49static int prng_open(struct inode *inode, struct file *file) 50static int prng_open(struct inode *inode, struct file *file)
50{ 51{
52 cycle_kernel_lock();
51 return nonseekable_open(inode, file); 53 return nonseekable_open(inode, file);
52} 54}
53 55
@@ -185,11 +187,8 @@ static int __init prng_init(void)
185 prng_seed(16); 187 prng_seed(16);
186 188
187 ret = misc_register(&prng_dev); 189 ret = misc_register(&prng_dev);
188 if (ret) { 190 if (ret)
189 printk(KERN_WARNING
190 "Could not register misc device for PRNG.\n");
191 goto out_buf; 191 goto out_buf;
192 }
193 return 0; 192 return 0;
194 193
195out_buf: 194out_buf:
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 4b010ff814c9..7383781f3e6a 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -150,33 +150,24 @@ static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
150 unsigned long nr_segs, loff_t offset) 150 unsigned long nr_segs, loff_t offset)
151{ 151{
152 char *data; 152 char *data;
153 size_t len; 153 ssize_t ret;
154 struct file *filp = iocb->ki_filp; 154 struct file *filp = iocb->ki_filp;
155 /* XXX: temporary */ 155 /* XXX: temporary */
156 char __user *buf = iov[0].iov_base; 156 char __user *buf = iov[0].iov_base;
157 size_t count = iov[0].iov_len; 157 size_t count = iov[0].iov_len;
158 158
159 if (nr_segs != 1) { 159 if (nr_segs != 1)
160 count = -EINVAL; 160 return -EINVAL;
161 goto out;
162 }
163 161
164 data = filp->private_data; 162 data = filp->private_data;
165 len = strlen(data); 163 ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
166 if (offset > len) { 164 if (ret <= 0)
167 count = 0; 165 return ret;
168 goto out; 166
169 } 167 iocb->ki_pos += ret;
170 if (count > len - offset)
171 count = len - offset;
172 if (copy_to_user(buf, data + offset, count)) {
173 count = -EFAULT;
174 goto out;
175 }
176 iocb->ki_pos += count;
177 file_accessed(filp); 168 file_accessed(filp);
178out: 169
179 return count; 170 return ret;
180} 171}
181static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, 172static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
182 unsigned long nr_segs, loff_t offset) 173 unsigned long nr_segs, loff_t offset)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 6302f5082588..50f657e77344 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -7,9 +7,14 @@
7# 7#
8CFLAGS_smp.o := -Wno-nonnull 8CFLAGS_smp.o := -Wno-nonnull
9 9
10#
11# Pass UTS_MACHINE for user_regset definition
12#
13CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
14
10obj-y := bitmap.o traps.o time.o process.o base.o early.o \ 15obj-y := bitmap.o traps.o time.o process.o base.o early.o \
11 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 16 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
12 s390_ext.o debug.o irq.o ipl.o dis.o diag.o 17 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o
13 18
14obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 19obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
15obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 20obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -23,7 +28,7 @@ obj-$(CONFIG_AUDIT) += audit.o
23compat-obj-$(CONFIG_AUDIT) += compat_audit.o 28compat-obj-$(CONFIG_AUDIT) += compat_audit.o
24obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ 29obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
25 compat_wrapper.o compat_exec_domain.o \ 30 compat_wrapper.o compat_exec_domain.o \
26 binfmt_elf32.o $(compat-obj-y) 31 $(compat-obj-y)
27 32
28obj-$(CONFIG_VIRT_TIMER) += vtime.o 33obj-$(CONFIG_VIRT_TIMER) += vtime.o
29obj-$(CONFIG_STACKTRACE) += stacktrace.o 34obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
deleted file mode 100644
index 3e1c315b736d..000000000000
--- a/arch/s390/kernel/binfmt_elf32.c
+++ /dev/null
@@ -1,214 +0,0 @@
1/*
2 * Support for 32-bit Linux for S390 ELF binaries.
3 *
4 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Gerhard Tonn (ton@de.ibm.com)
6 *
7 * Heavily inspired by the 32-bit Sparc compat code which is
8 * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
9 * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
10 */
11
12#define __ASMS390_ELF_H
13
14#include <linux/time.h>
15
16/*
17 * These are used to set parameters in the core dumps.
18 */
19#define ELF_CLASS ELFCLASS32
20#define ELF_DATA ELFDATA2MSB
21#define ELF_ARCH EM_S390
22
23/*
24 * This is used to ensure we don't load something for the wrong architecture.
25 */
26#define elf_check_arch(x) \
27 (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
28 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
29
30/* ELF register definitions */
31#define NUM_GPRS 16
32#define NUM_FPRS 16
33#define NUM_ACRS 16
34
35/* For SVR4/S390 the function pointer to be registered with `atexit` is
36 passed in R14. */
37#define ELF_PLAT_INIT(_r, load_addr) \
38 do { \
39 _r->gprs[14] = 0; \
40 } while(0)
41
42#define USE_ELF_CORE_DUMP
43#define ELF_EXEC_PAGESIZE 4096
44
45/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
46 use of this is to invoke "./ld.so someprog" to test out a new version of
47 the loader. We need to make sure that it is out of the way of the program
48 that it will "exec", and that there is sufficient room for the brk. */
49
50#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
51
52/* Wow, the "main" arch needs arch dependent functions too.. :) */
53
54/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
55 now struct_user_regs, they are different) */
56
57#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg);
58
59#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs)
60
61#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
62
63/* This yields a mask that user programs can use to figure out what
64 instruction set this CPU supports. */
65
66#define ELF_HWCAP (0)
67
68/* This yields a string that ld.so will use to load implementation
69 specific libraries for optimization. This is more specific in
70 intent than poking at uname or /proc/cpuinfo.
71
72 For the moment, we have only optimizations for the Intel generations,
73 but that could change... */
74
75#define ELF_PLATFORM (NULL)
76
77#define SET_PERSONALITY(ex, ibcs2) \
78do { \
79 if (ibcs2) \
80 set_personality(PER_SVR4); \
81 else if (current->personality != PER_LINUX32) \
82 set_personality(PER_LINUX); \
83 set_thread_flag(TIF_31BIT); \
84} while (0)
85
86#include "compat_linux.h"
87
88typedef _s390_fp_regs32 elf_fpregset_t;
89
90typedef struct
91{
92
93 _psw_t32 psw;
94 __u32 gprs[__NUM_GPRS];
95 __u32 acrs[__NUM_ACRS];
96 __u32 orig_gpr2;
97} s390_regs32;
98typedef s390_regs32 elf_gregset_t;
99
100static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
101{
102 int i;
103
104 memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
105 memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
106 for (i = 0; i < NUM_GPRS; i++)
107 regs->gprs[i] = ptregs->gprs[i];
108 save_access_regs(regs->acrs);
109 regs->orig_gpr2 = ptregs->orig_gpr2;
110 return 1;
111}
112
113static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
114{
115 struct pt_regs *ptregs = task_pt_regs(tsk);
116 int i;
117
118 memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
119 memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
120 for (i = 0; i < NUM_GPRS; i++)
121 regs->gprs[i] = ptregs->gprs[i];
122 memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
123 regs->orig_gpr2 = ptregs->orig_gpr2;
124 return 1;
125}
126
127static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
128{
129 if (tsk == current)
130 save_fp_regs((s390_fp_regs *) fpregs);
131 else
132 memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
133 return 1;
134}
135
136#include <asm/processor.h>
137#include <asm/pgalloc.h>
138#include <linux/module.h>
139#include <linux/elfcore.h>
140#include <linux/binfmts.h>
141#include <linux/compat.h>
142
143#define elf_prstatus elf_prstatus32
144struct elf_prstatus32
145{
146 struct elf_siginfo pr_info; /* Info associated with signal */
147 short pr_cursig; /* Current signal */
148 u32 pr_sigpend; /* Set of pending signals */
149 u32 pr_sighold; /* Set of held signals */
150 pid_t pr_pid;
151 pid_t pr_ppid;
152 pid_t pr_pgrp;
153 pid_t pr_sid;
154 struct compat_timeval pr_utime; /* User time */
155 struct compat_timeval pr_stime; /* System time */
156 struct compat_timeval pr_cutime; /* Cumulative user time */
157 struct compat_timeval pr_cstime; /* Cumulative system time */
158 elf_gregset_t pr_reg; /* GP registers */
159 int pr_fpvalid; /* True if math co-processor being used. */
160};
161
162#define elf_prpsinfo elf_prpsinfo32
163struct elf_prpsinfo32
164{
165 char pr_state; /* numeric process state */
166 char pr_sname; /* char for pr_state */
167 char pr_zomb; /* zombie */
168 char pr_nice; /* nice val */
169 u32 pr_flag; /* flags */
170 u16 pr_uid;
171 u16 pr_gid;
172 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
173 /* Lots missing */
174 char pr_fname[16]; /* filename of executable */
175 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
176};
177
178#include <linux/highuid.h>
179
180/*
181#define init_elf_binfmt init_elf32_binfmt
182*/
183
184#undef start_thread
185#define start_thread start_thread31
186
187static inline void start_thread31(struct pt_regs *regs, unsigned long new_psw,
188 unsigned long new_stackp)
189{
190 set_fs(USER_DS);
191 regs->psw.mask = psw_user32_bits;
192 regs->psw.addr = new_psw;
193 regs->gprs[15] = new_stackp;
194 crst_table_downgrade(current->mm, 1UL << 31);
195}
196
197MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
198 " Copyright 2000 IBM Corporation");
199MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
200
201#undef MODULE_DESCRIPTION
202#undef MODULE_AUTHOR
203
204#undef cputime_to_timeval
205#define cputime_to_timeval cputime_to_compat_timeval
206static inline void
207cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
208{
209 value->tv_usec = cputime % 1000000;
210 value->tv_sec = cputime / 1000000;
211}
212
213#include "../../../fs/binfmt_elf.c"
214
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 419aef913ee1..cde81fa64f89 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -1,7 +1,7 @@
1#ifndef _PTRACE32_H 1#ifndef _PTRACE32_H
2#define _PTRACE32_H 2#define _PTRACE32_H
3 3
4#include "compat_linux.h" /* needed for _psw_t32 */ 4#include "compat_linux.h" /* needed for psw_compat_t */
5 5
6typedef struct { 6typedef struct {
7 __u32 cr[3]; 7 __u32 cr[3];
@@ -38,7 +38,7 @@ typedef struct {
38 38
39struct user_regs_struct32 39struct user_regs_struct32
40{ 40{
41 _psw_t32 psw; 41 psw_compat_t psw;
42 u32 gprs[NUM_GPRS]; 42 u32 gprs[NUM_GPRS];
43 u32 acrs[NUM_ACRS]; 43 u32 acrs[NUM_ACRS];
44 u32 orig_gpr2; 44 u32 orig_gpr2;
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index c93d1296cc0a..d80fcd4a7fe1 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1079,7 +1079,6 @@ __init debug_init(void)
1079 s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table); 1079 s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
1080 mutex_lock(&debug_mutex); 1080 mutex_lock(&debug_mutex);
1081 debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL); 1081 debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
1082 printk(KERN_INFO "debug: Initialization complete\n");
1083 initialized = 1; 1082 initialized = 1;
1084 mutex_unlock(&debug_mutex); 1083 mutex_unlock(&debug_mutex);
1085 1084
@@ -1193,7 +1192,6 @@ debug_get_uint(char *buf)
1193 for(; isspace(*buf); buf++); 1192 for(; isspace(*buf); buf++);
1194 rc = simple_strtoul(buf, &buf, 10); 1193 rc = simple_strtoul(buf, &buf, 10);
1195 if(*buf){ 1194 if(*buf){
1196 printk("debug: no integer specified!\n");
1197 rc = -EINVAL; 1195 rc = -EINVAL;
1198 } 1196 }
1199 return rc; 1197 return rc;
@@ -1340,19 +1338,12 @@ static void debug_flush(debug_info_t* id, int area)
1340 memset(id->areas[i][j], 0, PAGE_SIZE); 1338 memset(id->areas[i][j], 0, PAGE_SIZE);
1341 } 1339 }
1342 } 1340 }
1343 printk(KERN_INFO "debug: %s: all areas flushed\n",id->name);
1344 } else if(area >= 0 && area < id->nr_areas) { 1341 } else if(area >= 0 && area < id->nr_areas) {
1345 id->active_entries[area] = 0; 1342 id->active_entries[area] = 0;
1346 id->active_pages[area] = 0; 1343 id->active_pages[area] = 0;
1347 for(i = 0; i < id->pages_per_area; i++) { 1344 for(i = 0; i < id->pages_per_area; i++) {
1348 memset(id->areas[area][i],0,PAGE_SIZE); 1345 memset(id->areas[area][i],0,PAGE_SIZE);
1349 } 1346 }
1350 printk(KERN_INFO "debug: %s: area %i has been flushed\n",
1351 id->name, area);
1352 } else {
1353 printk(KERN_INFO
1354 "debug: %s: area %i cannot be flushed (range: %i - %i)\n",
1355 id->name, area, 0, id->nr_areas-1);
1356 } 1347 }
1357 spin_unlock_irqrestore(&id->lock,flags); 1348 spin_unlock_irqrestore(&id->lock,flags);
1358} 1349}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d0e09684b9ce..2a2ca268b1dd 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/pfn.h> 15#include <linux/pfn.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <asm/ebcdic.h>
17#include <asm/ipl.h> 18#include <asm/ipl.h>
18#include <asm/lowcore.h> 19#include <asm/lowcore.h>
19#include <asm/processor.h> 20#include <asm/processor.h>
@@ -26,12 +27,40 @@
26/* 27/*
27 * Create a Kernel NSS if the SAVESYS= parameter is defined 28 * Create a Kernel NSS if the SAVESYS= parameter is defined
28 */ 29 */
29#define DEFSYS_CMD_SIZE 96 30#define DEFSYS_CMD_SIZE 128
30#define SAVESYS_CMD_SIZE 32 31#define SAVESYS_CMD_SIZE 32
31 32
32char kernel_nss_name[NSS_NAME_SIZE + 1]; 33char kernel_nss_name[NSS_NAME_SIZE + 1];
33 34
35static void __init setup_boot_command_line(void);
36
37
34#ifdef CONFIG_SHARED_KERNEL 38#ifdef CONFIG_SHARED_KERNEL
39int __init savesys_ipl_nss(char *cmd, const int cmdlen);
40
41asm(
42 " .section .init.text,\"ax\",@progbits\n"
43 " .align 4\n"
44 " .type savesys_ipl_nss, @function\n"
45 "savesys_ipl_nss:\n"
46#ifdef CONFIG_64BIT
47 " stmg 6,15,48(15)\n"
48 " lgr 14,3\n"
49 " sam31\n"
50 " diag 2,14,0x8\n"
51 " sam64\n"
52 " lgr 2,14\n"
53 " lmg 6,15,48(15)\n"
54#else
55 " stm 6,15,24(15)\n"
56 " lr 14,3\n"
57 " diag 2,14,0x8\n"
58 " lr 2,14\n"
59 " lm 6,15,24(15)\n"
60#endif
61 " br 14\n"
62 " .size savesys_ipl_nss, .-savesys_ipl_nss\n");
63
35static noinline __init void create_kernel_nss(void) 64static noinline __init void create_kernel_nss(void)
36{ 65{
37 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; 66 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
@@ -39,6 +68,7 @@ static noinline __init void create_kernel_nss(void)
39 unsigned int sinitrd_pfn, einitrd_pfn; 68 unsigned int sinitrd_pfn, einitrd_pfn;
40#endif 69#endif
41 int response; 70 int response;
71 size_t len;
42 char *savesys_ptr; 72 char *savesys_ptr;
43 char upper_command_line[COMMAND_LINE_SIZE]; 73 char upper_command_line[COMMAND_LINE_SIZE];
44 char defsys_cmd[DEFSYS_CMD_SIZE]; 74 char defsys_cmd[DEFSYS_CMD_SIZE];
@@ -49,8 +79,8 @@ static noinline __init void create_kernel_nss(void)
49 return; 79 return;
50 80
51 /* Convert COMMAND_LINE to upper case */ 81 /* Convert COMMAND_LINE to upper case */
52 for (i = 0; i < strlen(COMMAND_LINE); i++) 82 for (i = 0; i < strlen(boot_command_line); i++)
53 upper_command_line[i] = toupper(COMMAND_LINE[i]); 83 upper_command_line[i] = toupper(boot_command_line[i]);
54 84
55 savesys_ptr = strstr(upper_command_line, "SAVESYS="); 85 savesys_ptr = strstr(upper_command_line, "SAVESYS=");
56 86
@@ -83,7 +113,8 @@ static noinline __init void create_kernel_nss(void)
83 } 113 }
84#endif 114#endif
85 115
86 sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size); 116 sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK PARMREGS=0-13",
117 defsys_cmd, min_size);
87 sprintf(savesys_cmd, "SAVESYS %s \n IPL %s", 118 sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
88 kernel_nss_name, kernel_nss_name); 119 kernel_nss_name, kernel_nss_name);
89 120
@@ -94,13 +125,24 @@ static noinline __init void create_kernel_nss(void)
94 return; 125 return;
95 } 126 }
96 127
97 __cpcmd(savesys_cmd, NULL, 0, &response); 128 len = strlen(savesys_cmd);
129 ASCEBC(savesys_cmd, len);
130 response = savesys_ipl_nss(savesys_cmd, len);
98 131
99 if (response != strlen(savesys_cmd)) { 132 /* On success: response is equal to the command size,
133 * max SAVESYS_CMD_SIZE
134 * On error: response contains the numeric portion of cp error message.
135 * for SAVESYS it will be >= 263
136 */
137 if (response > SAVESYS_CMD_SIZE) {
100 kernel_nss_name[0] = '\0'; 138 kernel_nss_name[0] = '\0';
101 return; 139 return;
102 } 140 }
103 141
142 /* re-setup boot command line with new ipl vm parms */
143 ipl_update_parameters();
144 setup_boot_command_line();
145
104 ipl_flags = IPL_NSS_VALID; 146 ipl_flags = IPL_NSS_VALID;
105} 147}
106 148
@@ -141,109 +183,11 @@ static noinline __init void detect_machine_type(void)
141 if (cpuinfo->cpu_id.version == 0xff) 183 if (cpuinfo->cpu_id.version == 0xff)
142 machine_flags |= MACHINE_FLAG_VM; 184 machine_flags |= MACHINE_FLAG_VM;
143 185
144 /* Running on a P/390 ? */
145 if (cpuinfo->cpu_id.machine == 0x7490)
146 machine_flags |= MACHINE_FLAG_P390;
147
148 /* Running under KVM ? */ 186 /* Running under KVM ? */
149 if (cpuinfo->cpu_id.version == 0xfe) 187 if (cpuinfo->cpu_id.version == 0xfe)
150 machine_flags |= MACHINE_FLAG_KVM; 188 machine_flags |= MACHINE_FLAG_KVM;
151} 189}
152 190
153#ifdef CONFIG_64BIT
154static noinline __init int memory_fast_detect(void)
155{
156 unsigned long val0 = 0;
157 unsigned long val1 = 0xc;
158 int ret = -ENOSYS;
159
160 if (ipl_flags & IPL_NSS_VALID)
161 return -ENOSYS;
162
163 asm volatile(
164 " diag %1,%2,0x260\n"
165 "0: lhi %0,0\n"
166 "1:\n"
167 EX_TABLE(0b,1b)
168 : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
169
170 if (ret || val0 != val1)
171 return -ENOSYS;
172
173 memory_chunk[0].size = val0 + 1;
174 return 0;
175}
176#else
177static inline int memory_fast_detect(void)
178{
179 return -ENOSYS;
180}
181#endif
182
183static inline __init unsigned long __tprot(unsigned long addr)
184{
185 int cc = -1;
186
187 asm volatile(
188 " tprot 0(%1),0\n"
189 "0: ipm %0\n"
190 " srl %0,28\n"
191 "1:\n"
192 EX_TABLE(0b,1b)
193 : "+d" (cc) : "a" (addr) : "cc");
194 return (unsigned long)cc;
195}
196
197/* Checking memory in 128KB increments. */
198#define CHUNK_INCR (1UL << 17)
199#define ADDR2G (1UL << 31)
200
201static noinline __init void find_memory_chunks(unsigned long memsize)
202{
203 unsigned long addr = 0, old_addr = 0;
204 unsigned long old_cc = CHUNK_READ_WRITE;
205 unsigned long cc;
206 int chunk = 0;
207
208 while (chunk < MEMORY_CHUNKS) {
209 cc = __tprot(addr);
210 while (cc == old_cc) {
211 addr += CHUNK_INCR;
212 if (memsize && addr >= memsize)
213 break;
214#ifndef CONFIG_64BIT
215 if (addr == ADDR2G)
216 break;
217#endif
218 cc = __tprot(addr);
219 }
220
221 if (old_addr != addr &&
222 (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
223 memory_chunk[chunk].addr = old_addr;
224 memory_chunk[chunk].size = addr - old_addr;
225 memory_chunk[chunk].type = old_cc;
226 chunk++;
227 }
228
229 old_addr = addr;
230 old_cc = cc;
231
232#ifndef CONFIG_64BIT
233 if (addr == ADDR2G)
234 break;
235#endif
236 /*
237 * Finish memory detection at the first hole
238 * if storage size is unknown.
239 */
240 if (cc == -1UL && !memsize)
241 break;
242 if (memsize && addr >= memsize)
243 break;
244 }
245}
246
247static __init void early_pgm_check_handler(void) 191static __init void early_pgm_check_handler(void)
248{ 192{
249 unsigned long addr; 193 unsigned long addr;
@@ -380,23 +324,61 @@ static __init void detect_machine_facilities(void)
380#endif 324#endif
381} 325}
382 326
327static __init void rescue_initrd(void)
328{
329#ifdef CONFIG_BLK_DEV_INITRD
330 /*
331 * Move the initrd right behind the bss section in case it starts
332 * within the bss section. So we don't overwrite it when the bss
333 * section gets cleared.
334 */
335 if (!INITRD_START || !INITRD_SIZE)
336 return;
337 if (INITRD_START >= (unsigned long) __bss_stop)
338 return;
339 memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE);
340 INITRD_START = (unsigned long) __bss_stop;
341#endif
342}
343
344/* Set up boot command line */
345static void __init setup_boot_command_line(void)
346{
347 char *parm = NULL;
348
349 /* copy arch command line */
350 strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
351 boot_command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
352
353 /* append IPL PARM data to the boot command line */
354 if (MACHINE_IS_VM) {
355 parm = boot_command_line + strlen(boot_command_line);
356 *parm++ = ' ';
357 get_ipl_vmparm(parm);
358 if (parm[0] == '=')
359 memmove(boot_command_line, parm + 1, strlen(parm));
360 }
361}
362
363
383/* 364/*
384 * Save ipl parameters, clear bss memory, initialize storage keys 365 * Save ipl parameters, clear bss memory, initialize storage keys
385 * and create a kernel NSS at startup if the SAVESYS= parm is defined 366 * and create a kernel NSS at startup if the SAVESYS= parm is defined
386 */ 367 */
387void __init startup_init(void) 368void __init startup_init(void)
388{ 369{
389 unsigned long long memsize;
390
391 ipl_save_parameters(); 370 ipl_save_parameters();
371 rescue_initrd();
392 clear_bss_section(); 372 clear_bss_section();
393 init_kernel_storage_key(); 373 init_kernel_storage_key();
394 lockdep_init(); 374 lockdep_init();
395 lockdep_off(); 375 lockdep_off();
396 detect_machine_type();
397 create_kernel_nss();
398 sort_main_extable(); 376 sort_main_extable();
399 setup_lowcore_early(); 377 setup_lowcore_early();
378 detect_machine_type();
379 ipl_update_parameters();
380 setup_boot_command_line();
381 create_kernel_nss();
400 detect_mvpg(); 382 detect_mvpg();
401 detect_ieee(); 383 detect_ieee();
402 detect_csp(); 384 detect_csp();
@@ -404,18 +386,7 @@ void __init startup_init(void)
404 detect_diag44(); 386 detect_diag44();
405 detect_machine_facilities(); 387 detect_machine_facilities();
406 setup_hpage(); 388 setup_hpage();
407 sclp_read_info_early();
408 sclp_facilities_detect(); 389 sclp_facilities_detect();
409 memsize = sclp_memory_detect(); 390 detect_memory_layout(memory_chunk);
410#ifndef CONFIG_64BIT
411 /*
412 * Can't deal with more than 2G in 31 bit addressing mode, so
413 * limit the value in order to avoid strange side effects.
414 */
415 if (memsize > ADDR2G)
416 memsize = ADDR2G;
417#endif
418 if (memory_fast_detect() < 0)
419 find_memory_chunks((unsigned long) memsize);
420 lockdep_on(); 391 lockdep_on();
421} 392}
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 532542447d66..54b2779b5e2f 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -14,6 +14,7 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/fs.h>
17#include <asm/ipl.h> 18#include <asm/ipl.h>
18#include <asm/smp.h> 19#include <asm/smp.h>
19#include <asm/setup.h> 20#include <asm/setup.h>
@@ -22,6 +23,7 @@
22#include <asm/ebcdic.h> 23#include <asm/ebcdic.h>
23#include <asm/reset.h> 24#include <asm/reset.h>
24#include <asm/sclp.h> 25#include <asm/sclp.h>
26#include <asm/setup.h>
25 27
26#define IPL_PARM_BLOCK_VERSION 0 28#define IPL_PARM_BLOCK_VERSION 0
27 29
@@ -121,6 +123,7 @@ enum ipl_method {
121 REIPL_METHOD_FCP_RO_VM, 123 REIPL_METHOD_FCP_RO_VM,
122 REIPL_METHOD_FCP_DUMP, 124 REIPL_METHOD_FCP_DUMP,
123 REIPL_METHOD_NSS, 125 REIPL_METHOD_NSS,
126 REIPL_METHOD_NSS_DIAG,
124 REIPL_METHOD_DEFAULT, 127 REIPL_METHOD_DEFAULT,
125}; 128};
126 129
@@ -134,14 +137,15 @@ enum dump_method {
134 137
135static int diag308_set_works = 0; 138static int diag308_set_works = 0;
136 139
140static struct ipl_parameter_block ipl_block;
141
137static int reipl_capabilities = IPL_TYPE_UNKNOWN; 142static int reipl_capabilities = IPL_TYPE_UNKNOWN;
138 143
139static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; 144static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
140static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; 145static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
141static struct ipl_parameter_block *reipl_block_fcp; 146static struct ipl_parameter_block *reipl_block_fcp;
142static struct ipl_parameter_block *reipl_block_ccw; 147static struct ipl_parameter_block *reipl_block_ccw;
143 148static struct ipl_parameter_block *reipl_block_nss;
144static char reipl_nss_name[NSS_NAME_SIZE + 1];
145 149
146static int dump_capabilities = DUMP_TYPE_NONE; 150static int dump_capabilities = DUMP_TYPE_NONE;
147static enum dump_type dump_type = DUMP_TYPE_NONE; 151static enum dump_type dump_type = DUMP_TYPE_NONE;
@@ -263,6 +267,56 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
263 267
264static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); 268static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
265 269
270/* VM IPL PARM routines */
271static void reipl_get_ascii_vmparm(char *dest,
272 const struct ipl_parameter_block *ipb)
273{
274 int i;
275 int len = 0;
276 char has_lowercase = 0;
277
278 if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
279 (ipb->ipl_info.ccw.vm_parm_len > 0)) {
280
281 len = ipb->ipl_info.ccw.vm_parm_len;
282 memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
283 /* If at least one character is lowercase, we assume mixed
284 * case; otherwise we convert everything to lowercase.
285 */
286 for (i = 0; i < len; i++)
287 if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
288 (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
289 (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
290 has_lowercase = 1;
291 break;
292 }
293 if (!has_lowercase)
294 EBC_TOLOWER(dest, len);
295 EBCASC(dest, len);
296 }
297 dest[len] = 0;
298}
299
300void get_ipl_vmparm(char *dest)
301{
302 if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
303 reipl_get_ascii_vmparm(dest, &ipl_block);
304 else
305 dest[0] = 0;
306}
307
308static ssize_t ipl_vm_parm_show(struct kobject *kobj,
309 struct kobj_attribute *attr, char *page)
310{
311 char parm[DIAG308_VMPARM_SIZE + 1] = {};
312
313 get_ipl_vmparm(parm);
314 return sprintf(page, "%s\n", parm);
315}
316
317static struct kobj_attribute sys_ipl_vm_parm_attr =
318 __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
319
266static ssize_t sys_ipl_device_show(struct kobject *kobj, 320static ssize_t sys_ipl_device_show(struct kobject *kobj,
267 struct kobj_attribute *attr, char *page) 321 struct kobj_attribute *attr, char *page)
268{ 322{
@@ -285,14 +339,8 @@ static struct kobj_attribute sys_ipl_device_attr =
285static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr, 339static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
286 char *buf, loff_t off, size_t count) 340 char *buf, loff_t off, size_t count)
287{ 341{
288 unsigned int size = IPL_PARMBLOCK_SIZE; 342 return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
289 343 IPL_PARMBLOCK_SIZE);
290 if (off > size)
291 return 0;
292 if (off + count > size)
293 count = size - off;
294 memcpy(buf, (void *)IPL_PARMBLOCK_START + off, count);
295 return count;
296} 344}
297 345
298static struct bin_attribute ipl_parameter_attr = { 346static struct bin_attribute ipl_parameter_attr = {
@@ -310,12 +358,7 @@ static ssize_t ipl_scp_data_read(struct kobject *kobj, struct bin_attribute *att
310 unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len; 358 unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
311 void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data; 359 void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
312 360
313 if (off > size) 361 return memory_read_from_buffer(buf, count, &off, scp_data, size);
314 return 0;
315 if (off + count > size)
316 count = size - off;
317 memcpy(buf, scp_data + off, count);
318 return count;
319} 362}
320 363
321static struct bin_attribute ipl_scp_data_attr = { 364static struct bin_attribute ipl_scp_data_attr = {
@@ -370,15 +413,27 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
370static struct kobj_attribute sys_ipl_ccw_loadparm_attr = 413static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
371 __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL); 414 __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
372 415
373static struct attribute *ipl_ccw_attrs[] = { 416static struct attribute *ipl_ccw_attrs_vm[] = {
374 &sys_ipl_type_attr.attr, 417 &sys_ipl_type_attr.attr,
375 &sys_ipl_device_attr.attr, 418 &sys_ipl_device_attr.attr,
376 &sys_ipl_ccw_loadparm_attr.attr, 419 &sys_ipl_ccw_loadparm_attr.attr,
420 &sys_ipl_vm_parm_attr.attr,
377 NULL, 421 NULL,
378}; 422};
379 423
380static struct attribute_group ipl_ccw_attr_group = { 424static struct attribute *ipl_ccw_attrs_lpar[] = {
381 .attrs = ipl_ccw_attrs, 425 &sys_ipl_type_attr.attr,
426 &sys_ipl_device_attr.attr,
427 &sys_ipl_ccw_loadparm_attr.attr,
428 NULL,
429};
430
431static struct attribute_group ipl_ccw_attr_group_vm = {
432 .attrs = ipl_ccw_attrs_vm,
433};
434
435static struct attribute_group ipl_ccw_attr_group_lpar = {
436 .attrs = ipl_ccw_attrs_lpar
382}; 437};
383 438
384/* NSS ipl device attributes */ 439/* NSS ipl device attributes */
@@ -388,6 +443,8 @@ DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
388static struct attribute *ipl_nss_attrs[] = { 443static struct attribute *ipl_nss_attrs[] = {
389 &sys_ipl_type_attr.attr, 444 &sys_ipl_type_attr.attr,
390 &sys_ipl_nss_name_attr.attr, 445 &sys_ipl_nss_name_attr.attr,
446 &sys_ipl_ccw_loadparm_attr.attr,
447 &sys_ipl_vm_parm_attr.attr,
391 NULL, 448 NULL,
392}; 449};
393 450
@@ -450,7 +507,12 @@ static int __init ipl_init(void)
450 } 507 }
451 switch (ipl_info.type) { 508 switch (ipl_info.type) {
452 case IPL_TYPE_CCW: 509 case IPL_TYPE_CCW:
453 rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group); 510 if (MACHINE_IS_VM)
511 rc = sysfs_create_group(&ipl_kset->kobj,
512 &ipl_ccw_attr_group_vm);
513 else
514 rc = sysfs_create_group(&ipl_kset->kobj,
515 &ipl_ccw_attr_group_lpar);
454 break; 516 break;
455 case IPL_TYPE_FCP: 517 case IPL_TYPE_FCP:
456 case IPL_TYPE_FCP_DUMP: 518 case IPL_TYPE_FCP_DUMP:
@@ -481,6 +543,83 @@ static struct shutdown_action __refdata ipl_action = {
481 * reipl shutdown action: Reboot Linux on shutdown. 543 * reipl shutdown action: Reboot Linux on shutdown.
482 */ 544 */
483 545
546/* VM IPL PARM attributes */
547static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
548 char *page)
549{
550 char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
551
552 reipl_get_ascii_vmparm(vmparm, ipb);
553 return sprintf(page, "%s\n", vmparm);
554}
555
556static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
557 size_t vmparm_max,
558 const char *buf, size_t len)
559{
560 int i, ip_len;
561
562 /* ignore trailing newline */
563 ip_len = len;
564 if ((len > 0) && (buf[len - 1] == '\n'))
565 ip_len--;
566
567 if (ip_len > vmparm_max)
568 return -EINVAL;
569
570 /* parm is used to store kernel options, check for common chars */
571 for (i = 0; i < ip_len; i++)
572 if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i])))
573 return -EINVAL;
574
575 memset(ipb->ipl_info.ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
576 ipb->ipl_info.ccw.vm_parm_len = ip_len;
577 if (ip_len > 0) {
578 ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
579 memcpy(ipb->ipl_info.ccw.vm_parm, buf, ip_len);
580 ASCEBC(ipb->ipl_info.ccw.vm_parm, ip_len);
581 } else {
582 ipb->ipl_info.ccw.vm_flags &= ~DIAG308_VM_FLAGS_VP_VALID;
583 }
584
585 return len;
586}
587
588/* NSS wrapper */
589static ssize_t reipl_nss_vmparm_show(struct kobject *kobj,
590 struct kobj_attribute *attr, char *page)
591{
592 return reipl_generic_vmparm_show(reipl_block_nss, page);
593}
594
595static ssize_t reipl_nss_vmparm_store(struct kobject *kobj,
596 struct kobj_attribute *attr,
597 const char *buf, size_t len)
598{
599 return reipl_generic_vmparm_store(reipl_block_nss, 56, buf, len);
600}
601
602/* CCW wrapper */
603static ssize_t reipl_ccw_vmparm_show(struct kobject *kobj,
604 struct kobj_attribute *attr, char *page)
605{
606 return reipl_generic_vmparm_show(reipl_block_ccw, page);
607}
608
609static ssize_t reipl_ccw_vmparm_store(struct kobject *kobj,
610 struct kobj_attribute *attr,
611 const char *buf, size_t len)
612{
613 return reipl_generic_vmparm_store(reipl_block_ccw, 64, buf, len);
614}
615
616static struct kobj_attribute sys_reipl_nss_vmparm_attr =
617 __ATTR(parm, S_IRUGO | S_IWUSR, reipl_nss_vmparm_show,
618 reipl_nss_vmparm_store);
619static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
620 __ATTR(parm, S_IRUGO | S_IWUSR, reipl_ccw_vmparm_show,
621 reipl_ccw_vmparm_store);
622
484/* FCP reipl device attributes */ 623/* FCP reipl device attributes */
485 624
486DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", 625DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
@@ -513,27 +652,26 @@ static struct attribute_group reipl_fcp_attr_group = {
513DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", 652DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
514 reipl_block_ccw->ipl_info.ccw.devno); 653 reipl_block_ccw->ipl_info.ccw.devno);
515 654
516static void reipl_get_ascii_loadparm(char *loadparm) 655static void reipl_get_ascii_loadparm(char *loadparm,
656 struct ipl_parameter_block *ibp)
517{ 657{
518 memcpy(loadparm, &reipl_block_ccw->ipl_info.ccw.load_param, 658 memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
519 LOADPARM_LEN);
520 EBCASC(loadparm, LOADPARM_LEN); 659 EBCASC(loadparm, LOADPARM_LEN);
521 loadparm[LOADPARM_LEN] = 0; 660 loadparm[LOADPARM_LEN] = 0;
522 strstrip(loadparm); 661 strstrip(loadparm);
523} 662}
524 663
525static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj, 664static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
526 struct kobj_attribute *attr, char *page) 665 char *page)
527{ 666{
528 char buf[LOADPARM_LEN + 1]; 667 char buf[LOADPARM_LEN + 1];
529 668
530 reipl_get_ascii_loadparm(buf); 669 reipl_get_ascii_loadparm(buf, ipb);
531 return sprintf(page, "%s\n", buf); 670 return sprintf(page, "%s\n", buf);
532} 671}
533 672
534static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj, 673static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
535 struct kobj_attribute *attr, 674 const char *buf, size_t len)
536 const char *buf, size_t len)
537{ 675{
538 int i, lp_len; 676 int i, lp_len;
539 677
@@ -552,35 +690,128 @@ static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
552 return -EINVAL; 690 return -EINVAL;
553 } 691 }
554 /* initialize loadparm with blanks */ 692 /* initialize loadparm with blanks */
555 memset(&reipl_block_ccw->ipl_info.ccw.load_param, ' ', LOADPARM_LEN); 693 memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN);
556 /* copy and convert to ebcdic */ 694 /* copy and convert to ebcdic */
557 memcpy(&reipl_block_ccw->ipl_info.ccw.load_param, buf, lp_len); 695 memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len);
558 ASCEBC(reipl_block_ccw->ipl_info.ccw.load_param, LOADPARM_LEN); 696 ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN);
559 return len; 697 return len;
560} 698}
561 699
700/* NSS wrapper */
701static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
702 struct kobj_attribute *attr, char *page)
703{
704 return reipl_generic_loadparm_show(reipl_block_nss, page);
705}
706
707static ssize_t reipl_nss_loadparm_store(struct kobject *kobj,
708 struct kobj_attribute *attr,
709 const char *buf, size_t len)
710{
711 return reipl_generic_loadparm_store(reipl_block_nss, buf, len);
712}
713
714/* CCW wrapper */
715static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
716 struct kobj_attribute *attr, char *page)
717{
718 return reipl_generic_loadparm_show(reipl_block_ccw, page);
719}
720
721static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
722 struct kobj_attribute *attr,
723 const char *buf, size_t len)
724{
725 return reipl_generic_loadparm_store(reipl_block_ccw, buf, len);
726}
727
562static struct kobj_attribute sys_reipl_ccw_loadparm_attr = 728static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
563 __ATTR(loadparm, 0644, reipl_ccw_loadparm_show, 729 __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_ccw_loadparm_show,
564 reipl_ccw_loadparm_store); 730 reipl_ccw_loadparm_store);
565 731
566static struct attribute *reipl_ccw_attrs[] = { 732static struct attribute *reipl_ccw_attrs_vm[] = {
567 &sys_reipl_ccw_device_attr.attr, 733 &sys_reipl_ccw_device_attr.attr,
568 &sys_reipl_ccw_loadparm_attr.attr, 734 &sys_reipl_ccw_loadparm_attr.attr,
735 &sys_reipl_ccw_vmparm_attr.attr,
569 NULL, 736 NULL,
570}; 737};
571 738
572static struct attribute_group reipl_ccw_attr_group = { 739static struct attribute *reipl_ccw_attrs_lpar[] = {
740 &sys_reipl_ccw_device_attr.attr,
741 &sys_reipl_ccw_loadparm_attr.attr,
742 NULL,
743};
744
745static struct attribute_group reipl_ccw_attr_group_vm = {
746 .name = IPL_CCW_STR,
747 .attrs = reipl_ccw_attrs_vm,
748};
749
750static struct attribute_group reipl_ccw_attr_group_lpar = {
573 .name = IPL_CCW_STR, 751 .name = IPL_CCW_STR,
574 .attrs = reipl_ccw_attrs, 752 .attrs = reipl_ccw_attrs_lpar,
575}; 753};
576 754
577 755
578/* NSS reipl device attributes */ 756/* NSS reipl device attributes */
757static void reipl_get_ascii_nss_name(char *dst,
758 struct ipl_parameter_block *ipb)
759{
760 memcpy(dst, ipb->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
761 EBCASC(dst, NSS_NAME_SIZE);
762 dst[NSS_NAME_SIZE] = 0;
763}
764
765static ssize_t reipl_nss_name_show(struct kobject *kobj,
766 struct kobj_attribute *attr, char *page)
767{
768 char nss_name[NSS_NAME_SIZE + 1] = {};
579 769
580DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name); 770 reipl_get_ascii_nss_name(nss_name, reipl_block_nss);
771 return sprintf(page, "%s\n", nss_name);
772}
773
774static ssize_t reipl_nss_name_store(struct kobject *kobj,
775 struct kobj_attribute *attr,
776 const char *buf, size_t len)
777{
778 int nss_len;
779
780 /* ignore trailing newline */
781 nss_len = len;
782 if ((len > 0) && (buf[len - 1] == '\n'))
783 nss_len--;
784
785 if (nss_len > NSS_NAME_SIZE)
786 return -EINVAL;
787
788 memset(reipl_block_nss->ipl_info.ccw.nss_name, 0x40, NSS_NAME_SIZE);
789 if (nss_len > 0) {
790 reipl_block_nss->ipl_info.ccw.vm_flags |=
791 DIAG308_VM_FLAGS_NSS_VALID;
792 memcpy(reipl_block_nss->ipl_info.ccw.nss_name, buf, nss_len);
793 ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
794 EBC_TOUPPER(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
795 } else {
796 reipl_block_nss->ipl_info.ccw.vm_flags &=
797 ~DIAG308_VM_FLAGS_NSS_VALID;
798 }
799
800 return len;
801}
802
803static struct kobj_attribute sys_reipl_nss_name_attr =
804 __ATTR(name, S_IRUGO | S_IWUSR, reipl_nss_name_show,
805 reipl_nss_name_store);
806
807static struct kobj_attribute sys_reipl_nss_loadparm_attr =
808 __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nss_loadparm_show,
809 reipl_nss_loadparm_store);
581 810
582static struct attribute *reipl_nss_attrs[] = { 811static struct attribute *reipl_nss_attrs[] = {
583 &sys_reipl_nss_name_attr.attr, 812 &sys_reipl_nss_name_attr.attr,
813 &sys_reipl_nss_loadparm_attr.attr,
814 &sys_reipl_nss_vmparm_attr.attr,
584 NULL, 815 NULL,
585}; 816};
586 817
@@ -617,7 +848,10 @@ static int reipl_set_type(enum ipl_type type)
617 reipl_method = REIPL_METHOD_FCP_DUMP; 848 reipl_method = REIPL_METHOD_FCP_DUMP;
618 break; 849 break;
619 case IPL_TYPE_NSS: 850 case IPL_TYPE_NSS:
620 reipl_method = REIPL_METHOD_NSS; 851 if (diag308_set_works)
852 reipl_method = REIPL_METHOD_NSS_DIAG;
853 else
854 reipl_method = REIPL_METHOD_NSS;
621 break; 855 break;
622 case IPL_TYPE_UNKNOWN: 856 case IPL_TYPE_UNKNOWN:
623 reipl_method = REIPL_METHOD_DEFAULT; 857 reipl_method = REIPL_METHOD_DEFAULT;
@@ -655,11 +889,38 @@ static struct kobj_attribute reipl_type_attr =
655 889
656static struct kset *reipl_kset; 890static struct kset *reipl_kset;
657 891
892static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
893 const enum ipl_method m)
894{
895 char loadparm[LOADPARM_LEN + 1] = {};
896 char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
897 char nss_name[NSS_NAME_SIZE + 1] = {};
898 size_t pos = 0;
899
900 reipl_get_ascii_loadparm(loadparm, ipb);
901 reipl_get_ascii_nss_name(nss_name, ipb);
902 reipl_get_ascii_vmparm(vmparm, ipb);
903
904 switch (m) {
905 case REIPL_METHOD_CCW_VM:
906 pos = sprintf(dst, "IPL %X CLEAR", ipb->ipl_info.ccw.devno);
907 break;
908 case REIPL_METHOD_NSS:
909 pos = sprintf(dst, "IPL %s", nss_name);
910 break;
911 default:
912 break;
913 }
914 if (strlen(loadparm) > 0)
915 pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm);
916 if (strlen(vmparm) > 0)
917 sprintf(dst + pos, " PARM %s", vmparm);
918}
919
658static void reipl_run(struct shutdown_trigger *trigger) 920static void reipl_run(struct shutdown_trigger *trigger)
659{ 921{
660 struct ccw_dev_id devid; 922 struct ccw_dev_id devid;
661 static char buf[100]; 923 static char buf[128];
662 char loadparm[LOADPARM_LEN + 1];
663 924
664 switch (reipl_method) { 925 switch (reipl_method) {
665 case REIPL_METHOD_CCW_CIO: 926 case REIPL_METHOD_CCW_CIO:
@@ -668,13 +929,7 @@ static void reipl_run(struct shutdown_trigger *trigger)
668 reipl_ccw_dev(&devid); 929 reipl_ccw_dev(&devid);
669 break; 930 break;
670 case REIPL_METHOD_CCW_VM: 931 case REIPL_METHOD_CCW_VM:
671 reipl_get_ascii_loadparm(loadparm); 932 get_ipl_string(buf, reipl_block_ccw, REIPL_METHOD_CCW_VM);
672 if (strlen(loadparm) == 0)
673 sprintf(buf, "IPL %X CLEAR",
674 reipl_block_ccw->ipl_info.ccw.devno);
675 else
676 sprintf(buf, "IPL %X CLEAR LOADPARM '%s'",
677 reipl_block_ccw->ipl_info.ccw.devno, loadparm);
678 __cpcmd(buf, NULL, 0, NULL); 933 __cpcmd(buf, NULL, 0, NULL);
679 break; 934 break;
680 case REIPL_METHOD_CCW_DIAG: 935 case REIPL_METHOD_CCW_DIAG:
@@ -691,8 +946,12 @@ static void reipl_run(struct shutdown_trigger *trigger)
691 case REIPL_METHOD_FCP_RO_VM: 946 case REIPL_METHOD_FCP_RO_VM:
692 __cpcmd("IPL", NULL, 0, NULL); 947 __cpcmd("IPL", NULL, 0, NULL);
693 break; 948 break;
949 case REIPL_METHOD_NSS_DIAG:
950 diag308(DIAG308_SET, reipl_block_nss);
951 diag308(DIAG308_IPL, NULL);
952 break;
694 case REIPL_METHOD_NSS: 953 case REIPL_METHOD_NSS:
695 sprintf(buf, "IPL %s", reipl_nss_name); 954 get_ipl_string(buf, reipl_block_nss, REIPL_METHOD_NSS);
696 __cpcmd(buf, NULL, 0, NULL); 955 __cpcmd(buf, NULL, 0, NULL);
697 break; 956 break;
698 case REIPL_METHOD_DEFAULT: 957 case REIPL_METHOD_DEFAULT:
@@ -707,16 +966,36 @@ static void reipl_run(struct shutdown_trigger *trigger)
707 disabled_wait((unsigned long) __builtin_return_address(0)); 966 disabled_wait((unsigned long) __builtin_return_address(0));
708} 967}
709 968
710static void __init reipl_probe(void) 969static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
711{ 970{
712 void *buffer; 971 ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
972 ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
973 ipb->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
974 ipb->hdr.pbt = DIAG308_IPL_TYPE_CCW;
975}
713 976
714 buffer = (void *) get_zeroed_page(GFP_KERNEL); 977static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
715 if (!buffer) 978{
716 return; 979 /* LOADPARM */
717 if (diag308(DIAG308_STORE, buffer) == DIAG308_RC_OK) 980 /* check if read scp info worked and set loadparm */
718 diag308_set_works = 1; 981 if (sclp_ipl_info.is_valid)
719 free_page((unsigned long)buffer); 982 memcpy(ipb->ipl_info.ccw.load_parm,
983 &sclp_ipl_info.loadparm, LOADPARM_LEN);
984 else
985 /* read scp info failed: set empty loadparm (EBCDIC blanks) */
986 memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN);
987 ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
988
989 /* VM PARM */
990 if (MACHINE_IS_VM && diag308_set_works &&
991 (ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) {
992
993 ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
994 ipb->ipl_info.ccw.vm_parm_len =
995 ipl_block.ipl_info.ccw.vm_parm_len;
996 memcpy(ipb->ipl_info.ccw.vm_parm,
997 ipl_block.ipl_info.ccw.vm_parm, DIAG308_VMPARM_SIZE);
998 }
720} 999}
721 1000
722static int __init reipl_nss_init(void) 1001static int __init reipl_nss_init(void)
@@ -725,10 +1004,31 @@ static int __init reipl_nss_init(void)
725 1004
726 if (!MACHINE_IS_VM) 1005 if (!MACHINE_IS_VM)
727 return 0; 1006 return 0;
1007
1008 reipl_block_nss = (void *) get_zeroed_page(GFP_KERNEL);
1009 if (!reipl_block_nss)
1010 return -ENOMEM;
1011
1012 if (!diag308_set_works)
1013 sys_reipl_nss_vmparm_attr.attr.mode = S_IRUGO;
1014
728 rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group); 1015 rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
729 if (rc) 1016 if (rc)
730 return rc; 1017 return rc;
731 strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1); 1018
1019 reipl_block_ccw_init(reipl_block_nss);
1020 if (ipl_info.type == IPL_TYPE_NSS) {
1021 memset(reipl_block_nss->ipl_info.ccw.nss_name,
1022 ' ', NSS_NAME_SIZE);
1023 memcpy(reipl_block_nss->ipl_info.ccw.nss_name,
1024 kernel_nss_name, strlen(kernel_nss_name));
1025 ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
1026 reipl_block_nss->ipl_info.ccw.vm_flags |=
1027 DIAG308_VM_FLAGS_NSS_VALID;
1028
1029 reipl_block_ccw_fill_parms(reipl_block_nss);
1030 }
1031
732 reipl_capabilities |= IPL_TYPE_NSS; 1032 reipl_capabilities |= IPL_TYPE_NSS;
733 return 0; 1033 return 0;
734} 1034}
@@ -740,28 +1040,27 @@ static int __init reipl_ccw_init(void)
740 reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL); 1040 reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
741 if (!reipl_block_ccw) 1041 if (!reipl_block_ccw)
742 return -ENOMEM; 1042 return -ENOMEM;
743 rc = sysfs_create_group(&reipl_kset->kobj, &reipl_ccw_attr_group); 1043
744 if (rc) { 1044 if (MACHINE_IS_VM) {
745 free_page((unsigned long)reipl_block_ccw); 1045 if (!diag308_set_works)
746 return rc; 1046 sys_reipl_ccw_vmparm_attr.attr.mode = S_IRUGO;
1047 rc = sysfs_create_group(&reipl_kset->kobj,
1048 &reipl_ccw_attr_group_vm);
1049 } else {
1050 if(!diag308_set_works)
1051 sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
1052 rc = sysfs_create_group(&reipl_kset->kobj,
1053 &reipl_ccw_attr_group_lpar);
747 } 1054 }
748 reipl_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN; 1055 if (rc)
749 reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; 1056 return rc;
750 reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; 1057
751 reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; 1058 reipl_block_ccw_init(reipl_block_ccw);
752 reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID; 1059 if (ipl_info.type == IPL_TYPE_CCW) {
753 /* check if read scp info worked and set loadparm */
754 if (sclp_ipl_info.is_valid)
755 memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
756 &sclp_ipl_info.loadparm, LOADPARM_LEN);
757 else
758 /* read scp info failed: set empty loadparm (EBCDIC blanks) */
759 memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
760 LOADPARM_LEN);
761 if (!MACHINE_IS_VM && !diag308_set_works)
762 sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
763 if (ipl_info.type == IPL_TYPE_CCW)
764 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; 1060 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
1061 reipl_block_ccw_fill_parms(reipl_block_ccw);
1062 }
1063
765 reipl_capabilities |= IPL_TYPE_CCW; 1064 reipl_capabilities |= IPL_TYPE_CCW;
766 return 0; 1065 return 0;
767} 1066}
@@ -1298,7 +1597,6 @@ static void __init shutdown_actions_init(void)
1298 1597
1299static int __init s390_ipl_init(void) 1598static int __init s390_ipl_init(void)
1300{ 1599{
1301 reipl_probe();
1302 sclp_get_ipl_info(&sclp_ipl_info); 1600 sclp_get_ipl_info(&sclp_ipl_info);
1303 shutdown_actions_init(); 1601 shutdown_actions_init();
1304 shutdown_triggers_init(); 1602 shutdown_triggers_init();
@@ -1405,6 +1703,12 @@ void __init setup_ipl(void)
1405 atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb); 1703 atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
1406} 1704}
1407 1705
1706void __init ipl_update_parameters(void)
1707{
1708 if (diag308(DIAG308_STORE, &ipl_block) == DIAG308_RC_OK)
1709 diag308_set_works = 1;
1710}
1711
1408void __init ipl_save_parameters(void) 1712void __init ipl_save_parameters(void)
1409{ 1713{
1410 struct cio_iplinfo iplinfo; 1714 struct cio_iplinfo iplinfo;
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index ed04d1372d5d..288ad490a6dd 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -41,10 +41,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
41 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr)) 41 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
42 return -EINVAL; 42 return -EINVAL;
43 43
44 if ((unsigned long)p->addr & 0x01) { 44 if ((unsigned long)p->addr & 0x01)
45 printk("Attempt to register kprobe at an unaligned address\n");
46 return -EINVAL; 45 return -EINVAL;
47 }
48 46
49 /* Use the get_insn_slot() facility for correctness */ 47 /* Use the get_insn_slot() facility for correctness */
50 if (!(p->ainsn.insn = get_insn_slot())) 48 if (!(p->ainsn.insn = get_insn_slot()))
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 3c77dd36994c..131d7ee8b416 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -52,7 +52,6 @@ void machine_kexec_cleanup(struct kimage *image)
52 52
53void machine_shutdown(void) 53void machine_shutdown(void)
54{ 54{
55 printk(KERN_INFO "kexec: machine_shutdown called\n");
56} 55}
57 56
58void machine_kexec(struct kimage *image) 57void machine_kexec(struct kimage *image)
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
new file mode 100644
index 000000000000..18ed7abe16c5
--- /dev/null
+++ b/arch/s390/kernel/mem_detect.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright IBM Corp. 2008
3 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
4 */
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <asm/ipl.h>
9#include <asm/sclp.h>
10#include <asm/setup.h>
11
12static int memory_fast_detect(struct mem_chunk *chunk)
13{
14 unsigned long val0 = 0;
15 unsigned long val1 = 0xc;
16 int rc = -EOPNOTSUPP;
17
18 if (ipl_flags & IPL_NSS_VALID)
19 return -EOPNOTSUPP;
20 asm volatile(
21 " diag %1,%2,0x260\n"
22 "0: lhi %0,0\n"
23 "1:\n"
24 EX_TABLE(0b,1b)
25 : "+d" (rc), "+d" (val0), "+d" (val1) : : "cc");
26
27 if (rc || val0 != val1)
28 return -EOPNOTSUPP;
29 chunk->size = val0 + 1;
30 return 0;
31}
32
33static inline int tprot(unsigned long addr)
34{
35 int rc = -EFAULT;
36
37 asm volatile(
38 " tprot 0(%1),0\n"
39 "0: ipm %0\n"
40 " srl %0,28\n"
41 "1:\n"
42 EX_TABLE(0b,1b)
43 : "+d" (rc) : "a" (addr) : "cc");
44 return rc;
45}
46
47#define ADDR2G (1ULL << 31)
48
49static void find_memory_chunks(struct mem_chunk chunk[])
50{
51 unsigned long long memsize, rnmax, rzm;
52 unsigned long addr = 0, size;
53 int i = 0, type;
54
55 rzm = sclp_get_rzm();
56 rnmax = sclp_get_rnmax();
57 memsize = rzm * rnmax;
58 if (!rzm)
59 rzm = 1ULL << 17;
60 if (sizeof(long) == 4) {
61 rzm = min(ADDR2G, rzm);
62 memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
63 }
64 do {
65 size = 0;
66 type = tprot(addr);
67 do {
68 size += rzm;
69 if (memsize && addr + size >= memsize)
70 break;
71 } while (type == tprot(addr + size));
72 if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
73 chunk[i].addr = addr;
74 chunk[i].size = size;
75 chunk[i].type = type;
76 i++;
77 }
78 addr += size;
79 } while (addr < memsize && i < MEMORY_CHUNKS);
80}
81
82void detect_memory_layout(struct mem_chunk chunk[])
83{
84 unsigned long flags, cr0;
85
86 memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
87 if (memory_fast_detect(&chunk[0]) == 0)
88 return;
89 /* Disable IRQs, DAT and low address protection so tprot does the
90 * right thing and we don't get scheduled away with low address
91 * protection disabled.
92 */
93 flags = __raw_local_irq_stnsm(0xf8);
94 __ctl_store(cr0, 0, 0);
95 __ctl_clear_bit(0, 28);
96 find_memory_chunks(chunk);
97 __ctl_load(cr0, 0, 0);
98 __raw_local_irq_ssm(flags);
99}
100EXPORT_SYMBOL(detect_memory_layout);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7920861109d2..85defd01d293 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -75,46 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
75 return sf->gprs[8]; 75 return sf->gprs[8];
76} 76}
77 77
78/*
79 * Need to know about CPUs going idle?
80 */
81static ATOMIC_NOTIFIER_HEAD(idle_chain);
82DEFINE_PER_CPU(struct s390_idle_data, s390_idle); 78DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
83 79
84int register_idle_notifier(struct notifier_block *nb)
85{
86 return atomic_notifier_chain_register(&idle_chain, nb);
87}
88EXPORT_SYMBOL(register_idle_notifier);
89
90int unregister_idle_notifier(struct notifier_block *nb)
91{
92 return atomic_notifier_chain_unregister(&idle_chain, nb);
93}
94EXPORT_SYMBOL(unregister_idle_notifier);
95
96static int s390_idle_enter(void) 80static int s390_idle_enter(void)
97{ 81{
98 struct s390_idle_data *idle; 82 struct s390_idle_data *idle;
99 int nr_calls = 0;
100 void *hcpu;
101 int rc;
102 83
103 hcpu = (void *)(long)smp_processor_id();
104 rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
105 &nr_calls);
106 if (rc == NOTIFY_BAD) {
107 nr_calls--;
108 __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
109 hcpu, nr_calls, NULL);
110 return rc;
111 }
112 idle = &__get_cpu_var(s390_idle); 84 idle = &__get_cpu_var(s390_idle);
113 spin_lock(&idle->lock); 85 spin_lock(&idle->lock);
114 idle->idle_count++; 86 idle->idle_count++;
115 idle->in_idle = 1; 87 idle->in_idle = 1;
116 idle->idle_enter = get_clock(); 88 idle->idle_enter = get_clock();
117 spin_unlock(&idle->lock); 89 spin_unlock(&idle->lock);
90 vtime_stop_cpu_timer();
118 return NOTIFY_OK; 91 return NOTIFY_OK;
119} 92}
120 93
@@ -122,13 +95,12 @@ void s390_idle_leave(void)
122{ 95{
123 struct s390_idle_data *idle; 96 struct s390_idle_data *idle;
124 97
98 vtime_start_cpu_timer();
125 idle = &__get_cpu_var(s390_idle); 99 idle = &__get_cpu_var(s390_idle);
126 spin_lock(&idle->lock); 100 spin_lock(&idle->lock);
127 idle->idle_time += get_clock() - idle->idle_enter; 101 idle->idle_time += get_clock() - idle->idle_enter;
128 idle->in_idle = 0; 102 idle->in_idle = 0;
129 spin_unlock(&idle->lock); 103 spin_unlock(&idle->lock);
130 atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
131 (void *)(long) smp_processor_id());
132} 104}
133 105
134extern void s390_handle_mcck(void); 106extern void s390_handle_mcck(void);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 35827b9bd4d1..2815bfe348a6 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -33,6 +33,8 @@
33#include <linux/security.h> 33#include <linux/security.h>
34#include <linux/audit.h> 34#include <linux/audit.h>
35#include <linux/signal.h> 35#include <linux/signal.h>
36#include <linux/elf.h>
37#include <linux/regset.h>
36 38
37#include <asm/segment.h> 39#include <asm/segment.h>
38#include <asm/page.h> 40#include <asm/page.h>
@@ -47,6 +49,11 @@
47#include "compat_ptrace.h" 49#include "compat_ptrace.h"
48#endif 50#endif
49 51
52enum s390_regset {
53 REGSET_GENERAL,
54 REGSET_FP,
55};
56
50static void 57static void
51FixPerRegisters(struct task_struct *task) 58FixPerRegisters(struct task_struct *task)
52{ 59{
@@ -126,24 +133,10 @@ ptrace_disable(struct task_struct *child)
126 * struct user contain pad bytes that should be read as zeroes. 133 * struct user contain pad bytes that should be read as zeroes.
127 * Lovely... 134 * Lovely...
128 */ 135 */
129static int 136static unsigned long __peek_user(struct task_struct *child, addr_t addr)
130peek_user(struct task_struct *child, addr_t addr, addr_t data)
131{ 137{
132 struct user *dummy = NULL; 138 struct user *dummy = NULL;
133 addr_t offset, tmp, mask; 139 addr_t offset, tmp;
134
135 /*
136 * Stupid gdb peeks/pokes the access registers in 64 bit with
137 * an alignment of 4. Programmers from hell...
138 */
139 mask = __ADDR_MASK;
140#ifdef CONFIG_64BIT
141 if (addr >= (addr_t) &dummy->regs.acrs &&
142 addr < (addr_t) &dummy->regs.orig_gpr2)
143 mask = 3;
144#endif
145 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
146 return -EIO;
147 140
148 if (addr < (addr_t) &dummy->regs.acrs) { 141 if (addr < (addr_t) &dummy->regs.acrs) {
149 /* 142 /*
@@ -197,24 +190,18 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
197 } else 190 } else
198 tmp = 0; 191 tmp = 0;
199 192
200 return put_user(tmp, (addr_t __user *) data); 193 return tmp;
201} 194}
202 195
203/*
204 * Write a word to the user area of a process at location addr. This
205 * operation does have an additional problem compared to peek_user.
206 * Stores to the program status word and on the floating point
207 * control register needs to get checked for validity.
208 */
209static int 196static int
210poke_user(struct task_struct *child, addr_t addr, addr_t data) 197peek_user(struct task_struct *child, addr_t addr, addr_t data)
211{ 198{
212 struct user *dummy = NULL; 199 struct user *dummy = NULL;
213 addr_t offset, mask; 200 addr_t tmp, mask;
214 201
215 /* 202 /*
216 * Stupid gdb peeks/pokes the access registers in 64 bit with 203 * Stupid gdb peeks/pokes the access registers in 64 bit with
217 * an alignment of 4. Programmers from hell indeed... 204 * an alignment of 4. Programmers from hell...
218 */ 205 */
219 mask = __ADDR_MASK; 206 mask = __ADDR_MASK;
220#ifdef CONFIG_64BIT 207#ifdef CONFIG_64BIT
@@ -225,6 +212,21 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
225 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 212 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
226 return -EIO; 213 return -EIO;
227 214
215 tmp = __peek_user(child, addr);
216 return put_user(tmp, (addr_t __user *) data);
217}
218
219/*
220 * Write a word to the user area of a process at location addr. This
221 * operation does have an additional problem compared to peek_user.
222 * Stores to the program status word and on the floating point
223 * control register needs to get checked for validity.
224 */
225static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
226{
227 struct user *dummy = NULL;
228 addr_t offset;
229
228 if (addr < (addr_t) &dummy->regs.acrs) { 230 if (addr < (addr_t) &dummy->regs.acrs) {
229 /* 231 /*
230 * psw and gprs are stored on the stack 232 * psw and gprs are stored on the stack
@@ -292,6 +294,28 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
292 return 0; 294 return 0;
293} 295}
294 296
297static int
298poke_user(struct task_struct *child, addr_t addr, addr_t data)
299{
300 struct user *dummy = NULL;
301 addr_t mask;
302
303 /*
304 * Stupid gdb peeks/pokes the access registers in 64 bit with
305 * an alignment of 4. Programmers from hell indeed...
306 */
307 mask = __ADDR_MASK;
308#ifdef CONFIG_64BIT
309 if (addr >= (addr_t) &dummy->regs.acrs &&
310 addr < (addr_t) &dummy->regs.orig_gpr2)
311 mask = 3;
312#endif
313 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
314 return -EIO;
315
316 return __poke_user(child, addr, data);
317}
318
295long arch_ptrace(struct task_struct *child, long request, long addr, long data) 319long arch_ptrace(struct task_struct *child, long request, long addr, long data)
296{ 320{
297 ptrace_area parea; 321 ptrace_area parea;
@@ -367,18 +391,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
367/* 391/*
368 * Same as peek_user but for a 31 bit program. 392 * Same as peek_user but for a 31 bit program.
369 */ 393 */
370static int 394static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
371peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
372{ 395{
373 struct user32 *dummy32 = NULL; 396 struct user32 *dummy32 = NULL;
374 per_struct32 *dummy_per32 = NULL; 397 per_struct32 *dummy_per32 = NULL;
375 addr_t offset; 398 addr_t offset;
376 __u32 tmp; 399 __u32 tmp;
377 400
378 if (!test_thread_flag(TIF_31BIT) ||
379 (addr & 3) || addr > sizeof(struct user) - 3)
380 return -EIO;
381
382 if (addr < (addr_t) &dummy32->regs.acrs) { 401 if (addr < (addr_t) &dummy32->regs.acrs) {
383 /* 402 /*
384 * psw and gprs are stored on the stack 403 * psw and gprs are stored on the stack
@@ -435,25 +454,32 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
435 } else 454 } else
436 tmp = 0; 455 tmp = 0;
437 456
457 return tmp;
458}
459
460static int peek_user_compat(struct task_struct *child,
461 addr_t addr, addr_t data)
462{
463 __u32 tmp;
464
465 if (!test_thread_flag(TIF_31BIT) ||
466 (addr & 3) || addr > sizeof(struct user) - 3)
467 return -EIO;
468
469 tmp = __peek_user_compat(child, addr);
438 return put_user(tmp, (__u32 __user *) data); 470 return put_user(tmp, (__u32 __user *) data);
439} 471}
440 472
441/* 473/*
442 * Same as poke_user but for a 31 bit program. 474 * Same as poke_user but for a 31 bit program.
443 */ 475 */
444static int 476static int __poke_user_compat(struct task_struct *child,
445poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data) 477 addr_t addr, addr_t data)
446{ 478{
447 struct user32 *dummy32 = NULL; 479 struct user32 *dummy32 = NULL;
448 per_struct32 *dummy_per32 = NULL; 480 per_struct32 *dummy_per32 = NULL;
481 __u32 tmp = (__u32) data;
449 addr_t offset; 482 addr_t offset;
450 __u32 tmp;
451
452 if (!test_thread_flag(TIF_31BIT) ||
453 (addr & 3) || addr > sizeof(struct user32) - 3)
454 return -EIO;
455
456 tmp = (__u32) data;
457 483
458 if (addr < (addr_t) &dummy32->regs.acrs) { 484 if (addr < (addr_t) &dummy32->regs.acrs) {
459 /* 485 /*
@@ -528,6 +554,16 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
528 return 0; 554 return 0;
529} 555}
530 556
557static int poke_user_compat(struct task_struct *child,
558 addr_t addr, addr_t data)
559{
560 if (!test_thread_flag(TIF_31BIT) ||
561 (addr & 3) || addr > sizeof(struct user32) - 3)
562 return -EIO;
563
564 return __poke_user_compat(child, addr, data);
565}
566
531long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 567long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
532 compat_ulong_t caddr, compat_ulong_t cdata) 568 compat_ulong_t caddr, compat_ulong_t cdata)
533{ 569{
@@ -539,11 +575,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
539 switch (request) { 575 switch (request) {
540 case PTRACE_PEEKUSR: 576 case PTRACE_PEEKUSR:
541 /* read the word at location addr in the USER area. */ 577 /* read the word at location addr in the USER area. */
542 return peek_user_emu31(child, addr, data); 578 return peek_user_compat(child, addr, data);
543 579
544 case PTRACE_POKEUSR: 580 case PTRACE_POKEUSR:
545 /* write the word at location addr in the USER area */ 581 /* write the word at location addr in the USER area */
546 return poke_user_emu31(child, addr, data); 582 return poke_user_compat(child, addr, data);
547 583
548 case PTRACE_PEEKUSR_AREA: 584 case PTRACE_PEEKUSR_AREA:
549 case PTRACE_POKEUSR_AREA: 585 case PTRACE_POKEUSR_AREA:
@@ -555,13 +591,13 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
555 copied = 0; 591 copied = 0;
556 while (copied < parea.len) { 592 while (copied < parea.len) {
557 if (request == PTRACE_PEEKUSR_AREA) 593 if (request == PTRACE_PEEKUSR_AREA)
558 ret = peek_user_emu31(child, addr, data); 594 ret = peek_user_compat(child, addr, data);
559 else { 595 else {
560 __u32 utmp; 596 __u32 utmp;
561 if (get_user(utmp, 597 if (get_user(utmp,
562 (__u32 __force __user *) data)) 598 (__u32 __force __user *) data))
563 return -EFAULT; 599 return -EFAULT;
564 ret = poke_user_emu31(child, addr, utmp); 600 ret = poke_user_compat(child, addr, utmp);
565 } 601 }
566 if (ret) 602 if (ret)
567 return ret; 603 return ret;
@@ -610,3 +646,240 @@ syscall_trace(struct pt_regs *regs, int entryexit)
610 regs->gprs[2], regs->orig_gpr2, regs->gprs[3], 646 regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
611 regs->gprs[4], regs->gprs[5]); 647 regs->gprs[4], regs->gprs[5]);
612} 648}
649
650/*
651 * user_regset definitions.
652 */
653
654static int s390_regs_get(struct task_struct *target,
655 const struct user_regset *regset,
656 unsigned int pos, unsigned int count,
657 void *kbuf, void __user *ubuf)
658{
659 if (target == current)
660 save_access_regs(target->thread.acrs);
661
662 if (kbuf) {
663 unsigned long *k = kbuf;
664 while (count > 0) {
665 *k++ = __peek_user(target, pos);
666 count -= sizeof(*k);
667 pos += sizeof(*k);
668 }
669 } else {
670 unsigned long __user *u = ubuf;
671 while (count > 0) {
672 if (__put_user(__peek_user(target, pos), u++))
673 return -EFAULT;
674 count -= sizeof(*u);
675 pos += sizeof(*u);
676 }
677 }
678 return 0;
679}
680
681static int s390_regs_set(struct task_struct *target,
682 const struct user_regset *regset,
683 unsigned int pos, unsigned int count,
684 const void *kbuf, const void __user *ubuf)
685{
686 int rc = 0;
687
688 if (target == current)
689 save_access_regs(target->thread.acrs);
690
691 if (kbuf) {
692 const unsigned long *k = kbuf;
693 while (count > 0 && !rc) {
694 rc = __poke_user(target, pos, *k++);
695 count -= sizeof(*k);
696 pos += sizeof(*k);
697 }
698 } else {
699 const unsigned long __user *u = ubuf;
700 while (count > 0 && !rc) {
701 unsigned long word;
702 rc = __get_user(word, u++);
703 if (rc)
704 break;
705 rc = __poke_user(target, pos, word);
706 count -= sizeof(*u);
707 pos += sizeof(*u);
708 }
709 }
710
711 if (rc == 0 && target == current)
712 restore_access_regs(target->thread.acrs);
713
714 return rc;
715}
716
717static int s390_fpregs_get(struct task_struct *target,
718 const struct user_regset *regset, unsigned int pos,
719 unsigned int count, void *kbuf, void __user *ubuf)
720{
721 if (target == current)
722 save_fp_regs(&target->thread.fp_regs);
723
724 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
725 &target->thread.fp_regs, 0, -1);
726}
727
728static int s390_fpregs_set(struct task_struct *target,
729 const struct user_regset *regset, unsigned int pos,
730 unsigned int count, const void *kbuf,
731 const void __user *ubuf)
732{
733 int rc = 0;
734
735 if (target == current)
736 save_fp_regs(&target->thread.fp_regs);
737
738 /* If setting FPC, must validate it first. */
739 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
740 u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
741 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
742 0, offsetof(s390_fp_regs, fprs));
743 if (rc)
744 return rc;
745 if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
746 return -EINVAL;
747 target->thread.fp_regs.fpc = fpc[0];
748 }
749
750 if (rc == 0 && count > 0)
751 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
752 target->thread.fp_regs.fprs,
753 offsetof(s390_fp_regs, fprs), -1);
754
755 if (rc == 0 && target == current)
756 restore_fp_regs(&target->thread.fp_regs);
757
758 return rc;
759}
760
761static const struct user_regset s390_regsets[] = {
762 [REGSET_GENERAL] = {
763 .core_note_type = NT_PRSTATUS,
764 .n = sizeof(s390_regs) / sizeof(long),
765 .size = sizeof(long),
766 .align = sizeof(long),
767 .get = s390_regs_get,
768 .set = s390_regs_set,
769 },
770 [REGSET_FP] = {
771 .core_note_type = NT_PRFPREG,
772 .n = sizeof(s390_fp_regs) / sizeof(long),
773 .size = sizeof(long),
774 .align = sizeof(long),
775 .get = s390_fpregs_get,
776 .set = s390_fpregs_set,
777 },
778};
779
780static const struct user_regset_view user_s390_view = {
781 .name = UTS_MACHINE,
782 .e_machine = EM_S390,
783 .regsets = s390_regsets,
784 .n = ARRAY_SIZE(s390_regsets)
785};
786
787#ifdef CONFIG_COMPAT
788static int s390_compat_regs_get(struct task_struct *target,
789 const struct user_regset *regset,
790 unsigned int pos, unsigned int count,
791 void *kbuf, void __user *ubuf)
792{
793 if (target == current)
794 save_access_regs(target->thread.acrs);
795
796 if (kbuf) {
797 compat_ulong_t *k = kbuf;
798 while (count > 0) {
799 *k++ = __peek_user_compat(target, pos);
800 count -= sizeof(*k);
801 pos += sizeof(*k);
802 }
803 } else {
804 compat_ulong_t __user *u = ubuf;
805 while (count > 0) {
806 if (__put_user(__peek_user_compat(target, pos), u++))
807 return -EFAULT;
808 count -= sizeof(*u);
809 pos += sizeof(*u);
810 }
811 }
812 return 0;
813}
814
815static int s390_compat_regs_set(struct task_struct *target,
816 const struct user_regset *regset,
817 unsigned int pos, unsigned int count,
818 const void *kbuf, const void __user *ubuf)
819{
820 int rc = 0;
821
822 if (target == current)
823 save_access_regs(target->thread.acrs);
824
825 if (kbuf) {
826 const compat_ulong_t *k = kbuf;
827 while (count > 0 && !rc) {
828 rc = __poke_user_compat(target, pos, *k++);
829 count -= sizeof(*k);
830 pos += sizeof(*k);
831 }
832 } else {
833 const compat_ulong_t __user *u = ubuf;
834 while (count > 0 && !rc) {
835 compat_ulong_t word;
836 rc = __get_user(word, u++);
837 if (rc)
838 break;
839 rc = __poke_user_compat(target, pos, word);
840 count -= sizeof(*u);
841 pos += sizeof(*u);
842 }
843 }
844
845 if (rc == 0 && target == current)
846 restore_access_regs(target->thread.acrs);
847
848 return rc;
849}
850
851static const struct user_regset s390_compat_regsets[] = {
852 [REGSET_GENERAL] = {
853 .core_note_type = NT_PRSTATUS,
854 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
855 .size = sizeof(compat_long_t),
856 .align = sizeof(compat_long_t),
857 .get = s390_compat_regs_get,
858 .set = s390_compat_regs_set,
859 },
860 [REGSET_FP] = {
861 .core_note_type = NT_PRFPREG,
862 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
863 .size = sizeof(compat_long_t),
864 .align = sizeof(compat_long_t),
865 .get = s390_fpregs_get,
866 .set = s390_fpregs_set,
867 },
868};
869
870static const struct user_regset_view user_s390_compat_view = {
871 .name = "s390",
872 .e_machine = EM_S390,
873 .regsets = s390_compat_regsets,
874 .n = ARRAY_SIZE(s390_compat_regsets)
875};
876#endif
877
878const struct user_regset_view *task_user_regset_view(struct task_struct *task)
879{
880#ifdef CONFIG_COMPAT
881 if (test_tsk_thread_flag(task, TIF_31BIT))
882 return &user_s390_compat_view;
883#endif
884 return &user_s390_view;
885}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2bc70b6e876a..b358e18273b0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -77,7 +77,7 @@ unsigned long machine_flags;
77unsigned long elf_hwcap = 0; 77unsigned long elf_hwcap = 0;
78char elf_platform[ELF_PLATFORM_SIZE]; 78char elf_platform[ELF_PLATFORM_SIZE];
79 79
80struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS]; 80struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
81volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ 81volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
82static unsigned long __initdata memory_end; 82static unsigned long __initdata memory_end;
83 83
@@ -205,12 +205,6 @@ static void __init conmode_default(void)
205 SET_CONSOLE_SCLP; 205 SET_CONSOLE_SCLP;
206#endif 206#endif
207 } 207 }
208 } else if (MACHINE_IS_P390) {
209#if defined(CONFIG_TN3215_CONSOLE)
210 SET_CONSOLE_3215;
211#elif defined(CONFIG_TN3270_CONSOLE)
212 SET_CONSOLE_3270;
213#endif
214 } else { 208 } else {
215#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) 209#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
216 SET_CONSOLE_SCLP; 210 SET_CONSOLE_SCLP;
@@ -221,18 +215,17 @@ static void __init conmode_default(void)
221#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 215#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
222static void __init setup_zfcpdump(unsigned int console_devno) 216static void __init setup_zfcpdump(unsigned int console_devno)
223{ 217{
224 static char str[64]; 218 static char str[41];
225 219
226 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 220 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
227 return; 221 return;
228 if (console_devno != -1) 222 if (console_devno != -1)
229 sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x", 223 sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
230 ipl_info.data.fcp.dev_id.devno, console_devno); 224 ipl_info.data.fcp.dev_id.devno, console_devno);
231 else 225 else
232 sprintf(str, "cio_ignore=all,!0.0.%04x", 226 sprintf(str, " cio_ignore=all,!0.0.%04x",
233 ipl_info.data.fcp.dev_id.devno); 227 ipl_info.data.fcp.dev_id.devno);
234 strcat(COMMAND_LINE, " "); 228 strcat(boot_command_line, str);
235 strcat(COMMAND_LINE, str);
236 console_loglevel = 2; 229 console_loglevel = 2;
237} 230}
238#else 231#else
@@ -289,32 +282,6 @@ static int __init early_parse_mem(char *p)
289} 282}
290early_param("mem", early_parse_mem); 283early_param("mem", early_parse_mem);
291 284
292/*
293 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
294 */
295static int __init early_parse_ipldelay(char *p)
296{
297 unsigned long delay = 0;
298
299 delay = simple_strtoul(p, &p, 0);
300
301 switch (*p) {
302 case 's':
303 case 'S':
304 delay *= 1000000;
305 break;
306 case 'm':
307 case 'M':
308 delay *= 60 * 1000000;
309 }
310
311 /* now wait for the requested amount of time */
312 udelay(delay);
313
314 return 0;
315}
316early_param("ipldelay", early_parse_ipldelay);
317
318#ifdef CONFIG_S390_SWITCH_AMODE 285#ifdef CONFIG_S390_SWITCH_AMODE
319#ifdef CONFIG_PGSTE 286#ifdef CONFIG_PGSTE
320unsigned int switch_amode = 1; 287unsigned int switch_amode = 1;
@@ -804,11 +771,9 @@ setup_arch(char **cmdline_p)
804 printk("We are running native (64 bit mode)\n"); 771 printk("We are running native (64 bit mode)\n");
805#endif /* CONFIG_64BIT */ 772#endif /* CONFIG_64BIT */
806 773
807 /* Save unparsed command line copy for /proc/cmdline */ 774 /* Have one command line that is parsed and saved in /proc/cmdline */
808 strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); 775 /* boot_command_line has been already set up in early.c */
809 776 *cmdline_p = boot_command_line;
810 *cmdline_p = COMMAND_LINE;
811 *(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';
812 777
813 ROOT_DEV = Root_RAM0; 778 ROOT_DEV = Root_RAM0;
814 779
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 7aec676fefd5..7418bebb547f 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -3,7 +3,7 @@
3 * Time of day based timer functions. 3 * Time of day based timer functions.
4 * 4 *
5 * S390 version 5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 1999, 2008
7 * Author(s): Hartmut Penner (hp@de.ibm.com), 7 * Author(s): Hartmut Penner (hp@de.ibm.com),
8 * Martin Schwidefsky (schwidefsky@de.ibm.com), 8 * Martin Schwidefsky (schwidefsky@de.ibm.com),
9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) 9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
@@ -31,6 +31,7 @@
31#include <linux/notifier.h> 31#include <linux/notifier.h>
32#include <linux/clocksource.h> 32#include <linux/clocksource.h>
33#include <linux/clockchips.h> 33#include <linux/clockchips.h>
34#include <linux/bootmem.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/delay.h> 36#include <asm/delay.h>
36#include <asm/s390_ext.h> 37#include <asm/s390_ext.h>
@@ -162,7 +163,7 @@ void init_cpu_timer(void)
162 /* Enable clock comparator timer interrupt. */ 163 /* Enable clock comparator timer interrupt. */
163 __ctl_set_bit(0,11); 164 __ctl_set_bit(0,11);
164 165
165 /* Always allow ETR external interrupts, even without an ETR. */ 166 /* Always allow the timing alert external interrupt. */
166 __ctl_set_bit(0, 4); 167 __ctl_set_bit(0, 4);
167} 168}
168 169
@@ -170,8 +171,21 @@ static void clock_comparator_interrupt(__u16 code)
170{ 171{
171} 172}
172 173
174static void etr_timing_alert(struct etr_irq_parm *);
175static void stp_timing_alert(struct stp_irq_parm *);
176
177static void timing_alert_interrupt(__u16 code)
178{
179 if (S390_lowcore.ext_params & 0x00c40000)
180 etr_timing_alert((struct etr_irq_parm *)
181 &S390_lowcore.ext_params);
182 if (S390_lowcore.ext_params & 0x00038000)
183 stp_timing_alert((struct stp_irq_parm *)
184 &S390_lowcore.ext_params);
185}
186
173static void etr_reset(void); 187static void etr_reset(void);
174static void etr_ext_handler(__u16); 188static void stp_reset(void);
175 189
176/* 190/*
177 * Get the TOD clock running. 191 * Get the TOD clock running.
@@ -181,6 +195,7 @@ static u64 __init reset_tod_clock(void)
181 u64 time; 195 u64 time;
182 196
183 etr_reset(); 197 etr_reset();
198 stp_reset();
184 if (store_clock(&time) == 0) 199 if (store_clock(&time) == 0)
185 return time; 200 return time;
186 /* TOD clock not running. Set the clock to Unix Epoch. */ 201 /* TOD clock not running. Set the clock to Unix Epoch. */
@@ -231,8 +246,9 @@ void __init time_init(void)
231 if (clocksource_register(&clocksource_tod) != 0) 246 if (clocksource_register(&clocksource_tod) != 0)
232 panic("Could not register TOD clock source"); 247 panic("Could not register TOD clock source");
233 248
234 /* request the etr external interrupt */ 249 /* request the timing alert external interrupt */
235 if (register_early_external_interrupt(0x1406, etr_ext_handler, 250 if (register_early_external_interrupt(0x1406,
251 timing_alert_interrupt,
236 &ext_int_etr_cc) != 0) 252 &ext_int_etr_cc) != 0)
237 panic("Couldn't request external interrupt 0x1406"); 253 panic("Couldn't request external interrupt 0x1406");
238 254
@@ -245,10 +261,112 @@ void __init time_init(void)
245} 261}
246 262
247/* 263/*
264 * The time is "clock". old is what we think the time is.
265 * Adjust the value by a multiple of jiffies and add the delta to ntp.
266 * "delay" is an approximation how long the synchronization took. If
267 * the time correction is positive, then "delay" is subtracted from
268 * the time difference and only the remaining part is passed to ntp.
269 */
270static unsigned long long adjust_time(unsigned long long old,
271 unsigned long long clock,
272 unsigned long long delay)
273{
274 unsigned long long delta, ticks;
275 struct timex adjust;
276
277 if (clock > old) {
278 /* It is later than we thought. */
279 delta = ticks = clock - old;
280 delta = ticks = (delta < delay) ? 0 : delta - delay;
281 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
282 adjust.offset = ticks * (1000000 / HZ);
283 } else {
284 /* It is earlier than we thought. */
285 delta = ticks = old - clock;
286 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
287 delta = -delta;
288 adjust.offset = -ticks * (1000000 / HZ);
289 }
290 jiffies_timer_cc += delta;
291 if (adjust.offset != 0) {
292 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
293 adjust.offset);
294 adjust.modes = ADJ_OFFSET_SINGLESHOT;
295 do_adjtimex(&adjust);
296 }
297 return delta;
298}
299
300static DEFINE_PER_CPU(atomic_t, clock_sync_word);
301static unsigned long clock_sync_flags;
302
303#define CLOCK_SYNC_HAS_ETR 0
304#define CLOCK_SYNC_HAS_STP 1
305#define CLOCK_SYNC_ETR 2
306#define CLOCK_SYNC_STP 3
307
308/*
309 * The synchronous get_clock function. It will write the current clock
310 * value to the clock pointer and return 0 if the clock is in sync with
311 * the external time source. If the clock mode is local it will return
312 * -ENOSYS and -EAGAIN if the clock is not in sync with the external
313 * reference.
314 */
315int get_sync_clock(unsigned long long *clock)
316{
317 atomic_t *sw_ptr;
318 unsigned int sw0, sw1;
319
320 sw_ptr = &get_cpu_var(clock_sync_word);
321 sw0 = atomic_read(sw_ptr);
322 *clock = get_clock();
323 sw1 = atomic_read(sw_ptr);
324 put_cpu_var(clock_sync_sync);
325 if (sw0 == sw1 && (sw0 & 0x80000000U))
326 /* Success: time is in sync. */
327 return 0;
328 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
329 !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
330 return -ENOSYS;
331 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
332 !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
333 return -EACCES;
334 return -EAGAIN;
335}
336EXPORT_SYMBOL(get_sync_clock);
337
338/*
339 * Make get_sync_clock return -EAGAIN.
340 */
341static void disable_sync_clock(void *dummy)
342{
343 atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
344 /*
345 * Clear the in-sync bit 2^31. All get_sync_clock calls will
346 * fail until the sync bit is turned back on. In addition
347 * increase the "sequence" counter to avoid the race of an
348 * etr event and the complete recovery against get_sync_clock.
349 */
350 atomic_clear_mask(0x80000000, sw_ptr);
351 atomic_inc(sw_ptr);
352}
353
354/*
355 * Make get_sync_clock return 0 again.
356 * Needs to be called from a context disabled for preemption.
357 */
358static void enable_sync_clock(void)
359{
360 atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
361 atomic_set_mask(0x80000000, sw_ptr);
362}
363
364/*
248 * External Time Reference (ETR) code. 365 * External Time Reference (ETR) code.
249 */ 366 */
250static int etr_port0_online; 367static int etr_port0_online;
251static int etr_port1_online; 368static int etr_port1_online;
369static int etr_steai_available;
252 370
253static int __init early_parse_etr(char *p) 371static int __init early_parse_etr(char *p)
254{ 372{
@@ -273,12 +391,6 @@ enum etr_event {
273 ETR_EVENT_UPDATE, 391 ETR_EVENT_UPDATE,
274}; 392};
275 393
276enum etr_flags {
277 ETR_FLAG_ENOSYS,
278 ETR_FLAG_EACCES,
279 ETR_FLAG_STEAI,
280};
281
282/* 394/*
283 * Valid bit combinations of the eacr register are (x = don't care): 395 * Valid bit combinations of the eacr register are (x = don't care):
284 * e0 e1 dp p0 p1 ea es sl 396 * e0 e1 dp p0 p1 ea es sl
@@ -305,74 +417,18 @@ enum etr_flags {
305 */ 417 */
306static struct etr_eacr etr_eacr; 418static struct etr_eacr etr_eacr;
307static u64 etr_tolec; /* time of last eacr update */ 419static u64 etr_tolec; /* time of last eacr update */
308static unsigned long etr_flags;
309static struct etr_aib etr_port0; 420static struct etr_aib etr_port0;
310static int etr_port0_uptodate; 421static int etr_port0_uptodate;
311static struct etr_aib etr_port1; 422static struct etr_aib etr_port1;
312static int etr_port1_uptodate; 423static int etr_port1_uptodate;
313static unsigned long etr_events; 424static unsigned long etr_events;
314static struct timer_list etr_timer; 425static struct timer_list etr_timer;
315static DEFINE_PER_CPU(atomic_t, etr_sync_word);
316 426
317static void etr_timeout(unsigned long dummy); 427static void etr_timeout(unsigned long dummy);
318static void etr_work_fn(struct work_struct *work); 428static void etr_work_fn(struct work_struct *work);
319static DECLARE_WORK(etr_work, etr_work_fn); 429static DECLARE_WORK(etr_work, etr_work_fn);
320 430
321/* 431/*
322 * The etr get_clock function. It will write the current clock value
323 * to the clock pointer and return 0 if the clock is in sync with the
324 * external time source. If the clock mode is local it will return
325 * -ENOSYS and -EAGAIN if the clock is not in sync with the external
326 * reference. This function is what ETR is all about..
327 */
328int get_sync_clock(unsigned long long *clock)
329{
330 atomic_t *sw_ptr;
331 unsigned int sw0, sw1;
332
333 sw_ptr = &get_cpu_var(etr_sync_word);
334 sw0 = atomic_read(sw_ptr);
335 *clock = get_clock();
336 sw1 = atomic_read(sw_ptr);
337 put_cpu_var(etr_sync_sync);
338 if (sw0 == sw1 && (sw0 & 0x80000000U))
339 /* Success: time is in sync. */
340 return 0;
341 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
342 return -ENOSYS;
343 if (test_bit(ETR_FLAG_EACCES, &etr_flags))
344 return -EACCES;
345 return -EAGAIN;
346}
347EXPORT_SYMBOL(get_sync_clock);
348
349/*
350 * Make get_sync_clock return -EAGAIN.
351 */
352static void etr_disable_sync_clock(void *dummy)
353{
354 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
355 /*
356 * Clear the in-sync bit 2^31. All get_sync_clock calls will
357 * fail until the sync bit is turned back on. In addition
358 * increase the "sequence" counter to avoid the race of an
359 * etr event and the complete recovery against get_sync_clock.
360 */
361 atomic_clear_mask(0x80000000, sw_ptr);
362 atomic_inc(sw_ptr);
363}
364
365/*
366 * Make get_sync_clock return 0 again.
367 * Needs to be called from a context disabled for preemption.
368 */
369static void etr_enable_sync_clock(void)
370{
371 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
372 atomic_set_mask(0x80000000, sw_ptr);
373}
374
375/*
376 * Reset ETR attachment. 432 * Reset ETR attachment.
377 */ 433 */
378static void etr_reset(void) 434static void etr_reset(void)
@@ -381,15 +437,13 @@ static void etr_reset(void)
381 .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, 437 .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
382 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, 438 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
383 .es = 0, .sl = 0 }; 439 .es = 0, .sl = 0 };
384 if (etr_setr(&etr_eacr) == 0) 440 if (etr_setr(&etr_eacr) == 0) {
385 etr_tolec = get_clock(); 441 etr_tolec = get_clock();
386 else { 442 set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
387 set_bit(ETR_FLAG_ENOSYS, &etr_flags); 443 } else if (etr_port0_online || etr_port1_online) {
388 if (etr_port0_online || etr_port1_online) { 444 printk(KERN_WARNING "Running on non ETR capable "
389 printk(KERN_WARNING "Running on non ETR capable " 445 "machine, only local mode available.\n");
390 "machine, only local mode available.\n"); 446 etr_port0_online = etr_port1_online = 0;
391 etr_port0_online = etr_port1_online = 0;
392 }
393 } 447 }
394} 448}
395 449
@@ -397,14 +451,12 @@ static int __init etr_init(void)
397{ 451{
398 struct etr_aib aib; 452 struct etr_aib aib;
399 453
400 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) 454 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
401 return 0; 455 return 0;
402 /* Check if this machine has the steai instruction. */ 456 /* Check if this machine has the steai instruction. */
403 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) 457 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
404 set_bit(ETR_FLAG_STEAI, &etr_flags); 458 etr_steai_available = 1;
405 setup_timer(&etr_timer, etr_timeout, 0UL); 459 setup_timer(&etr_timer, etr_timeout, 0UL);
406 if (!etr_port0_online && !etr_port1_online)
407 set_bit(ETR_FLAG_EACCES, &etr_flags);
408 if (etr_port0_online) { 460 if (etr_port0_online) {
409 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 461 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
410 schedule_work(&etr_work); 462 schedule_work(&etr_work);
@@ -435,7 +487,8 @@ void etr_switch_to_local(void)
435{ 487{
436 if (!etr_eacr.sl) 488 if (!etr_eacr.sl)
437 return; 489 return;
438 etr_disable_sync_clock(NULL); 490 if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
491 disable_sync_clock(NULL);
439 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); 492 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
440 schedule_work(&etr_work); 493 schedule_work(&etr_work);
441} 494}
@@ -450,23 +503,21 @@ void etr_sync_check(void)
450{ 503{
451 if (!etr_eacr.es) 504 if (!etr_eacr.es)
452 return; 505 return;
453 etr_disable_sync_clock(NULL); 506 if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
507 disable_sync_clock(NULL);
454 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); 508 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
455 schedule_work(&etr_work); 509 schedule_work(&etr_work);
456} 510}
457 511
458/* 512/*
459 * ETR external interrupt. There are two causes: 513 * ETR timing alert. There are two causes:
460 * 1) port state change, check the usability of the port 514 * 1) port state change, check the usability of the port
461 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the 515 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
462 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) 516 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
463 * or ETR-data word 4 (edf4) has changed. 517 * or ETR-data word 4 (edf4) has changed.
464 */ 518 */
465static void etr_ext_handler(__u16 code) 519static void etr_timing_alert(struct etr_irq_parm *intparm)
466{ 520{
467 struct etr_interruption_parameter *intparm =
468 (struct etr_interruption_parameter *) &S390_lowcore.ext_params;
469
470 if (intparm->pc0) 521 if (intparm->pc0)
471 /* ETR port 0 state change. */ 522 /* ETR port 0 state change. */
472 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 523 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
@@ -591,58 +642,23 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
591 return 1; 642 return 1;
592} 643}
593 644
594/* 645struct clock_sync_data {
595 * The time is "clock". old is what we think the time is.
596 * Adjust the value by a multiple of jiffies and add the delta to ntp.
597 * "delay" is an approximation how long the synchronization took. If
598 * the time correction is positive, then "delay" is subtracted from
599 * the time difference and only the remaining part is passed to ntp.
600 */
601static unsigned long long etr_adjust_time(unsigned long long old,
602 unsigned long long clock,
603 unsigned long long delay)
604{
605 unsigned long long delta, ticks;
606 struct timex adjust;
607
608 if (clock > old) {
609 /* It is later than we thought. */
610 delta = ticks = clock - old;
611 delta = ticks = (delta < delay) ? 0 : delta - delay;
612 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
613 adjust.offset = ticks * (1000000 / HZ);
614 } else {
615 /* It is earlier than we thought. */
616 delta = ticks = old - clock;
617 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
618 delta = -delta;
619 adjust.offset = -ticks * (1000000 / HZ);
620 }
621 jiffies_timer_cc += delta;
622 if (adjust.offset != 0) {
623 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
624 adjust.offset);
625 adjust.modes = ADJ_OFFSET_SINGLESHOT;
626 do_adjtimex(&adjust);
627 }
628 return delta;
629}
630
631static struct {
632 int in_sync; 646 int in_sync;
633 unsigned long long fixup_cc; 647 unsigned long long fixup_cc;
634} etr_sync; 648};
635 649
636static void etr_sync_cpu_start(void *dummy) 650static void clock_sync_cpu_start(void *dummy)
637{ 651{
638 etr_enable_sync_clock(); 652 struct clock_sync_data *sync = dummy;
653
654 enable_sync_clock();
639 /* 655 /*
640 * This looks like a busy wait loop but it isn't. etr_sync_cpus 656 * This looks like a busy wait loop but it isn't. etr_sync_cpus
641 * is called on all other cpus while the TOD clocks is stopped. 657 * is called on all other cpus while the TOD clocks is stopped.
642 * __udelay will stop the cpu on an enabled wait psw until the 658 * __udelay will stop the cpu on an enabled wait psw until the
643 * TOD is running again. 659 * TOD is running again.
644 */ 660 */
645 while (etr_sync.in_sync == 0) { 661 while (sync->in_sync == 0) {
646 __udelay(1); 662 __udelay(1);
647 /* 663 /*
648 * A different cpu changes *in_sync. Therefore use 664 * A different cpu changes *in_sync. Therefore use
@@ -650,17 +666,17 @@ static void etr_sync_cpu_start(void *dummy)
650 */ 666 */
651 barrier(); 667 barrier();
652 } 668 }
653 if (etr_sync.in_sync != 1) 669 if (sync->in_sync != 1)
654 /* Didn't work. Clear per-cpu in sync bit again. */ 670 /* Didn't work. Clear per-cpu in sync bit again. */
655 etr_disable_sync_clock(NULL); 671 disable_sync_clock(NULL);
656 /* 672 /*
657 * This round of TOD syncing is done. Set the clock comparator 673 * This round of TOD syncing is done. Set the clock comparator
658 * to the next tick and let the processor continue. 674 * to the next tick and let the processor continue.
659 */ 675 */
660 fixup_clock_comparator(etr_sync.fixup_cc); 676 fixup_clock_comparator(sync->fixup_cc);
661} 677}
662 678
663static void etr_sync_cpu_end(void *dummy) 679static void clock_sync_cpu_end(void *dummy)
664{ 680{
665} 681}
666 682
@@ -672,6 +688,7 @@ static void etr_sync_cpu_end(void *dummy)
672static int etr_sync_clock(struct etr_aib *aib, int port) 688static int etr_sync_clock(struct etr_aib *aib, int port)
673{ 689{
674 struct etr_aib *sync_port; 690 struct etr_aib *sync_port;
691 struct clock_sync_data etr_sync;
675 unsigned long long clock, old_clock, delay, delta; 692 unsigned long long clock, old_clock, delay, delta;
676 int follows; 693 int follows;
677 int rc; 694 int rc;
@@ -690,9 +707,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
690 */ 707 */
691 memset(&etr_sync, 0, sizeof(etr_sync)); 708 memset(&etr_sync, 0, sizeof(etr_sync));
692 preempt_disable(); 709 preempt_disable();
693 smp_call_function(etr_sync_cpu_start, NULL, 0, 0); 710 smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0);
694 local_irq_disable(); 711 local_irq_disable();
695 etr_enable_sync_clock(); 712 enable_sync_clock();
696 713
697 /* Set clock to next OTE. */ 714 /* Set clock to next OTE. */
698 __ctl_set_bit(14, 21); 715 __ctl_set_bit(14, 21);
@@ -707,13 +724,13 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
707 /* Adjust Linux timing variables. */ 724 /* Adjust Linux timing variables. */
708 delay = (unsigned long long) 725 delay = (unsigned long long)
709 (aib->edf2.etv - sync_port->edf2.etv) << 32; 726 (aib->edf2.etv - sync_port->edf2.etv) << 32;
710 delta = etr_adjust_time(old_clock, clock, delay); 727 delta = adjust_time(old_clock, clock, delay);
711 etr_sync.fixup_cc = delta; 728 etr_sync.fixup_cc = delta;
712 fixup_clock_comparator(delta); 729 fixup_clock_comparator(delta);
713 /* Verify that the clock is properly set. */ 730 /* Verify that the clock is properly set. */
714 if (!etr_aib_follows(sync_port, aib, port)) { 731 if (!etr_aib_follows(sync_port, aib, port)) {
715 /* Didn't work. */ 732 /* Didn't work. */
716 etr_disable_sync_clock(NULL); 733 disable_sync_clock(NULL);
717 etr_sync.in_sync = -EAGAIN; 734 etr_sync.in_sync = -EAGAIN;
718 rc = -EAGAIN; 735 rc = -EAGAIN;
719 } else { 736 } else {
@@ -724,12 +741,12 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
724 /* Could not set the clock ?!? */ 741 /* Could not set the clock ?!? */
725 __ctl_clear_bit(0, 29); 742 __ctl_clear_bit(0, 29);
726 __ctl_clear_bit(14, 21); 743 __ctl_clear_bit(14, 21);
727 etr_disable_sync_clock(NULL); 744 disable_sync_clock(NULL);
728 etr_sync.in_sync = -EAGAIN; 745 etr_sync.in_sync = -EAGAIN;
729 rc = -EAGAIN; 746 rc = -EAGAIN;
730 } 747 }
731 local_irq_enable(); 748 local_irq_enable();
732 smp_call_function(etr_sync_cpu_end,NULL,0,0); 749 smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
733 preempt_enable(); 750 preempt_enable();
734 return rc; 751 return rc;
735} 752}
@@ -832,7 +849,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
832 * Do not try to get the alternate port aib if the clock 849 * Do not try to get the alternate port aib if the clock
833 * is not in sync yet. 850 * is not in sync yet.
834 */ 851 */
835 if (!eacr.es) 852 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags) && !eacr.es)
836 return eacr; 853 return eacr;
837 854
838 /* 855 /*
@@ -840,7 +857,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
840 * the other port immediately. If only stetr is available the 857 * the other port immediately. If only stetr is available the
841 * data-port bit toggle has to be used. 858 * data-port bit toggle has to be used.
842 */ 859 */
843 if (test_bit(ETR_FLAG_STEAI, &etr_flags)) { 860 if (etr_steai_available) {
844 if (eacr.p0 && !etr_port0_uptodate) { 861 if (eacr.p0 && !etr_port0_uptodate) {
845 etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); 862 etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
846 etr_port0_uptodate = 1; 863 etr_port0_uptodate = 1;
@@ -909,10 +926,10 @@ static void etr_work_fn(struct work_struct *work)
909 if (!eacr.ea) { 926 if (!eacr.ea) {
910 /* Both ports offline. Reset everything. */ 927 /* Both ports offline. Reset everything. */
911 eacr.dp = eacr.es = eacr.sl = 0; 928 eacr.dp = eacr.es = eacr.sl = 0;
912 on_each_cpu(etr_disable_sync_clock, NULL, 0, 1); 929 on_each_cpu(disable_sync_clock, NULL, 0, 1);
913 del_timer_sync(&etr_timer); 930 del_timer_sync(&etr_timer);
914 etr_update_eacr(eacr); 931 etr_update_eacr(eacr);
915 set_bit(ETR_FLAG_EACCES, &etr_flags); 932 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
916 return; 933 return;
917 } 934 }
918 935
@@ -953,7 +970,6 @@ static void etr_work_fn(struct work_struct *work)
953 eacr.e1 = 1; 970 eacr.e1 = 1;
954 sync_port = (etr_port0_uptodate && 971 sync_port = (etr_port0_uptodate &&
955 etr_port_valid(&etr_port0, 0)) ? 0 : -1; 972 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
956 clear_bit(ETR_FLAG_EACCES, &etr_flags);
957 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { 973 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
958 eacr.sl = 0; 974 eacr.sl = 0;
959 eacr.e0 = 0; 975 eacr.e0 = 0;
@@ -962,7 +978,6 @@ static void etr_work_fn(struct work_struct *work)
962 eacr.es = 0; 978 eacr.es = 0;
963 sync_port = (etr_port1_uptodate && 979 sync_port = (etr_port1_uptodate &&
964 etr_port_valid(&etr_port1, 1)) ? 1 : -1; 980 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
965 clear_bit(ETR_FLAG_EACCES, &etr_flags);
966 } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { 981 } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
967 eacr.sl = 1; 982 eacr.sl = 1;
968 eacr.e0 = 1; 983 eacr.e0 = 1;
@@ -976,7 +991,6 @@ static void etr_work_fn(struct work_struct *work)
976 eacr.e1 = 1; 991 eacr.e1 = 1;
977 sync_port = (etr_port0_uptodate && 992 sync_port = (etr_port0_uptodate &&
978 etr_port_valid(&etr_port0, 0)) ? 0 : -1; 993 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
979 clear_bit(ETR_FLAG_EACCES, &etr_flags);
980 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { 994 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
981 eacr.sl = 1; 995 eacr.sl = 1;
982 eacr.e0 = 0; 996 eacr.e0 = 0;
@@ -985,19 +999,22 @@ static void etr_work_fn(struct work_struct *work)
985 eacr.es = 0; 999 eacr.es = 0;
986 sync_port = (etr_port1_uptodate && 1000 sync_port = (etr_port1_uptodate &&
987 etr_port_valid(&etr_port1, 1)) ? 1 : -1; 1001 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
988 clear_bit(ETR_FLAG_EACCES, &etr_flags);
989 } else { 1002 } else {
990 /* Both ports not usable. */ 1003 /* Both ports not usable. */
991 eacr.es = eacr.sl = 0; 1004 eacr.es = eacr.sl = 0;
992 sync_port = -1; 1005 sync_port = -1;
993 set_bit(ETR_FLAG_EACCES, &etr_flags); 1006 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
994 } 1007 }
995 1008
1009 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
1010 eacr.es = 0;
1011
996 /* 1012 /*
997 * If the clock is in sync just update the eacr and return. 1013 * If the clock is in sync just update the eacr and return.
998 * If there is no valid sync port wait for a port update. 1014 * If there is no valid sync port wait for a port update.
999 */ 1015 */
1000 if (eacr.es || sync_port < 0) { 1016 if (test_bit(CLOCK_SYNC_STP, &clock_sync_flags) ||
1017 eacr.es || sync_port < 0) {
1001 etr_update_eacr(eacr); 1018 etr_update_eacr(eacr);
1002 etr_set_tolec_timeout(now); 1019 etr_set_tolec_timeout(now);
1003 return; 1020 return;
@@ -1018,11 +1035,13 @@ static void etr_work_fn(struct work_struct *work)
1018 * and set up a timer to try again after 0.5 seconds 1035 * and set up a timer to try again after 0.5 seconds
1019 */ 1036 */
1020 etr_update_eacr(eacr); 1037 etr_update_eacr(eacr);
1038 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1021 if (now < etr_tolec + (1600000 << 12) || 1039 if (now < etr_tolec + (1600000 << 12) ||
1022 etr_sync_clock(&aib, sync_port) != 0) { 1040 etr_sync_clock(&aib, sync_port) != 0) {
1023 /* Sync failed. Try again in 1/2 second. */ 1041 /* Sync failed. Try again in 1/2 second. */
1024 eacr.es = 0; 1042 eacr.es = 0;
1025 etr_update_eacr(eacr); 1043 etr_update_eacr(eacr);
1044 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1026 etr_set_sync_timeout(); 1045 etr_set_sync_timeout();
1027 } else 1046 } else
1028 etr_set_tolec_timeout(now); 1047 etr_set_tolec_timeout(now);
@@ -1097,8 +1116,8 @@ static ssize_t etr_online_store(struct sys_device *dev,
1097 value = simple_strtoul(buf, NULL, 0); 1116 value = simple_strtoul(buf, NULL, 0);
1098 if (value != 0 && value != 1) 1117 if (value != 0 && value != 1)
1099 return -EINVAL; 1118 return -EINVAL;
1100 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) 1119 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
1101 return -ENOSYS; 1120 return -EOPNOTSUPP;
1102 if (dev == &etr_port0_dev) { 1121 if (dev == &etr_port0_dev) {
1103 if (etr_port0_online == value) 1122 if (etr_port0_online == value)
1104 return count; /* Nothing to do. */ 1123 return count; /* Nothing to do. */
@@ -1292,3 +1311,318 @@ out:
1292} 1311}
1293 1312
1294device_initcall(etr_init_sysfs); 1313device_initcall(etr_init_sysfs);
1314
1315/*
1316 * Server Time Protocol (STP) code.
1317 */
1318static int stp_online;
1319static struct stp_sstpi stp_info;
1320static void *stp_page;
1321
1322static void stp_work_fn(struct work_struct *work);
1323static DECLARE_WORK(stp_work, stp_work_fn);
1324
1325static int __init early_parse_stp(char *p)
1326{
1327 if (strncmp(p, "off", 3) == 0)
1328 stp_online = 0;
1329 else if (strncmp(p, "on", 2) == 0)
1330 stp_online = 1;
1331 return 0;
1332}
1333early_param("stp", early_parse_stp);
1334
1335/*
1336 * Reset STP attachment.
1337 */
1338static void stp_reset(void)
1339{
1340 int rc;
1341
1342 stp_page = alloc_bootmem_pages(PAGE_SIZE);
1343 rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1344 if (rc == 1)
1345 set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
1346 else if (stp_online) {
1347 printk(KERN_WARNING "Running on non STP capable machine.\n");
1348 free_bootmem((unsigned long) stp_page, PAGE_SIZE);
1349 stp_page = NULL;
1350 stp_online = 0;
1351 }
1352}
1353
1354static int __init stp_init(void)
1355{
1356 if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online)
1357 schedule_work(&stp_work);
1358 return 0;
1359}
1360
1361arch_initcall(stp_init);
1362
1363/*
1364 * STP timing alert. There are three causes:
1365 * 1) timing status change
1366 * 2) link availability change
1367 * 3) time control parameter change
1368 * In all three cases we are only interested in the clock source state.
1369 * If a STP clock source is now available use it.
1370 */
1371static void stp_timing_alert(struct stp_irq_parm *intparm)
1372{
1373 if (intparm->tsc || intparm->lac || intparm->tcpc)
1374 schedule_work(&stp_work);
1375}
1376
1377/*
1378 * STP sync check machine check. This is called when the timing state
1379 * changes from the synchronized state to the unsynchronized state.
1380 * After a STP sync check the clock is not in sync. The machine check
1381 * is broadcasted to all cpus at the same time.
1382 */
1383void stp_sync_check(void)
1384{
1385 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
1386 return;
1387 disable_sync_clock(NULL);
1388 schedule_work(&stp_work);
1389}
1390
1391/*
1392 * STP island condition machine check. This is called when an attached
1393 * server attempts to communicate over an STP link and the servers
1394 * have matching CTN ids and have a valid stratum-1 configuration
1395 * but the configurations do not match.
1396 */
1397void stp_island_check(void)
1398{
1399 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
1400 return;
1401 disable_sync_clock(NULL);
1402 schedule_work(&stp_work);
1403}
1404
1405/*
1406 * STP tasklet. Check for the STP state and take over the clock
1407 * synchronization if the STP clock source is usable.
1408 */
1409static void stp_work_fn(struct work_struct *work)
1410{
1411 struct clock_sync_data stp_sync;
1412 unsigned long long old_clock, delta;
1413 int rc;
1414
1415 if (!stp_online) {
1416 chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1417 return;
1418 }
1419
1420 rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
1421 if (rc)
1422 return;
1423
1424 rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
1425 if (rc || stp_info.c == 0)
1426 return;
1427
1428 /*
1429 * Catch all other cpus and make them wait until we have
1430 * successfully synced the clock. smp_call_function will
1431 * return after all other cpus are in clock_sync_cpu_start.
1432 */
1433 memset(&stp_sync, 0, sizeof(stp_sync));
1434 preempt_disable();
1435 smp_call_function(clock_sync_cpu_start, &stp_sync, 0, 0);
1436 local_irq_disable();
1437 enable_sync_clock();
1438
1439 set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1440 if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
1441 schedule_work(&etr_work);
1442
1443 rc = 0;
1444 if (stp_info.todoff[0] || stp_info.todoff[1] ||
1445 stp_info.todoff[2] || stp_info.todoff[3] ||
1446 stp_info.tmd != 2) {
1447 old_clock = get_clock();
1448 rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
1449 if (rc == 0) {
1450 delta = adjust_time(old_clock, get_clock(), 0);
1451 fixup_clock_comparator(delta);
1452 rc = chsc_sstpi(stp_page, &stp_info,
1453 sizeof(struct stp_sstpi));
1454 if (rc == 0 && stp_info.tmd != 2)
1455 rc = -EAGAIN;
1456 }
1457 }
1458 if (rc) {
1459 disable_sync_clock(NULL);
1460 stp_sync.in_sync = -EAGAIN;
1461 clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1462 if (etr_port0_online || etr_port1_online)
1463 schedule_work(&etr_work);
1464 } else
1465 stp_sync.in_sync = 1;
1466
1467 local_irq_enable();
1468 smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
1469 preempt_enable();
1470}
1471
1472/*
1473 * STP class sysfs interface functions
1474 */
1475static struct sysdev_class stp_sysclass = {
1476 .name = "stp",
1477};
1478
1479static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf)
1480{
1481 if (!stp_online)
1482 return -ENODATA;
1483 return sprintf(buf, "%016llx\n",
1484 *(unsigned long long *) stp_info.ctnid);
1485}
1486
1487static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
1488
1489static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf)
1490{
1491 if (!stp_online)
1492 return -ENODATA;
1493 return sprintf(buf, "%i\n", stp_info.ctn);
1494}
1495
1496static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
1497
1498static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf)
1499{
1500 if (!stp_online || !(stp_info.vbits & 0x2000))
1501 return -ENODATA;
1502 return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
1503}
1504
1505static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
1506
1507static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf)
1508{
1509 if (!stp_online || !(stp_info.vbits & 0x8000))
1510 return -ENODATA;
1511 return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
1512}
1513
1514static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
1515
1516static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf)
1517{
1518 if (!stp_online)
1519 return -ENODATA;
1520 return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
1521}
1522
1523static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
1524
1525static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf)
1526{
1527 if (!stp_online || !(stp_info.vbits & 0x0800))
1528 return -ENODATA;
1529 return sprintf(buf, "%i\n", (int) stp_info.tto);
1530}
1531
1532static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
1533
1534static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf)
1535{
1536 if (!stp_online || !(stp_info.vbits & 0x4000))
1537 return -ENODATA;
1538 return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
1539}
1540
1541static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
1542 stp_time_zone_offset_show, NULL);
1543
1544static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf)
1545{
1546 if (!stp_online)
1547 return -ENODATA;
1548 return sprintf(buf, "%i\n", stp_info.tmd);
1549}
1550
1551static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
1552
1553static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf)
1554{
1555 if (!stp_online)
1556 return -ENODATA;
1557 return sprintf(buf, "%i\n", stp_info.tst);
1558}
1559
1560static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
1561
/* sysfs read: current stp/online setting (0 or 1). */
static ssize_t stp_online_show(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%i\n", stp_online);
}
1566
1567static ssize_t stp_online_store(struct sysdev_class *class,
1568 const char *buf, size_t count)
1569{
1570 unsigned int value;
1571
1572 value = simple_strtoul(buf, NULL, 0);
1573 if (value != 0 && value != 1)
1574 return -EINVAL;
1575 if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
1576 return -EOPNOTSUPP;
1577 stp_online = value;
1578 schedule_work(&stp_work);
1579 return count;
1580}
1581
/*
 * Can't use SYSDEV_CLASS_ATTR because the attribute should be named
 * stp/online but attr_online already exists in this file ..
 */
/* writable (0600) attribute, unlike the read-only 0400 attrs above */
static struct sysdev_class_attribute attr_stp_online = {
	.attr = { .name = "online", .mode = 0600 },
	.show = stp_online_show,
	.store = stp_online_store,
};
1591
/* All STP sysfs attributes; NULL-terminated for the registration loop. */
static struct sysdev_class_attribute *stp_attributes[] = {
	&attr_ctn_id,
	&attr_ctn_type,
	&attr_dst_offset,
	&attr_leap_seconds,
	&attr_stp_online,
	&attr_stratum,
	&attr_time_offset,
	&attr_time_zone_offset,
	&attr_timing_mode,
	&attr_timing_state,
	NULL
};
1605
1606static int __init stp_init_sysfs(void)
1607{
1608 struct sysdev_class_attribute **attr;
1609 int rc;
1610
1611 rc = sysdev_class_register(&stp_sysclass);
1612 if (rc)
1613 goto out;
1614 for (attr = stp_attributes; *attr; attr++) {
1615 rc = sysdev_class_create_file(&stp_sysclass, *attr);
1616 if (rc)
1617 goto out_unreg;
1618 }
1619 return 0;
1620out_unreg:
1621 for (; attr >= stp_attributes; attr--)
1622 sysdev_class_remove_file(&stp_sysclass, *attr);
1623 sysdev_class_unregister(&stp_sysclass);
1624out:
1625 return rc;
1626}
1627
1628device_initcall(stp_init_sysfs);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 661a07217057..212d618b0095 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -313,8 +313,6 @@ void __init s390_init_cpu_topology(void)
313 machine_has_topology_irq = 1; 313 machine_has_topology_irq = 1;
314 314
315 tl_info = alloc_bootmem_pages(PAGE_SIZE); 315 tl_info = alloc_bootmem_pages(PAGE_SIZE);
316 if (!tl_info)
317 goto error;
318 info = tl_info; 316 info = tl_info;
319 stsi(info, 15, 1, 2); 317 stsi(info, 15, 1, 2);
320 318
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index ca90ee3f930e..0fa5dc5d68e1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -136,7 +136,7 @@ static inline void set_vtimer(__u64 expires)
136} 136}
137#endif 137#endif
138 138
139static void start_cpu_timer(void) 139void vtime_start_cpu_timer(void)
140{ 140{
141 struct vtimer_queue *vt_list; 141 struct vtimer_queue *vt_list;
142 142
@@ -150,7 +150,7 @@ static void start_cpu_timer(void)
150 set_vtimer(vt_list->idle); 150 set_vtimer(vt_list->idle);
151} 151}
152 152
153static void stop_cpu_timer(void) 153void vtime_stop_cpu_timer(void)
154{ 154{
155 struct vtimer_queue *vt_list; 155 struct vtimer_queue *vt_list;
156 156
@@ -318,8 +318,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
318 vt_list = &per_cpu(virt_cpu_timer, timer->cpu); 318 vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
319 spin_lock_irqsave(&vt_list->lock, flags); 319 spin_lock_irqsave(&vt_list->lock, flags);
320 320
321 if (timer->cpu != smp_processor_id()) 321 BUG_ON(timer->cpu != smp_processor_id());
322 printk("internal_add_vtimer: BUG, running on wrong CPU");
323 322
324 /* if list is empty we only have to set the timer */ 323 /* if list is empty we only have to set the timer */
325 if (list_empty(&vt_list->list)) { 324 if (list_empty(&vt_list->list)) {
@@ -353,25 +352,12 @@ static void internal_add_vtimer(struct vtimer_list *timer)
353 put_cpu(); 352 put_cpu();
354} 353}
355 354
356static inline int prepare_vtimer(struct vtimer_list *timer) 355static inline void prepare_vtimer(struct vtimer_list *timer)
357{ 356{
358 if (!timer->function) { 357 BUG_ON(!timer->function);
359 printk("add_virt_timer: uninitialized timer\n"); 358 BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
360 return -EINVAL; 359 BUG_ON(vtimer_pending(timer));
361 }
362
363 if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
364 printk("add_virt_timer: invalid timer expire value!\n");
365 return -EINVAL;
366 }
367
368 if (vtimer_pending(timer)) {
369 printk("add_virt_timer: timer pending\n");
370 return -EBUSY;
371 }
372
373 timer->cpu = get_cpu(); 360 timer->cpu = get_cpu();
374 return 0;
375} 361}
376 362
377/* 363/*
@@ -382,10 +368,7 @@ void add_virt_timer(void *new)
382 struct vtimer_list *timer; 368 struct vtimer_list *timer;
383 369
384 timer = (struct vtimer_list *)new; 370 timer = (struct vtimer_list *)new;
385 371 prepare_vtimer(timer);
386 if (prepare_vtimer(timer) < 0)
387 return;
388
389 timer->interval = 0; 372 timer->interval = 0;
390 internal_add_vtimer(timer); 373 internal_add_vtimer(timer);
391} 374}
@@ -399,10 +382,7 @@ void add_virt_timer_periodic(void *new)
399 struct vtimer_list *timer; 382 struct vtimer_list *timer;
400 383
401 timer = (struct vtimer_list *)new; 384 timer = (struct vtimer_list *)new;
402 385 prepare_vtimer(timer);
403 if (prepare_vtimer(timer) < 0)
404 return;
405
406 timer->interval = timer->expires; 386 timer->interval = timer->expires;
407 internal_add_vtimer(timer); 387 internal_add_vtimer(timer);
408} 388}
@@ -423,15 +403,8 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
423 unsigned long flags; 403 unsigned long flags;
424 int cpu; 404 int cpu;
425 405
426 if (!timer->function) { 406 BUG_ON(!timer->function);
427 printk("mod_virt_timer: uninitialized timer\n"); 407 BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
428 return -EINVAL;
429 }
430
431 if (!expires || expires > VTIMER_MAX_SLICE) {
432 printk("mod_virt_timer: invalid expire range\n");
433 return -EINVAL;
434 }
435 408
436 /* 409 /*
437 * This is a common optimization triggered by the 410 * This is a common optimization triggered by the
@@ -444,6 +417,9 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
444 cpu = get_cpu(); 417 cpu = get_cpu();
445 vt_list = &per_cpu(virt_cpu_timer, cpu); 418 vt_list = &per_cpu(virt_cpu_timer, cpu);
446 419
420 /* check if we run on the right CPU */
421 BUG_ON(timer->cpu != cpu);
422
447 /* disable interrupts before test if timer is pending */ 423 /* disable interrupts before test if timer is pending */
448 spin_lock_irqsave(&vt_list->lock, flags); 424 spin_lock_irqsave(&vt_list->lock, flags);
449 425
@@ -458,14 +434,6 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
458 return 0; 434 return 0;
459 } 435 }
460 436
461 /* check if we run on the right CPU */
462 if (timer->cpu != cpu) {
463 printk("mod_virt_timer: running on wrong CPU, check your code\n");
464 spin_unlock_irqrestore(&vt_list->lock, flags);
465 put_cpu();
466 return -EINVAL;
467 }
468
469 list_del_init(&timer->entry); 437 list_del_init(&timer->entry);
470 timer->expires = expires; 438 timer->expires = expires;
471 439
@@ -536,24 +504,6 @@ void init_cpu_vtimer(void)
536 504
537} 505}
538 506
539static int vtimer_idle_notify(struct notifier_block *self,
540 unsigned long action, void *hcpu)
541{
542 switch (action) {
543 case S390_CPU_IDLE:
544 stop_cpu_timer();
545 break;
546 case S390_CPU_NOT_IDLE:
547 start_cpu_timer();
548 break;
549 }
550 return NOTIFY_OK;
551}
552
553static struct notifier_block vtimer_idle_nb = {
554 .notifier_call = vtimer_idle_notify,
555};
556
557void __init vtime_init(void) 507void __init vtime_init(void)
558{ 508{
559 /* request the cpu timer external interrupt */ 509 /* request the cpu timer external interrupt */
@@ -561,9 +511,6 @@ void __init vtime_init(void)
561 &ext_int_info_timer) != 0) 511 &ext_int_info_timer) != 0)
562 panic("Couldn't request external interrupt 0x1005"); 512 panic("Couldn't request external interrupt 0x1005");
563 513
564 if (register_idle_notifier(&vtimer_idle_nb))
565 panic("Couldn't register idle notifier");
566
567 /* Enable cpu timer interrupts on the boot cpu. */ 514 /* Enable cpu timer interrupts on the boot cpu. */
568 init_cpu_vtimer(); 515 init_cpu_vtimer();
569} 516}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 05598649b326..388cc7420055 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -202,3 +202,22 @@ void free_initrd_mem(unsigned long start, unsigned long end)
202 } 202 }
203} 203}
204#endif 204#endif
205
206#ifdef CONFIG_MEMORY_HOTPLUG
207int arch_add_memory(int nid, u64 start, u64 size)
208{
209 struct pglist_data *pgdat;
210 struct zone *zone;
211 int rc;
212
213 pgdat = NODE_DATA(nid);
214 zone = pgdat->node_zones + ZONE_NORMAL;
215 rc = vmem_add_mapping(start, size);
216 if (rc)
217 return rc;
218 rc = __add_pages(zone, PFN_DOWN(start), PFN_DOWN(size));
219 if (rc)
220 vmem_remove_mapping(start, size);
221 return rc;
222}
223#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/sh/boards/landisk/gio.c b/arch/sh/boards/landisk/gio.c
index 17025080db35..0c15b0a50b99 100644
--- a/arch/sh/boards/landisk/gio.c
+++ b/arch/sh/boards/landisk/gio.c
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/smp_lock.h>
17#include <linux/kdev_t.h> 18#include <linux/kdev_t.h>
18#include <linux/cdev.h> 19#include <linux/cdev.h>
19#include <linux/fs.h> 20#include <linux/fs.h>
@@ -32,17 +33,20 @@ static int openCnt;
32static int gio_open(struct inode *inode, struct file *filp) 33static int gio_open(struct inode *inode, struct file *filp)
33{ 34{
34 int minor; 35 int minor;
36 int ret = -ENOENT;
35 37
38 lock_kernel();
36 minor = MINOR(inode->i_rdev); 39 minor = MINOR(inode->i_rdev);
37 if (minor < DEVCOUNT) { 40 if (minor < DEVCOUNT) {
38 if (openCnt > 0) { 41 if (openCnt > 0) {
39 return -EALREADY; 42 ret = -EALREADY;
40 } else { 43 } else {
41 openCnt++; 44 openCnt++;
42 return 0; 45 ret = 0;
43 } 46 }
44 } 47 }
45 return -ENOENT; 48 unlock_kernel();
49 return ret;
46} 50}
47 51
48static int gio_close(struct inode *inode, struct file *filp) 52static int gio_close(struct inode *inode, struct file *filp)
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index d06a405ca718..6707422c9847 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -10,6 +10,7 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/miscdevice.h> 12#include <linux/miscdevice.h>
13#include <linux/smp_lock.h>
13#include <linux/pm.h> 14#include <linux/pm.h>
14 15
15#include <asm/io.h> 16#include <asm/io.h>
@@ -75,6 +76,7 @@ static inline void apc_free(void)
75 76
76static int apc_open(struct inode *inode, struct file *f) 77static int apc_open(struct inode *inode, struct file *f)
77{ 78{
79 cycle_kernel_lock();
78 return 0; 80 return 0;
79} 81}
80 82
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index e5d238970c7e..bedc4c159b1c 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -11,6 +11,7 @@
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/smp_lock.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/param.h> 16#include <linux/param.h>
16#include <linux/string.h> 17#include <linux/string.h>
@@ -1659,10 +1660,14 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file,
1659 1660
1660static int mini_rtc_open(struct inode *inode, struct file *file) 1661static int mini_rtc_open(struct inode *inode, struct file *file)
1661{ 1662{
1662 if (mini_rtc_status & RTC_IS_OPEN) 1663 lock_kernel();
1664 if (mini_rtc_status & RTC_IS_OPEN) {
1665 unlock_kernel();
1663 return -EBUSY; 1666 return -EBUSY;
1667 }
1664 1668
1665 mini_rtc_status |= RTC_IS_OPEN; 1669 mini_rtc_status |= RTC_IS_OPEN;
1670 unlock_kernel();
1666 1671
1667 return 0; 1672 return 0;
1668} 1673}
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index a9ad4bd6d953..d332503fa1be 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -66,6 +66,7 @@ static int harddog_open(struct inode *inode, struct file *file)
66 int err = -EBUSY; 66 int err = -EBUSY;
67 char *sock = NULL; 67 char *sock = NULL;
68 68
69 lock_kernel();
69 spin_lock(&lock); 70 spin_lock(&lock);
70 if(timer_alive) 71 if(timer_alive)
71 goto err; 72 goto err;
@@ -82,9 +83,11 @@ static int harddog_open(struct inode *inode, struct file *file)
82 83
83 timer_alive = 1; 84 timer_alive = 1;
84 spin_unlock(&lock); 85 spin_unlock(&lock);
86 unlock_kernel();
85 return nonseekable_open(inode, file); 87 return nonseekable_open(inode, file);
86err: 88err:
87 spin_unlock(&lock); 89 spin_unlock(&lock);
90 unlock_kernel();
88 return err; 91 return err;
89} 92}
90 93
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index 67b2f55a602f..eb240323c40a 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -16,6 +16,7 @@
16#include <linux/miscdevice.h> 16#include <linux/miscdevice.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/smp_lock.h>
19#include <asm/uaccess.h> 20#include <asm/uaccess.h>
20#include "mem_user.h" 21#include "mem_user.h"
21 22
@@ -77,6 +78,7 @@ out:
77 78
78static int mmapper_open(struct inode *inode, struct file *file) 79static int mmapper_open(struct inode *inode, struct file *file)
79{ 80{
81 cycle_kernel_lock();
80 return 0; 82 return 0;
81} 83}
82 84
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index 4949044773ba..6eabb7022a2d 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -7,6 +7,7 @@
7 * of the GNU General Public License, incorporated herein by reference. 7 * of the GNU General Public License, incorporated herein by reference.
8 */ 8 */
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/smp_lock.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/fs.h> 12#include <linux/fs.h>
12#include <linux/interrupt.h> 13#include <linux/interrupt.h>
@@ -33,6 +34,8 @@ static DECLARE_WAIT_QUEUE_HEAD(host_read_wait);
33 34
34static int rng_dev_open (struct inode *inode, struct file *filp) 35static int rng_dev_open (struct inode *inode, struct file *filp)
35{ 36{
37 cycle_kernel_lock();
38
36 /* enforce read-only access to this chrdev */ 39 /* enforce read-only access to this chrdev */
37 if ((filp->f_mode & FMODE_READ) == 0) 40 if ((filp->f_mode & FMODE_READ) == 0)
38 return -EINVAL; 41 return -EINVAL;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 00e6d1370954..75cb5da4ea0a 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -204,6 +204,7 @@
204#include <linux/module.h> 204#include <linux/module.h>
205 205
206#include <linux/poll.h> 206#include <linux/poll.h>
207#include <linux/smp_lock.h>
207#include <linux/types.h> 208#include <linux/types.h>
208#include <linux/stddef.h> 209#include <linux/stddef.h>
209#include <linux/timer.h> 210#include <linux/timer.h>
@@ -1549,10 +1550,12 @@ static int do_open(struct inode *inode, struct file *filp)
1549{ 1550{
1550 struct apm_user *as; 1551 struct apm_user *as;
1551 1552
1553 lock_kernel();
1552 as = kmalloc(sizeof(*as), GFP_KERNEL); 1554 as = kmalloc(sizeof(*as), GFP_KERNEL);
1553 if (as == NULL) { 1555 if (as == NULL) {
1554 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n", 1556 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n",
1555 sizeof(*as)); 1557 sizeof(*as));
1558 unlock_kernel();
1556 return -ENOMEM; 1559 return -ENOMEM;
1557 } 1560 }
1558 as->magic = APM_BIOS_MAGIC; 1561 as->magic = APM_BIOS_MAGIC;
@@ -1574,6 +1577,7 @@ static int do_open(struct inode *inode, struct file *filp)
1574 user_list = as; 1577 user_list = as;
1575 spin_unlock(&user_list_lock); 1578 spin_unlock(&user_list_lock);
1576 filp->private_data = as; 1579 filp->private_data = as;
1580 unlock_kernel();
1577 return 0; 1581 return 0;
1578} 1582}
1579 1583
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 501ca1cea27d..987410745182 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -9,6 +9,7 @@
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <linux/smp_lock.h>
12#include <linux/string.h> 13#include <linux/string.h>
13#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
14#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
@@ -532,10 +533,12 @@ static int open_exclu; /* already open exclusive? */
532 533
533static int mce_open(struct inode *inode, struct file *file) 534static int mce_open(struct inode *inode, struct file *file)
534{ 535{
536 lock_kernel();
535 spin_lock(&mce_state_lock); 537 spin_lock(&mce_state_lock);
536 538
537 if (open_exclu || (open_count && (file->f_flags & O_EXCL))) { 539 if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
538 spin_unlock(&mce_state_lock); 540 spin_unlock(&mce_state_lock);
541 unlock_kernel();
539 return -EBUSY; 542 return -EBUSY;
540 } 543 }
541 544
@@ -544,6 +547,7 @@ static int mce_open(struct inode *inode, struct file *file)
544 open_count++; 547 open_count++;
545 548
546 spin_unlock(&mce_state_lock); 549 spin_unlock(&mce_state_lock);
550 unlock_kernel();
547 551
548 return nonseekable_open(inode, file); 552 return nonseekable_open(inode, file);
549} 553}
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index daff52a62248..71f1c2654bec 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -33,6 +33,7 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/poll.h> 34#include <linux/poll.h>
35#include <linux/smp.h> 35#include <linux/smp.h>
36#include <linux/smp_lock.h>
36#include <linux/major.h> 37#include <linux/major.h>
37#include <linux/fs.h> 38#include <linux/fs.h>
38#include <linux/smp_lock.h> 39#include <linux/smp_lock.h>
@@ -107,15 +108,23 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
107 108
108static int cpuid_open(struct inode *inode, struct file *file) 109static int cpuid_open(struct inode *inode, struct file *file)
109{ 110{
110 unsigned int cpu = iminor(file->f_path.dentry->d_inode); 111 unsigned int cpu;
111 struct cpuinfo_x86 *c = &cpu_data(cpu); 112 struct cpuinfo_x86 *c;
112 113 int ret = 0;
113 if (cpu >= NR_CPUS || !cpu_online(cpu)) 114
114 return -ENXIO; /* No such CPU */ 115 lock_kernel();
116
117 cpu = iminor(file->f_path.dentry->d_inode);
118 if (cpu >= NR_CPUS || !cpu_online(cpu)) {
119 ret = -ENXIO; /* No such CPU */
120 goto out;
121 }
122 c = &cpu_data(cpu);
115 if (c->cpuid_level < 0) 123 if (c->cpuid_level < 0)
116 return -EIO; /* CPUID not supported */ 124 ret = -EIO; /* CPUID not supported */
117 125out:
118 return 0; 126 unlock_kernel();
127 return ret;
119} 128}
120 129
121/* 130/*
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 9758fea87c5b..f47ba8156f3e 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -76,6 +76,7 @@
76#include <linux/kernel.h> 76#include <linux/kernel.h>
77#include <linux/init.h> 77#include <linux/init.h>
78#include <linux/sched.h> 78#include <linux/sched.h>
79#include <linux/smp_lock.h>
79#include <linux/cpumask.h> 80#include <linux/cpumask.h>
80#include <linux/module.h> 81#include <linux/module.h>
81#include <linux/slab.h> 82#include <linux/slab.h>
@@ -423,6 +424,7 @@ out:
423 424
424static int microcode_open (struct inode *unused1, struct file *unused2) 425static int microcode_open (struct inode *unused1, struct file *unused2)
425{ 426{
427 cycle_kernel_lock();
426 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; 428 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
427} 429}
428 430
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 1f3abe048e93..a153b3905f60 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -117,12 +117,20 @@ static int msr_open(struct inode *inode, struct file *file)
117{ 117{
118 unsigned int cpu = iminor(file->f_path.dentry->d_inode); 118 unsigned int cpu = iminor(file->f_path.dentry->d_inode);
119 struct cpuinfo_x86 *c = &cpu_data(cpu); 119 struct cpuinfo_x86 *c = &cpu_data(cpu);
120 int ret = 0;
120 121
121 if (cpu >= NR_CPUS || !cpu_online(cpu)) 122 lock_kernel();
122 return -ENXIO; /* No such CPU */ 123 cpu = iminor(file->f_path.dentry->d_inode);
123 if (!cpu_has(c, X86_FEATURE_MSR))
124 return -EIO; /* MSR not supported */
125 124
125 if (cpu >= NR_CPUS || !cpu_online(cpu)) {
126 ret = -ENXIO; /* No such CPU */
127 goto out;
128 }
129 c = &cpu_data(cpu);
130 if (!cpu_has(c, X86_FEATURE_MSR))
131 ret = -EIO; /* MSR not supported */
132out:
133 unlock_kernel();
126 return 0; 134 return 0;
127} 135}
128 136
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 74e992957ff6..2696a6837782 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -105,30 +105,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
105 105
106void printk_address(unsigned long address, int reliable) 106void printk_address(unsigned long address, int reliable)
107{ 107{
108#ifdef CONFIG_KALLSYMS 108 printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address);
109 unsigned long offset = 0, symsize;
110 const char *symname;
111 char *modname;
112 char *delim = ":";
113 char namebuf[KSYM_NAME_LEN];
114 char reliab[4] = "";
115
116 symname = kallsyms_lookup(address, &symsize, &offset,
117 &modname, namebuf);
118 if (!symname) {
119 printk(" [<%016lx>]\n", address);
120 return;
121 }
122 if (!reliable)
123 strcpy(reliab, "? ");
124
125 if (!modname)
126 modname = delim = "";
127 printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
128 address, reliab, delim, modname, delim, symname, offset, symsize);
129#else
130 printk(" [<%016lx>]\n", address);
131#endif
132} 109}
133 110
134static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, 111static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,