Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/arm/mach-tegra/include/mach/kbc.h | 4
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/cris/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/frv/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/m32r/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/m68k/Kconfig | 1
-rw-r--r--  arch/m68k/include/asm/bitops_no.h | 18
-rw-r--r--  arch/m68k/include/asm/io_no.h | 8
-rw-r--r--  arch/m68k/kernel/asm-offsets.c | 106
-rw-r--r--  arch/m68k/kernel/asm-offsets_mm.c | 100
-rw-r--r--  arch/m68k/kernel/asm-offsets_no.c | 76
-rw-r--r--  arch/m68k/kernel/entry_no.S | 1
-rw-r--r--  arch/m68k/kernel/irq.c | 28
-rw-r--r--  arch/m68k/kernel/m68k_ksyms.c | 36
-rw-r--r--  arch/m68k/kernel/m68k_ksyms_mm.c | 16
-rw-r--r--  arch/m68k/kernel/m68k_ksyms_no.c | 78
-rw-r--r--  arch/m68k/kernel/process_no.c | 2
-rw-r--r--  arch/m68k/kernel/sys_m68k.c | 581
-rw-r--r--  arch/m68k/kernel/sys_m68k_mm.c | 546
-rw-r--r--  arch/m68k/kernel/sys_m68k_no.c | 94
-rw-r--r--  arch/m68k/kernel/syscalltable.S | 1
-rw-r--r--  arch/m68k/lib/Makefile | 13
-rw-r--r--  arch/m68k/lib/Makefile_mm | 6
-rw-r--r--  arch/m68k/lib/Makefile_no | 7
-rw-r--r--  arch/m68k/lib/checksum.c | 5
-rw-r--r--  arch/m68k/lib/checksum_no.c | 3
-rw-r--r--  arch/m68k/lib/memcpy.c | 128
-rw-r--r--  arch/m68k/lib/memmove.c | 2
-rw-r--r--  arch/m68k/lib/memset.c | 114
-rw-r--r--  arch/m68k/lib/muldi3.c | 99
-rw-r--r--  arch/m68k/lib/muldi3_mm.c | 63
-rw-r--r--  arch/m68k/lib/muldi3_no.c | 86
-rw-r--r--  arch/m68k/lib/string.c | 223
-rw-r--r--  arch/m68k/mm/Makefile | 14
-rw-r--r--  arch/m68k/mm/Makefile_mm | 8
-rw-r--r--  arch/m68k/mm/Makefile_no | 5
-rw-r--r--  arch/m68k/mm/init_no.c | 51
-rw-r--r--  arch/m68k/mm/kmap.c | 368
-rw-r--r--  arch/m68k/mm/kmap_mm.c | 367
-rw-r--r--  arch/m68k/mm/kmap_no.c | 45
-rw-r--r--  arch/m68k/platform/68328/entry.S | 7
-rw-r--r--  arch/m68k/platform/68360/entry.S | 7
-rw-r--r--  arch/m68k/platform/coldfire/dma.c | 3
-rw-r--r--  arch/m68k/platform/coldfire/entry.S | 11
-rw-r--r--  arch/m68k/platform/coldfire/head.S | 1
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/mn10300/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/s390/Kconfig | 11
-rw-r--r--  arch/s390/appldata/appldata_base.c | 2
-rw-r--r--  arch/s390/include/asm/cmpxchg.h | 1
-rw-r--r--  arch/s390/include/asm/elf.h | 12
-rw-r--r--  arch/s390/include/asm/hugetlb.h | 17
-rw-r--r--  arch/s390/include/asm/irq.h | 1
-rw-r--r--  arch/s390/include/asm/lowcore.h | 4
-rw-r--r--  arch/s390/include/asm/mmu.h | 9
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 6
-rw-r--r--  arch/s390/include/asm/page.h | 60
-rw-r--r--  arch/s390/include/asm/percpu.h | 68
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 57
-rw-r--r--  arch/s390/include/asm/pgtable.h | 607
-rw-r--r--  arch/s390/include/asm/processor.h | 1
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 13
-rw-r--r--  arch/s390/include/asm/unistd.h | 1
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 4
-rw-r--r--  arch/s390/kernel/entry.S | 1
-rw-r--r--  arch/s390/kernel/entry64.S | 1
-rw-r--r--  arch/s390/kernel/irq.c | 1
-rw-r--r--  arch/s390/kernel/process.c | 19
-rw-r--r--  arch/s390/kernel/setup.c | 31
-rw-r--r--  arch/s390/kernel/smp.c | 30
-rw-r--r--  arch/s390/kernel/time.c | 4
-rw-r--r--  arch/s390/kernel/topology.c | 16
-rw-r--r--  arch/s390/kernel/vdso32/Makefile | 3
-rw-r--r--  arch/s390/kernel/vdso64/Makefile | 3
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/s390/mm/extmem.c | 6
-rw-r--r--  arch/s390/mm/fault.c | 187
-rw-r--r--  arch/s390/mm/hugetlbpage.c | 10
-rw-r--r--  arch/s390/mm/init.c | 3
-rw-r--r--  arch/s390/mm/pageattr.c | 2
-rw-r--r--  arch/s390/mm/pgtable.c | 68
-rw-r--r--  arch/s390/mm/vmem.c | 14
-rw-r--r--  arch/s390/oprofile/hwsampler.c | 21
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/tile/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/um/Kconfig.x86 | 4
-rw-r--r--  arch/um/include/asm/common.lds.S | 2
-rw-r--r--  arch/x86/include/asm/linkage.h | 5
-rw-r--r--  arch/x86/include/asm/percpu.h | 7
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r--  arch/x86/kernel/ptrace.c | 4
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 2
98 files changed, 2024 insertions(+), 2660 deletions(-)
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 3d890a98a08b..f937ad123852 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -39,7 +39,7 @@ SECTIONS
 	__init_begin = ALIGN(PAGE_SIZE);
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 	/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
 	   needed for the THREAD_SIZE aligned init_task gets freed after init */
 	. = ALIGN(THREAD_SIZE);
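
The same one-line conversion repeats for every architecture below, so it is worth spelling out once what changed. The following is a sketch of the two linker-script helper macros from include/asm-generic/vmlinux.lds.h as they looked around this release (shape only, not verbatim): PERCPU_SECTION() hard-codes the PAGE_SIZE output alignment that callers previously had to pass to PERCPU() by hand.

	/* Old form: each caller picked the output-section alignment itself. */
	#define PERCPU(cacheline, align)				\
		. = ALIGN(align);					\
		.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
			PERCPU_INPUT(cacheline)				\
		}

	/* New form: the section is always page aligned, so the second
	 * argument (and the chance to get it wrong per-arch) disappears. */
	#define PERCPU_SECTION(cacheline)				\
		. = ALIGN(PAGE_SIZE);					\
		.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
			PERCPU_INPUT(cacheline)				\
		}
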
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b4348e62ef06..e5287f21badc 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -82,7 +82,7 @@ SECTIONS
 #endif
 	}
 
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 
 #ifndef CONFIG_XIP_KERNEL
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
index 04c779832c78..4f3572a1c684 100644
--- a/arch/arm/mach-tegra/include/mach/kbc.h
+++ b/arch/arm/mach-tegra/include/mach/kbc.h
@@ -50,13 +50,11 @@ struct tegra_kbc_platform_data {
 	unsigned int debounce_cnt;
 	unsigned int repeat_cnt;
 
-	unsigned int wake_cnt; /* 0:wake on any key >1:wake on wake_cfg */
-	const struct tegra_kbc_wake_key *wake_cfg;
-
 	struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
 	const struct matrix_keymap_data *keymap_data;
 
 	bool wakeup;
 	bool use_fn_map;
+	bool use_ghost_filter;
 };
 #endif
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 854fa49f1c3e..8d85c8c6f857 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -136,7 +136,7 @@ SECTIONS
 
 	. = ALIGN(16);
 	INIT_DATA_SECTION(16)
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 
 	.exit.data :
 	{
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 728bbd9e7d4c..a6990cb0f098 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -102,7 +102,7 @@ SECTIONS
 #endif
 	__vmlinux_end = .;		/* Last address of the physical file. */
 #ifdef CONFIG_ETRAX_ARCH_V32
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 
 	.init.ramfs : {
 		INIT_RAM_FS
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 0daae8af5787..7e958d829ec9 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -37,7 +37,7 @@ SECTIONS
 	_einittext = .;
 
 	INIT_DATA_SECTION(8)
-	PERCPU(L1_CACHE_BYTES, 4096)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index cf95aec77460..018e4a711d79 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -54,7 +54,7 @@ SECTIONS
 	__init_begin = .;
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 75531da02a40..d66e34c718d0 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -5,6 +5,7 @@ config M68K
 	select HAVE_AOUT if MMU
 	select GENERIC_ATOMIC64 if MMU
 	select HAVE_GENERIC_HARDIRQS if !MMU
+	select GENERIC_IRQ_SHOW if !MMU
 
 config RWSEM_GENERIC_SPINLOCK
 	bool
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h
index 7d3779fdc5b6..6b0e2d349f0e 100644
--- a/arch/m68k/include/asm/bitops_no.h
+++ b/arch/m68k/include/asm/bitops_no.h
@@ -246,23 +246,7 @@ static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
 	return retval;
 }
 
-#define ext2_set_bit_atomic(lock, nr, addr)		\
-	({						\
-		int ret;				\
-		spin_lock(lock);			\
-		ret = __test_and_set_bit_le((nr), (addr)); \
-		spin_unlock(lock);			\
-		ret;					\
-	})
-
-#define ext2_clear_bit_atomic(lock, nr, addr)		\
-	({						\
-		int ret;				\
-		spin_lock(lock);			\
-		ret = __test_and_clear_bit_le((nr), (addr)); \
-		spin_unlock(lock);			\
-		ret;					\
-	})
+#include <asm-generic/bitops/ext2-atomic.h>
 
 static inline int test_bit_le(int nr, const volatile void *addr)
 {
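
The removed macros are not lost: <asm-generic/bitops/ext2-atomic.h> supplies the identical lock-protected pattern for every architecture that wants it. A sketch of what that generic header provides (same shape as the generic header of this era; check the tree for the authoritative text):

	#define ext2_set_bit_atomic(lock, nr, addr)		\
		({						\
			int ret;				\
			spin_lock(lock);			\
			ret = __test_and_set_bit_le(nr, addr);	\
			spin_unlock(lock);			\
			ret;					\
		})

	#define ext2_clear_bit_atomic(lock, nr, addr)		\
		({						\
			int ret;				\
			spin_lock(lock);			\
			ret = __test_and_clear_bit_le(nr, addr); \
			spin_unlock(lock);			\
			ret;					\
		})
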
diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h
index cf20f3097af6..353bf754a972 100644
--- a/arch/m68k/include/asm/io_no.h
+++ b/arch/m68k/include/asm/io_no.h
@@ -144,8 +144,10 @@ static inline void io_insl(unsigned int addr, void *buf, int len)
 #define IOMAP_NOCACHE_NONSER	2
 #define IOMAP_WRITETHROUGH	3
 
-extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
-
+static inline void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+{
+	return (void *) physaddr;
+}
 static inline void *ioremap(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
@@ -163,7 +165,7 @@ static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size
 	return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
 }
 
-extern void iounmap(void *addr);
+#define iounmap(addr)	do { } while(0)
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
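
Without an MMU there is no way to set up an uncached alias, so __ioremap() degenerates to an identity mapping and iounmap() to a no-op. A minimal, hypothetical caller showing why inlining this is safe (MY_CS_BASE is an example address, not from this patch): the cookie a driver gets back is simply the physical address it passed in.

	#define MY_CS_BASE	0x10000000UL	/* assumption: example only */

	static void *regs;

	static int my_probe(void)
	{
		regs = ioremap(MY_CS_BASE, 0x100);	/* == (void *)MY_CS_BASE */
		if (!regs)
			return -ENOMEM;
		writeb(0x01, regs);			/* plain bus access */
		return 0;
	}
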
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index 59a69a5c62f2..983fed9d469b 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -1,5 +1,105 @@
-#ifdef CONFIG_MMU
-#include "asm-offsets_mm.c"
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#define ASM_OFFSETS_C
+
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/kbuild.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/amigahw.h>
+#include <linux/font.h>
+
+int main(void)
+{
+	/* offsets into the task struct */
+	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+	DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
+	DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
+
+	/* offsets into the thread struct */
+	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
+	DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
+	DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+	DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
+	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
+	DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
+	DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
+	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
+
+	/* offsets into the thread_info struct */
+	DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
+	DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
+
+	/* offsets into the pt_regs */
+	DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
+	DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
+	DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
+	DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
+	DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
+	DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
+	DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
+	DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
+	DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
+	DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
+	DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
+	DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
+
+	/* bitfields are a bit difficult */
+#ifdef CONFIG_COLDFIRE
+	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
 #else
-#include "asm-offsets_no.c"
+	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
+#endif
+
+	/* offsets into the irq_cpustat_t struct */
+	DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
+
+	/* signal defines */
+	DEFINE(LSIGSEGV, SIGSEGV);
+	DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
+	DEFINE(LSIGTRAP, SIGTRAP);
+	DEFINE(LTRAP_TRACE, TRAP_TRACE);
+
+#ifdef CONFIG_MMU
+	/* offsets into the bi_record struct */
+	DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
+	DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
+	DEFINE(BIR_DATA, offsetof(struct bi_record, data));
+
+	/* offsets into font_desc (drivers/video/console/font.h) */
+	DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
+	DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
+	DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
+	DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
+	DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
+	DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
+
+	/* offsets into the custom struct */
+	DEFINE(CUSTOMBASE, &amiga_custom);
+	DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
+	DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
+	DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
+	DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
+	DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
+	DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
+	DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
+	DEFINE(CIAABASE, &ciaa);
+	DEFINE(CIABBASE, &ciab);
+	DEFINE(C_PRA, offsetof(struct CIA, pra));
+	DEFINE(ZTWOBASE, zTwoBase);
 #endif
+
+	return 0;
+}
diff --git a/arch/m68k/kernel/asm-offsets_mm.c b/arch/m68k/kernel/asm-offsets_mm.c
deleted file mode 100644
index 78e59b82ebc3..000000000000
--- a/arch/m68k/kernel/asm-offsets_mm.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * This program is used to generate definitions needed by
- * assembly language modules.
- *
- * We use the technique used in the OSF Mach kernel code:
- * generate asm statements containing #defines,
- * compile this file to assembler, and then extract the
- * #defines from the assembly-language output.
- */
-
-#define ASM_OFFSETS_C
-
-#include <linux/stddef.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/kbuild.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/amigahw.h>
-#include <linux/font.h>
-
-int main(void)
-{
-	/* offsets into the task struct */
-	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
-	DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
-	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
-#ifdef CONFIG_MMU
-	DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
-#endif
-
-	/* offsets into the thread struct */
-	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
-	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
-	DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
-	DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
-	DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
-	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
-	DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
-	DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
-	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
-
-	/* offsets into the thread_info struct */
-	DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
-	DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
-
-	/* offsets into the pt_regs */
-	DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
-	DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
-	DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
-	DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
-	DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
-	DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
-	DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
-	DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
-	DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
-	DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
-	DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
-	DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
-	/* bitfields are a bit difficult */
-	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
-
-	/* offsets into the irq_cpustat_t struct */
-	DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
-
-	/* offsets into the bi_record struct */
-	DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
-	DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
-	DEFINE(BIR_DATA, offsetof(struct bi_record, data));
-
-	/* offsets into font_desc (drivers/video/console/font.h) */
-	DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
-	DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
-	DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
-	DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
-	DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
-	DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
-
-	/* signal defines */
-	DEFINE(LSIGSEGV, SIGSEGV);
-	DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
-	DEFINE(LSIGTRAP, SIGTRAP);
-	DEFINE(LTRAP_TRACE, TRAP_TRACE);
-
-	/* offsets into the custom struct */
-	DEFINE(CUSTOMBASE, &amiga_custom);
-	DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
-	DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
-	DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
-	DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
-	DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
-	DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
-	DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
-	DEFINE(CIAABASE, &ciaa);
-	DEFINE(CIABBASE, &ciab);
-	DEFINE(C_PRA, offsetof(struct CIA, pra));
-	DEFINE(ZTWOBASE, zTwoBase);
-
-	return 0;
-}
diff --git a/arch/m68k/kernel/asm-offsets_no.c b/arch/m68k/kernel/asm-offsets_no.c
deleted file mode 100644
index ffe02f41ad46..000000000000
--- a/arch/m68k/kernel/asm-offsets_no.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * This program is used to generate definitions needed by
- * assembly language modules.
- *
- * We use the technique used in the OSF Mach kernel code:
- * generate asm statements containing #defines,
- * compile this file to assembler, and then extract the
- * #defines from the assembly-language output.
- */
-
-#include <linux/stddef.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/ptrace.h>
-#include <linux/hardirq.h>
-#include <linux/kbuild.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/thread_info.h>
-
-int main(void)
-{
-	/* offsets into the task struct */
-	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
-	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
-
-	/* offsets into the irq_cpustat_t struct */
-	DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
-
-	/* offsets into the thread struct */
-	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
-	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
-	DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
-	DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
-	DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
-	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
-	DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
-	DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
-	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
-
-	/* offsets into the pt_regs */
-	DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
-	DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
-	DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
-	DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
-	DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
-	DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
-	DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
-	DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
-	DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
-	DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
-	DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
-	DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
-
-#ifdef CONFIG_COLDFIRE
-	/* bitfields are a bit difficult */
-	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
-#else
-	/* bitfields are a bit difficult */
-	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
-#endif
-
-	/* signal defines */
-	DEFINE(SIGSEGV, SIGSEGV);
-	DEFINE(SEGV_MAPERR, SEGV_MAPERR);
-	DEFINE(SIGTRAP, SIGTRAP);
-	DEFINE(TRAP_TRACE, TRAP_TRACE);
-
-	DEFINE(PT_PTRACED, PT_PTRACED);
-
-	/* Offsets in thread_info structure */
-	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
-	DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count));
-
-	return 0;
-}
diff --git a/arch/m68k/kernel/entry_no.S b/arch/m68k/kernel/entry_no.S
index 2783f25e38bd..5f0f6b598b5a 100644
--- a/arch/m68k/kernel/entry_no.S
+++ b/arch/m68k/kernel/entry_no.S
@@ -24,7 +24,6 @@
  * linux 2.4 support David McCullough <davidm@snapgear.com>
  */
 
-#include <linux/sys.h>
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
diff --git a/arch/m68k/kernel/irq.c b/arch/m68k/kernel/irq.c
index 15dbc3e9d20c..544b8717d499 100644
--- a/arch/m68k/kernel/irq.c
+++ b/arch/m68k/kernel/irq.c
@@ -28,31 +28,3 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
 
 	set_irq_regs(oldregs);
 }
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	struct irqaction *ap;
-	int irq = *((loff_t *) v);
-
-	if (irq == 0)
-		seq_puts(p, "           CPU0\n");
-
-	if (irq < NR_IRQS) {
-		struct irq_desc *desc = irq_to_desc(irq);
-
-		ap = desc->action;
-		if (ap) {
-			seq_printf(p, "%3d: ", irq);
-			seq_printf(p, "%10u ", kstat_irqs(irq));
-			seq_printf(p, "%14s  ", irq_desc_get_chip(desc)->name);
-
-			seq_printf(p, "%s", ap->name);
-			for (ap = ap->next; ap; ap = ap->next)
-				seq_printf(p, ", %s", ap->name);
-			seq_putc(p, '\n');
-		}
-	}
-
-	return 0;
-}
-
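
The hand-rolled /proc/interrupts printer can go because the Kconfig hunk above selects GENERIC_IRQ_SHOW for the !MMU build, so kernel/irq/proc.c provides show_interrupts() instead. Roughly what the generic core does for each row (a simplified sketch of the generic behaviour, not verbatim kernel code, and with proper descriptor locking that the removed version lacked):

	#include <linux/interrupt.h>
	#include <linux/irq.h>
	#include <linux/kernel_stat.h>
	#include <linux/seq_file.h>

	/* Sketch: one /proc/interrupts row per populated irq descriptor. */
	static void show_one_irq(struct seq_file *p, int irq)
	{
		struct irq_desc *desc = irq_to_desc(irq);
		struct irqaction *action;

		raw_spin_lock_irq(&desc->lock);
		action = desc->action;
		if (action) {
			seq_printf(p, "%3d: %10u %14s  %s", irq, kstat_irqs(irq),
				   irq_desc_get_chip(desc)->name, action->name);
			for (action = action->next; action; action = action->next)
				seq_printf(p, ", %s", action->name);
			seq_putc(p, '\n');
		}
		raw_spin_unlock_irq(&desc->lock);
	}
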
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 4752c28ce0ac..33f82769547c 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -1,5 +1,33 @@
-#ifdef CONFIG_MMU
-#include "m68k_ksyms_mm.c"
-#else
-#include "m68k_ksyms_no.c"
+#include <linux/module.h>
+
+asmlinkage long long __ashldi3 (long long, int);
+asmlinkage long long __ashrdi3 (long long, int);
+asmlinkage long long __lshrdi3 (long long, int);
+asmlinkage long long __muldi3 (long long, long long);
+
+/* The following are special because they're not called
+   explicitly (the C compiler generates them).  Fortunately,
+   their interface isn't gonna change any time soon now, so
+   it's OK to leave it out of version control.  */
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__muldi3);
+
+#if !defined(__mc68020__) && !defined(__mc68030__) && \
+    !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcpu32__)
+/*
+ * Simpler 68k and ColdFire parts also need a few other gcc functions.
+ */
+extern long long __divsi3(long long, long long);
+extern long long __modsi3(long long, long long);
+extern long long __mulsi3(long long, long long);
+extern long long __udivsi3(long long, long long);
+extern long long __umodsi3(long long, long long);
+
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__mulsi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
 #endif
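
These exports exist because gcc emits calls to libgcc helpers behind the programmer's back: no module calls __muldi3() by name, but any 64-bit multiply compiles into one on a 32-bit m68k target. A hypothetical module snippet (not from this patch) that would fail to load without EXPORT_SYMBOL(__muldi3):

	#include <linux/module.h>
	#include <linux/types.h>

	/* gcc lowers the u64 multiply below to a __muldi3() call on m68k,
	 * so the symbol must be exported for modules to link at load time. */
	static u64 ns_from_ticks(u64 ticks, u64 ns_per_tick)
	{
		return ticks * ns_per_tick;
	}
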
diff --git a/arch/m68k/kernel/m68k_ksyms_mm.c b/arch/m68k/kernel/m68k_ksyms_mm.c
deleted file mode 100644
index d900e77e5363..000000000000
--- a/arch/m68k/kernel/m68k_ksyms_mm.c
+++ /dev/null
@@ -1,16 +0,0 @@
-#include <linux/module.h>
-
-asmlinkage long long __ashldi3 (long long, int);
-asmlinkage long long __ashrdi3 (long long, int);
-asmlinkage long long __lshrdi3 (long long, int);
-asmlinkage long long __muldi3 (long long, long long);
-
-/* The following are special because they're not called
-   explicitly (the C compiler generates them).  Fortunately,
-   their interface isn't gonna change any time soon now, so
-   it's OK to leave it out of version control.  */
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__muldi3);
-
diff --git a/arch/m68k/kernel/m68k_ksyms_no.c b/arch/m68k/kernel/m68k_ksyms_no.c
deleted file mode 100644
index 39fe0a7aec32..000000000000
--- a/arch/m68k/kernel/m68k_ksyms_no.c
+++ /dev/null
@@ -1,78 +0,0 @@
-#include <linux/module.h>
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-
-#include <asm/setup.h>
-#include <asm/machdep.h>
-#include <asm/pgalloc.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/checksum.h>
-#include <asm/current.h>
-
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
-/* platform dependent support */
-
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(dump_fpu);
-
-EXPORT_SYMBOL(ip_fast_csum);
-
-EXPORT_SYMBOL(kernel_thread);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-
-/* The following are special because they're not called
-   explicitly (the C compiler generates them).  Fortunately,
-   their interface isn't gonna change any time soon now, so
-   it's OK to leave it out of version control.  */
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-
-/*
- * libgcc functions - functions that are used internally by the
- * compiler...  (prototypes are not correct though, but that
- * doesn't really matter since they're not versioned).
- */
-extern void __ashldi3(void);
-extern void __ashrdi3(void);
-extern void __divsi3(void);
-extern void __lshrdi3(void);
-extern void __modsi3(void);
-extern void __muldi3(void);
-extern void __mulsi3(void);
-extern void __udivsi3(void);
-extern void __umodsi3(void);
-
-	/* gcc lib functions */
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__mulsi3);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__umodsi3);
-
-#ifdef CONFIG_COLDFIRE
-extern unsigned int *dma_device_address;
-extern unsigned long dma_base_addr, _ramend;
-EXPORT_SYMBOL(dma_base_addr);
-EXPORT_SYMBOL(dma_device_address);
-EXPORT_SYMBOL(_ramend);
-
-extern asmlinkage void trap(void);
-extern void *_ramvec;
-EXPORT_SYMBOL(trap);
-EXPORT_SYMBOL(_ramvec);
-#endif /* CONFIG_COLDFIRE */
diff --git a/arch/m68k/kernel/process_no.c b/arch/m68k/kernel/process_no.c
index e2a63af5d517..9b86ad11c68e 100644
--- a/arch/m68k/kernel/process_no.c
+++ b/arch/m68k/kernel/process_no.c
@@ -151,6 +151,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 	set_fs(fs);
 	return retval;
 }
+EXPORT_SYMBOL(kernel_thread);
 
 void flush_thread(void)
 {
@@ -283,6 +284,7 @@ int dump_fpu(struct pt_regs *regs, struct user_m68kfp_struct *fpu)
 #endif
 	return 1;
 }
+EXPORT_SYMBOL(dump_fpu);
 
 /*
  * Generic dumping code. Used for panic and debug.
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 63013df33584..8623f8dc16f8 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -1,5 +1,580 @@
+/*
+ * linux/arch/m68k/kernel/sys_m68k.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/m68k
+ * platform.
+ */
+
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/ipc.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/cachectl.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+
 #ifdef CONFIG_MMU
-#include "sys_m68k_mm.c"
+
+#include <asm/tlb.h>
+
+asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+			     unsigned long error_code);
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags,
+	unsigned long fd, unsigned long pgoff)
+{
+	/*
+	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
+	 * so we need to shift the argument down by 1; m68k mmap64(3)
+	 * (in libc) expects the last argument of mmap2 in 4Kb units.
+	 */
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+}
+
+/* Convert virtual (user) address VADDR to physical address PADDR */
+#define virt_to_phys_040(vaddr)					\
+({								\
+  unsigned long _mmusr, _paddr;					\
+								\
+  __asm__ __volatile__ (".chip 68040\n\t"			\
+			"ptestr (%1)\n\t"			\
+			"movec %%mmusr,%0\n\t"			\
+			".chip 68k"				\
+			: "=r" (_mmusr)				\
+			: "a" (vaddr));				\
+  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
+  _paddr;							\
+})
+
+static inline int
+cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
+{
+  unsigned long paddr, i;
+
+  switch (scope)
+    {
+    case FLUSH_SCOPE_ALL:
+      switch (cache)
+	{
+	case FLUSH_CACHE_DATA:
+	  /* This nop is needed for some broken versions of the 68040.  */
+	  __asm__ __volatile__ ("nop\n\t"
+				".chip 68040\n\t"
+				"cpusha %dc\n\t"
+				".chip 68k");
+	  break;
+	case FLUSH_CACHE_INSN:
+	  __asm__ __volatile__ ("nop\n\t"
+				".chip 68040\n\t"
+				"cpusha %ic\n\t"
+				".chip 68k");
+	  break;
+	default:
+	case FLUSH_CACHE_BOTH:
+	  __asm__ __volatile__ ("nop\n\t"
+				".chip 68040\n\t"
+				"cpusha %bc\n\t"
+				".chip 68k");
+	  break;
+	}
+      break;
+
+    case FLUSH_SCOPE_LINE:
+      /* Find the physical address of the first mapped page in the
+	 address range.  */
+      if ((paddr = virt_to_phys_040(addr))) {
+	paddr += addr & ~(PAGE_MASK | 15);
+	len = (len + (addr & 15) + 15) >> 4;
+      } else {
+	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
+
+	if (len <= tmp)
+	  return 0;
+	addr += tmp;
+	len -= tmp;
+	tmp = PAGE_SIZE;
+	for (;;)
+	  {
+	    if ((paddr = virt_to_phys_040(addr)))
+	      break;
+	    if (len <= tmp)
+	      return 0;
+	    addr += tmp;
+	    len -= tmp;
+	  }
+	len = (len + 15) >> 4;
+      }
+      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
+      while (len--)
+	{
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushl %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushl %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushl %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	  if (!--i && len)
+	    {
+	      /*
+	       * No need to page align here since it is done by
+	       * virt_to_phys_040().
+	       */
+	      addr += PAGE_SIZE;
+	      i = PAGE_SIZE / 16;
+	      /* Recompute physical address when crossing a page
+		 boundary.  */
+	      for (;;)
+		{
+		  if ((paddr = virt_to_phys_040(addr)))
+		    break;
+		  if (len <= i)
+		    return 0;
+		  len -= i;
+		  addr += PAGE_SIZE;
+		}
+	    }
+	  else
+	    paddr += 16;
+	}
+      break;
+
+    default:
+    case FLUSH_SCOPE_PAGE:
+      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
+      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
+	{
+	  if (!(paddr = virt_to_phys_040(addr)))
+	    continue;
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushp %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushp %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushp %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	}
+      break;
+    }
+  return 0;
+}
+
+#define virt_to_phys_060(vaddr)				\
+({							\
+  unsigned long paddr;					\
+  __asm__ __volatile__ (".chip 68060\n\t"		\
+			"plpar (%0)\n\t"		\
+			".chip 68k"			\
+			: "=a" (paddr)			\
+			: "0" (vaddr));			\
+  (paddr); /* XXX */					\
+})
+
+static inline int
+cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
+{
+  unsigned long paddr, i;
+
+  /*
+   * 68060 manual says:
+   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
+   *  cpush %ic : invalidate IC
+   *  cpush %bc : flush DC + invalidate IC
+   */
+  switch (scope)
+    {
+    case FLUSH_SCOPE_ALL:
+      switch (cache)
+	{
+	case FLUSH_CACHE_DATA:
+	  __asm__ __volatile__ (".chip 68060\n\t"
+				"cpusha %dc\n\t"
+				".chip 68k");
+	  break;
+	case FLUSH_CACHE_INSN:
+	  __asm__ __volatile__ (".chip 68060\n\t"
+				"cpusha %ic\n\t"
+				".chip 68k");
+	  break;
+	default:
+	case FLUSH_CACHE_BOTH:
+	  __asm__ __volatile__ (".chip 68060\n\t"
+				"cpusha %bc\n\t"
+				".chip 68k");
+	  break;
+	}
+      break;
+
+    case FLUSH_SCOPE_LINE:
+      /* Find the physical address of the first mapped page in the
+	 address range.  */
+      len += addr & 15;
+      addr &= -16;
+      if (!(paddr = virt_to_phys_060(addr))) {
+	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
+
+	if (len <= tmp)
+	  return 0;
+	addr += tmp;
+	len -= tmp;
+	tmp = PAGE_SIZE;
+	for (;;)
+	  {
+	    if ((paddr = virt_to_phys_060(addr)))
+	      break;
+	    if (len <= tmp)
+	      return 0;
+	    addr += tmp;
+	    len -= tmp;
+	  }
+      }
+      len = (len + 15) >> 4;
+      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
+      while (len--)
+	{
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushl %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushl %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushl %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	  if (!--i && len)
+	    {
+
+	      /*
+	       * We just want to jump to the first cache line
+	       * in the next page.
+	       */
+	      addr += PAGE_SIZE;
+	      addr &= PAGE_MASK;
+
+	      i = PAGE_SIZE / 16;
+	      /* Recompute physical address when crossing a page
+		 boundary.  */
+	      for (;;)
+		{
+		  if ((paddr = virt_to_phys_060(addr)))
+		    break;
+		  if (len <= i)
+		    return 0;
+		  len -= i;
+		  addr += PAGE_SIZE;
+		}
+	    }
+	  else
+	    paddr += 16;
+	}
+      break;
+
+    default:
+    case FLUSH_SCOPE_PAGE:
+      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
+      addr &= PAGE_MASK;	/* Workaround for bug in some
+				   revisions of the 68060 */
+      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
+	{
+	  if (!(paddr = virt_to_phys_060(addr)))
+	    continue;
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushp %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushp %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushp %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	}
+      break;
+    }
+  return 0;
+}
+
+/* sys_cacheflush -- flush (part of) the processor cache.  */
+asmlinkage int
+sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+{
+	struct vm_area_struct *vma;
+	int ret = -EINVAL;
+
+	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
+	    cache & ~FLUSH_CACHE_BOTH)
+		goto out;
+
+	if (scope == FLUSH_SCOPE_ALL) {
+		/* Only the superuser may explicitly flush the whole cache. */
+		ret = -EPERM;
+		if (!capable(CAP_SYS_ADMIN))
+			goto out;
+	} else {
+		/*
+		 * Verify that the specified address region actually belongs
+		 * to this process.
+		 */
+		vma = find_vma (current->mm, addr);
+		ret = -EINVAL;
+		/* Check for overflow.  */
+		if (addr + len < addr)
+			goto out;
+		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
+			goto out;
+	}
+
+	if (CPU_IS_020_OR_030) {
+		if (scope == FLUSH_SCOPE_LINE && len < 256) {
+			unsigned long cacr;
+			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
+			if (cache & FLUSH_CACHE_INSN)
+				cacr |= 4;
+			if (cache & FLUSH_CACHE_DATA)
+				cacr |= 0x400;
+			len >>= 2;
+			while (len--) {
+				__asm__ __volatile__ ("movec %1, %%caar\n\t"
+						      "movec %0, %%cacr"
+						      : /* no outputs */
+						      : "r" (cacr), "r" (addr));
+				addr += 4;
+			}
+		} else {
+			/* Flush the whole cache, even if page granularity requested. */
+			unsigned long cacr;
+			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
+			if (cache & FLUSH_CACHE_INSN)
+				cacr |= 8;
+			if (cache & FLUSH_CACHE_DATA)
+				cacr |= 0x800;
+			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
+		}
+		ret = 0;
+		goto out;
+	} else {
+		/*
+		 * 040 or 060: don't blindly trust 'scope', someone could
+		 * try to flush a few megs of memory.
+		 */
+
+		if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
+			scope=FLUSH_SCOPE_PAGE;
+		if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
+			scope=FLUSH_SCOPE_ALL;
+		if (CPU_IS_040) {
+			ret = cache_flush_040 (addr, scope, cache, len);
+		} else if (CPU_IS_060) {
+			ret = cache_flush_060 (addr, scope, cache, len);
+		}
+	}
+out:
+	return ret;
+}
+
+/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
+   D1 (newval).  */
+asmlinkage int
+sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+		      unsigned long __user * mem)
+{
+	/* This was borrowed from ARM's implementation.  */
+	for (;;) {
+		struct mm_struct *mm = current->mm;
+		pgd_t *pgd;
+		pmd_t *pmd;
+		pte_t *pte;
+		spinlock_t *ptl;
+		unsigned long mem_value;
+
+		down_read(&mm->mmap_sem);
+		pgd = pgd_offset(mm, (unsigned long)mem);
+		if (!pgd_present(*pgd))
+			goto bad_access;
+		pmd = pmd_offset(pgd, (unsigned long)mem);
+		if (!pmd_present(*pmd))
+			goto bad_access;
+		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
+		if (!pte_present(*pte) || !pte_dirty(*pte)
+		    || !pte_write(*pte)) {
+			pte_unmap_unlock(pte, ptl);
+			goto bad_access;
+		}
+
+		mem_value = *mem;
+		if (mem_value == oldval)
+			*mem = newval;
+
+		pte_unmap_unlock(pte, ptl);
+		up_read(&mm->mmap_sem);
+		return mem_value;
+
+	      bad_access:
+		up_read(&mm->mmap_sem);
+		/* This is not necessarily a bad access, we can get here if
+		   a memory we're trying to write to should be copied-on-write.
+		   Make the kernel do the necessary page stuff, then re-iterate.
+		   Simulate a write access fault to do that.  */
+		{
+			/* The first argument of the function corresponds to
+			   D1, which is the first field of struct pt_regs.  */
+			struct pt_regs *fp = (struct pt_regs *)&newval;
+
+			/* '3' is an RMW flag.  */
+			if (do_page_fault(fp, (unsigned long)mem, 3))
+				/* If the do_page_fault() failed, we don't
+				   have anything meaningful to return.
+				   There should be a SIGSEGV pending for
+				   the process.  */
+				return 0xdeadbeef;
+		}
+	}
+}
+
 #else
-#include "sys_m68k_no.c"
-#endif
+
+/* sys_cacheflush -- flush (part of) the processor cache.  */
+asmlinkage int
+sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+{
+	flush_cache_all();
+	return 0;
+}
+
+/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
+   D1 (newval).  */
+asmlinkage int
+sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+		      unsigned long __user * mem)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long mem_value;
+
+	down_read(&mm->mmap_sem);
+
+	mem_value = *mem;
+	if (mem_value == oldval)
+		*mem = newval;
+
+	up_read(&mm->mmap_sem);
+	return mem_value;
+}
+
+#endif /* CONFIG_MMU */
+
+asmlinkage int sys_getpagesize(void)
+{
+	return PAGE_SIZE;
+}
+
+/*
+ * Do a system call from kernel instead of calling sys_execve so we
+ * end up with proper pt_regs.
+ */
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
+{
+	register long __res asm ("%d0") = __NR_execve;
+	register long __a asm ("%d1") = (long)(filename);
+	register long __b asm ("%d2") = (long)(argv);
+	register long __c asm ("%d3") = (long)(envp);
+	asm volatile ("trap  #0" : "+d" (__res)
+			: "d" (__a), "d" (__b), "d" (__c));
+	return __res;
+}
+
+asmlinkage unsigned long sys_get_thread_area(void)
+{
+	return current_thread_info()->tp_value;
+}
+
+asmlinkage int sys_set_thread_area(unsigned long tp)
+{
+	current_thread_info()->tp_value = tp;
+	return 0;
+}
+
+asmlinkage int sys_atomic_barrier(void)
+{
+	/* no code needed for uniprocs */
+	return 0;
+}
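
For reference, the userspace side of the sys_cacheflush() handler above: m68k exposes it as cacheflush(2), and a JIT or self-modifying program calls it after writing instructions into memory. A hedged usage sketch (constants mirror <asm/cachectl.h>):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/cachectl.h>	/* FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH */

	/* Sketch: push freshly written code out of the data cache and
	 * invalidate the stale instruction cache for that range before
	 * jumping to it. */
	static int sync_icache(void *addr, unsigned long len)
	{
		return syscall(__NR_cacheflush, (unsigned long)addr,
			       FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
	}
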
diff --git a/arch/m68k/kernel/sys_m68k_mm.c b/arch/m68k/kernel/sys_m68k_mm.c
deleted file mode 100644
index 3db2e7f902aa..000000000000
--- a/arch/m68k/kernel/sys_m68k_mm.c
+++ /dev/null
@@ -1,546 +0,0 @@
-/*
- * linux/arch/m68k/kernel/sys_m68k.c
- *
- * This file contains various random system calls that
- * have a non-standard calling sequence on the Linux/m68k
- * platform.
- */
-
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/stat.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/ipc.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/cachectl.h>
-#include <asm/traps.h>
-#include <asm/page.h>
-#include <asm/unistd.h>
-#include <linux/elf.h>
-#include <asm/tlb.h>
-
-asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
-			     unsigned long error_code);
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	/*
-	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
-	 * so we need to shift the argument down by 1; m68k mmap64(3)
-	 * (in libc) expects the last argument of mmap2 in 4Kb units.
-	 */
-	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
-}
-
-/* Convert virtual (user) address VADDR to physical address PADDR */
-#define virt_to_phys_040(vaddr)					\
-({								\
-  unsigned long _mmusr, _paddr;					\
-								\
-  __asm__ __volatile__ (".chip 68040\n\t"			\
-			"ptestr (%1)\n\t"			\
-			"movec %%mmusr,%0\n\t"			\
-			".chip 68k"				\
-			: "=r" (_mmusr)				\
-			: "a" (vaddr));				\
-  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
-  _paddr;							\
-})
-
-static inline int
-cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
-{
-  unsigned long paddr, i;
-
-  switch (scope)
-    {
-    case FLUSH_SCOPE_ALL:
-      switch (cache)
-	{
-	case FLUSH_CACHE_DATA:
-	  /* This nop is needed for some broken versions of the 68040.  */
-	  __asm__ __volatile__ ("nop\n\t"
-				".chip 68040\n\t"
-				"cpusha %dc\n\t"
-				".chip 68k");
-	  break;
-	case FLUSH_CACHE_INSN:
-	  __asm__ __volatile__ ("nop\n\t"
-				".chip 68040\n\t"
-				"cpusha %ic\n\t"
-				".chip 68k");
-	  break;
-	default:
-	case FLUSH_CACHE_BOTH:
-	  __asm__ __volatile__ ("nop\n\t"
-				".chip 68040\n\t"
-				"cpusha %bc\n\t"
-				".chip 68k");
-	  break;
-	}
-      break;
-
-    case FLUSH_SCOPE_LINE:
-      /* Find the physical address of the first mapped page in the
-	 address range.  */
-      if ((paddr = virt_to_phys_040(addr))) {
-	paddr += addr & ~(PAGE_MASK | 15);
-	len = (len + (addr & 15) + 15) >> 4;
-      } else {
-	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
-
-	if (len <= tmp)
-	  return 0;
-	addr += tmp;
-	len -= tmp;
-	tmp = PAGE_SIZE;
-	for (;;)
-	  {
-	    if ((paddr = virt_to_phys_040(addr)))
-	      break;
-	    if (len <= tmp)
-	      return 0;
-	    addr += tmp;
-	    len -= tmp;
-	  }
-	len = (len + 15) >> 4;
-      }
-      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
-      while (len--)
-	{
-	  switch (cache)
-	    {
-	    case FLUSH_CACHE_DATA:
-	      __asm__ __volatile__ ("nop\n\t"
-				    ".chip 68040\n\t"
-				    "cpushl %%dc,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    case FLUSH_CACHE_INSN:
-	      __asm__ __volatile__ ("nop\n\t"
-				    ".chip 68040\n\t"
-				    "cpushl %%ic,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    default:
-	    case FLUSH_CACHE_BOTH:
-	      __asm__ __volatile__ ("nop\n\t"
-				    ".chip 68040\n\t"
-				    "cpushl %%bc,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    }
-	  if (!--i && len)
-	    {
-	      /*
-	       * No need to page align here since it is done by
-	       * virt_to_phys_040().
-	       */
-	      addr += PAGE_SIZE;
-	      i = PAGE_SIZE / 16;
-	      /* Recompute physical address when crossing a page
-		 boundary.  */
-	      for (;;)
-		{
-		  if ((paddr = virt_to_phys_040(addr)))
-		    break;
-		  if (len <= i)
-		    return 0;
-		  len -= i;
-		  addr += PAGE_SIZE;
-		}
-	    }
-	  else
-	    paddr += 16;
-	}
-      break;
-
-    default:
-    case FLUSH_SCOPE_PAGE:
-      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
-      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
-	{
-	  if (!(paddr = virt_to_phys_040(addr)))
-	    continue;
-	  switch (cache)
-	    {
-	    case FLUSH_CACHE_DATA:
-	      __asm__ __volatile__ ("nop\n\t"
-				    ".chip 68040\n\t"
-				    "cpushp %%dc,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    case FLUSH_CACHE_INSN:
-	      __asm__ __volatile__ ("nop\n\t"
-				    ".chip 68040\n\t"
-				    "cpushp %%ic,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    default:
-	    case FLUSH_CACHE_BOTH:
-	      __asm__ __volatile__ ("nop\n\t"
-				    ".chip 68040\n\t"
-				    "cpushp %%bc,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    }
-	}
-      break;
-    }
-  return 0;
-}
-
-#define virt_to_phys_060(vaddr)				\
-({							\
-  unsigned long paddr;					\
-  __asm__ __volatile__ (".chip 68060\n\t"		\
-			"plpar (%0)\n\t"		\
-			".chip 68k"			\
-			: "=a" (paddr)			\
-			: "0" (vaddr));			\
-  (paddr); /* XXX */					\
-})
-
-static inline int
-cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
-{
-  unsigned long paddr, i;
-
-  /*
-   * 68060 manual says:
-   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
-   *  cpush %ic : invalidate IC
-   *  cpush %bc : flush DC + invalidate IC
-   */
-  switch (scope)
-    {
-    case FLUSH_SCOPE_ALL:
-      switch (cache)
-	{
-	case FLUSH_CACHE_DATA:
-	  __asm__ __volatile__ (".chip 68060\n\t"
-				"cpusha %dc\n\t"
-				".chip 68k");
-	  break;
-	case FLUSH_CACHE_INSN:
-	  __asm__ __volatile__ (".chip 68060\n\t"
-				"cpusha %ic\n\t"
-				".chip 68k");
-	  break;
-	default:
-	case FLUSH_CACHE_BOTH:
-	  __asm__ __volatile__ (".chip 68060\n\t"
-				"cpusha %bc\n\t"
-				".chip 68k");
-	  break;
-	}
-      break;
-
-    case FLUSH_SCOPE_LINE:
-      /* Find the physical address of the first mapped page in the
-	 address range.  */
-      len += addr & 15;
-      addr &= -16;
-      if (!(paddr = virt_to_phys_060(addr))) {
-	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
-
-	if (len <= tmp)
-	  return 0;
-	addr += tmp;
-	len -= tmp;
-	tmp = PAGE_SIZE;
-	for (;;)
-	  {
-	    if ((paddr = virt_to_phys_060(addr)))
-	      break;
-	    if (len <= tmp)
-	      return 0;
-	    addr += tmp;
-	    len -= tmp;
-	  }
-      }
-      len = (len + 15) >> 4;
-      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
-      while (len--)
-	{
-	  switch (cache)
-	    {
-	    case FLUSH_CACHE_DATA:
-	      __asm__ __volatile__ (".chip 68060\n\t"
-				    "cpushl %%dc,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    case FLUSH_CACHE_INSN:
-	      __asm__ __volatile__ (".chip 68060\n\t"
-				    "cpushl %%ic,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    default:
-	    case FLUSH_CACHE_BOTH:
-	      __asm__ __volatile__ (".chip 68060\n\t"
-				    "cpushl %%bc,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    }
-	  if (!--i && len)
-	    {
-
-	      /*
-	       * We just want to jump to the first cache line
-	       * in the next page.
-	       */
-	      addr += PAGE_SIZE;
-	      addr &= PAGE_MASK;
-
-	      i = PAGE_SIZE / 16;
-	      /* Recompute physical address when crossing a page
-		 boundary.  */
-	      for (;;)
-		{
-		  if ((paddr = virt_to_phys_060(addr)))
-		    break;
-		  if (len <= i)
-		    return 0;
-		  len -= i;
-		  addr += PAGE_SIZE;
-		}
-	    }
-	  else
-	    paddr += 16;
-	}
-      break;
-
-    default:
-    case FLUSH_SCOPE_PAGE:
-      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
-      addr &= PAGE_MASK;	/* Workaround for bug in some
-				   revisions of the 68060 */
-      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
-	{
-	  if (!(paddr = virt_to_phys_060(addr)))
-	    continue;
-	  switch (cache)
-	    {
-	    case FLUSH_CACHE_DATA:
-	      __asm__ __volatile__ (".chip 68060\n\t"
-				    "cpushp %%dc,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    case FLUSH_CACHE_INSN:
-	      __asm__ __volatile__ (".chip 68060\n\t"
-				    "cpushp %%ic,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    default:
-	    case FLUSH_CACHE_BOTH:
-	      __asm__ __volatile__ (".chip 68060\n\t"
-				    "cpushp %%bc,(%0)\n\t"
-				    ".chip 68k"
-				    : : "a" (paddr));
-	      break;
-	    }
-	}
-      break;
-    }
-  return 0;
-}
-
-/* sys_cacheflush -- flush (part of) the processor cache.  */
-asmlinkage int
-sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
-{
-	struct vm_area_struct *vma;
-	int ret = -EINVAL;
-
-	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
-	    cache & ~FLUSH_CACHE_BOTH)
-		goto out;
-
-	if (scope == FLUSH_SCOPE_ALL) {
-		/* Only the superuser may explicitly flush the whole cache. */
-		ret = -EPERM;
-		if (!capable(CAP_SYS_ADMIN))
-			goto out;
-	} else {
-		/*
-		 * Verify that the specified address region actually belongs
-		 * to this process.
-		 */
-		vma = find_vma (current->mm, addr);
-		ret = -EINVAL;
-		/* Check for overflow.  */
-		if (addr + len < addr)
-			goto out;
-		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
-			goto out;
-	}
-
-	if (CPU_IS_020_OR_030) {
-		if (scope == FLUSH_SCOPE_LINE && len < 256) {
-			unsigned long cacr;
-			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
-			if (cache & FLUSH_CACHE_INSN)
-				cacr |= 4;
-			if (cache & FLUSH_CACHE_DATA)
-				cacr |= 0x400;
-			len >>= 2;
-			while (len--) {
-				__asm__ __volatile__ ("movec %1, %%caar\n\t"
-						      "movec %0, %%cacr"
-						      : /* no outputs */
-						      : "r" (cacr), "r" (addr));
-				addr += 4;
-			}
-		} else {
-			/* Flush the whole cache, even if page granularity requested. */
-			unsigned long cacr;
-			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
-			if (cache & FLUSH_CACHE_INSN)
-				cacr |= 8;
-			if (cache & FLUSH_CACHE_DATA)
-				cacr |= 0x800;
-			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
-		}
-		ret = 0;
-		goto out;
-	} else {
-		/*
-		 * 040 or 060: don't blindly trust 'scope', someone could
-		 * try to flush a few megs of memory.
-		 */
-
-		if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
-			scope=FLUSH_SCOPE_PAGE;
-		if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
-			scope=FLUSH_SCOPE_ALL;
-		if (CPU_IS_040) {
-			ret = cache_flush_040 (addr, scope, cache, len);
-		} else if (CPU_IS_060) {
-			ret = cache_flush_060 (addr, scope, cache, len);
-		}
-	}
-out:
-	return ret;
-}
-
-asmlinkage int sys_getpagesize(void)
-{
-	return PAGE_SIZE;
-}
-
-/*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
- */
-int kernel_execve(const char *filename,
-		  const char *const argv[],
-		  const char *const envp[])
-{
-	register long __res asm ("%d0") = __NR_execve;
-	register long __a asm ("%d1") = (long)(filename);
-	register long __b asm ("%d2") = (long)(argv);
-	register long __c asm ("%d3") = (long)(envp);
-	asm volatile ("trap  #0" : "+d" (__res)
-			: "d" (__a), "d" (__b), "d" (__c));
-	return __res;
-}
-
-asmlinkage unsigned long sys_get_thread_area(void)
-{
-	return current_thread_info()->tp_value;
-}
-
-asmlinkage int sys_set_thread_area(unsigned long tp)
-{
-	current_thread_info()->tp_value = tp;
-	return 0;
-}
-
-/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
-   D1 (newval).  */
-asmlinkage int
-sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
-		      unsigned long __user * mem)
-{
-	/* This was borrowed from ARM's implementation.  */
-	for (;;) {
-		struct mm_struct *mm = current->mm;
-		pgd_t *pgd;
-		pmd_t *pmd;
-		pte_t *pte;
-		spinlock_t *ptl;
-		unsigned long mem_value;
-
-		down_read(&mm->mmap_sem);
-		pgd = pgd_offset(mm, (unsigned long)mem);
-		if (!pgd_present(*pgd))
-			goto bad_access;
-		pmd = pmd_offset(pgd, (unsigned long)mem);
-		if (!pmd_present(*pmd))
-			goto bad_access;
-		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
-		if (!pte_present(*pte) || !pte_dirty(*pte)
-		    || !pte_write(*pte)) {
-			pte_unmap_unlock(pte, ptl);
-			goto bad_access;
-		}
-
-		mem_value = *mem;
-		if (mem_value == oldval)
-			*mem = newval;
-
-		pte_unmap_unlock(pte, ptl);
-		up_read(&mm->mmap_sem);
-		return mem_value;
-
-	      bad_access:
-		up_read(&mm->mmap_sem);
-		/* This is not necessarily a bad access, we can get here if
-		   a memory we're trying to write to should be copied-on-write.
-		   Make the kernel do the necessary page stuff, then re-iterate.
-		   Simulate a write access fault to do that.  */
-		{
-			/* The first argument of the function corresponds to
-			   D1, which is the first field of struct pt_regs.  */
-			struct pt_regs *fp = (struct pt_regs *)&newval;
-
-			/* '3' is an RMW flag.  */
-			if (do_page_fault(fp, (unsigned long)mem, 3))
-				/* If the do_page_fault() failed, we don't
-				   have anything meaningful to return.
-				   There should be a SIGSEGV pending for
-				   the process.  */
-				return 0xdeadbeef;
-		}
-	}
-}
-
-asmlinkage int sys_atomic_barrier(void)
-{
-	/* no code needed for uniprocs */
-	return 0;
-}
diff --git a/arch/m68k/kernel/sys_m68k_no.c b/arch/m68k/kernel/sys_m68k_no.c
deleted file mode 100644
index 68488ae47f0a..000000000000
--- a/arch/m68k/kernel/sys_m68k_no.c
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * linux/arch/m68knommu/kernel/sys_m68k.c
3 *
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/m68k
6 * platform.
7 */
8
9#include <linux/errno.h>
10#include <linux/sched.h>
11#include <linux/mm.h>
12#include <linux/smp.h>
13#include <linux/sem.h>
14#include <linux/msg.h>
15#include <linux/shm.h>
16#include <linux/stat.h>
17#include <linux/syscalls.h>
18#include <linux/mman.h>
19#include <linux/file.h>
20#include <linux/ipc.h>
21#include <linux/fs.h>
22
23#include <asm/setup.h>
24#include <asm/uaccess.h>
25#include <asm/cachectl.h>
26#include <asm/traps.h>
27#include <asm/cacheflush.h>
28#include <asm/unistd.h>
29
30/* sys_cacheflush -- flush (part of) the processor cache. */
31asmlinkage int
32sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
33{
34 flush_cache_all();
35 return(0);
36}
37
38asmlinkage int sys_getpagesize(void)
39{
40 return PAGE_SIZE;
41}
42
43/*
44 * Do a system call from kernel instead of calling sys_execve so we
45 * end up with proper pt_regs.
46 */
47int kernel_execve(const char *filename,
48 const char *const argv[],
49 const char *const envp[])
50{
51 register long __res asm ("%d0") = __NR_execve;
52 register long __a asm ("%d1") = (long)(filename);
53 register long __b asm ("%d2") = (long)(argv);
54 register long __c asm ("%d3") = (long)(envp);
55 asm volatile ("trap #0" : "+d" (__res)
56 : "d" (__a), "d" (__b), "d" (__c));
57 return __res;
58}
59
60asmlinkage unsigned long sys_get_thread_area(void)
61{
62 return current_thread_info()->tp_value;
63}
64
65asmlinkage int sys_set_thread_area(unsigned long tp)
66{
67 current_thread_info()->tp_value = tp;
68 return 0;
69}
70
71/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
72 D1 (newval). */
73asmlinkage int
74sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
75 unsigned long __user * mem)
76{
77 struct mm_struct *mm = current->mm;
78 unsigned long mem_value;
79
80 down_read(&mm->mmap_sem);
81
82 mem_value = *mem;
83 if (mem_value == oldval)
84 *mem = newval;
85
86 up_read(&mm->mmap_sem);
87 return mem_value;
88}
89
90asmlinkage int sys_atomic_barrier(void)
91{
92 /* no code needed for uniprocs */
93 return 0;
94}
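
For comparison with the walk-and-retry version above: with no MMU there are no page tables to validate and no copy-on-write, so the deleted nommu variant could simply dereference the user pointer under mmap_sem, and its sys_cacheflush() could ignore scope entirely and flush everything. Callers stay source-compatible across the merge; a hypothetical call site (not from this patch; constants come from <asm/cachectl.h>, and the syscall number is assumed to be __NR_cacheflush) could be:

/* Hypothetical userspace example: flush a freshly written code
 * buffer before jumping into it, as a JIT would. */
#include <unistd.h>
#include <asm/cachectl.h>
#include <asm/unistd.h>

static int flush_code(void *buf, unsigned long len)
{
	/* the 040/060 path above widens the scope by itself for
	 * large ranges, so FLUSH_SCOPE_PAGE is a safe default */
	return syscall(__NR_cacheflush, (unsigned long) buf,
		       FLUSH_SCOPE_PAGE, FLUSH_CACHE_BOTH, len);
}
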
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 5909e392cb1e..6f7b09122a00 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -11,7 +11,6 @@
  * Linux/m68k support by Hamish Macdonald
  */
 
-#include <linux/sys.h>
 #include <linux/linkage.h>
 
 #ifndef CONFIG_MMU
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index 1f95881d8437..df421e501436 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -1,5 +1,14 @@
+
+#
+# Makefile for m68k-specific library files..
+#
+
+lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
+	   memcpy.o memset.o memmove.o
+
 ifdef CONFIG_MMU
-include arch/m68k/lib/Makefile_mm
+lib-y	+= string.o uaccess.o checksum_mm.o
 else
-include arch/m68k/lib/Makefile_no
+lib-y	+= mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o delay.o checksum_no.o
 endif
+
diff --git a/arch/m68k/lib/Makefile_mm b/arch/m68k/lib/Makefile_mm
deleted file mode 100644
index af9abf8d9d98..000000000000
--- a/arch/m68k/lib/Makefile_mm
+++ /dev/null
@@ -1,6 +0,0 @@
1#
2# Makefile for m68k-specific library files..
3#
4
5lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
6 checksum.o string.o uaccess.o
diff --git a/arch/m68k/lib/Makefile_no b/arch/m68k/lib/Makefile_no
deleted file mode 100644
index 32d852e586d7..000000000000
--- a/arch/m68k/lib/Makefile_no
+++ /dev/null
@@ -1,7 +0,0 @@
1#
2# Makefile for m68knommu specific library files..
3#
4
5lib-y := ashldi3.o ashrdi3.o lshrdi3.o \
6 muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
7 checksum.o memcpy.o memmove.o memset.o delay.o
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
deleted file mode 100644
index 1297536060de..000000000000
--- a/arch/m68k/lib/checksum.c
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_MMU
2#include "checksum_mm.c"
3#else
4#include "checksum_no.c"
5#endif
diff --git a/arch/m68k/lib/checksum_no.c b/arch/m68k/lib/checksum_no.c
index eccf25d3d73e..e4c6354da765 100644
--- a/arch/m68k/lib/checksum_no.c
+++ b/arch/m68k/lib/checksum_no.c
@@ -101,6 +101,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	return (__force __sum16)~do_csum(iph,ihl*4);
 }
+EXPORT_SYMBOL(ip_fast_csum);
 #endif
 
 /*
@@ -140,6 +141,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
 	memcpy(dst, (__force const void *)src, len);
 	return csum_partial(dst, len, sum);
 }
+EXPORT_SYMBOL(csum_partial_copy_from_user);
 
 /*
  * copy from ds while checksumming, otherwise like csum_partial
@@ -151,3 +153,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 	memcpy(dst, src, len);
 	return csum_partial(dst, len, sum);
 }
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
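
The three EXPORT_SYMBOL() additions matter because the nommu build now links this file straight from lib-y (see the Makefile change above): without the exports, loadable modules could no longer resolve the checksum helpers. A minimal sketch of such a consumer (hypothetical module, not part of this patch):

#include <linux/module.h>
#include <linux/ip.h>
#include <net/checksum.h>

static int __init csum_demo_init(void)
{
	struct iphdr ip = { .version = 4, .ihl = 5, .ttl = 64 };

	/* resolves only because ip_fast_csum is exported */
	ip.check = ip_fast_csum(&ip, ip.ihl);
	pr_info("csum_demo: header checksum %#x\n",
		(__force unsigned int) ip.check);
	return 0;
}

static void __exit csum_demo_exit(void)
{
}

module_init(csum_demo_init);
module_exit(csum_demo_exit);
MODULE_LICENSE("GPL");
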
diff --git a/arch/m68k/lib/memcpy.c b/arch/m68k/lib/memcpy.c
index b50dbcad4746..62182c81e91c 100644
--- a/arch/m68k/lib/memcpy.c
+++ b/arch/m68k/lib/memcpy.c
@@ -1,62 +1,80 @@
-
-#include <linux/types.h>
-
-void * memcpy(void * to, const void * from, size_t n)
-{
-#ifdef CONFIG_COLDFIRE
-	void *xto = to;
-	size_t temp;
-
-	if (!n)
-		return xto;
-	if ((long) to & 1)
-	{
-		char *cto = to;
-		const char *cfrom = from;
-		*cto++ = *cfrom++;
-		to = cto;
-		from = cfrom;
-		n--;
-	}
-	if (n > 2 && (long) to & 2)
-	{
-		short *sto = to;
-		const short *sfrom = from;
-		*sto++ = *sfrom++;
-		to = sto;
-		from = sfrom;
-		n -= 2;
-	}
-	temp = n >> 2;
-	if (temp)
-	{
-		long *lto = to;
-		const long *lfrom = from;
-		for (; temp; temp--)
-			*lto++ = *lfrom++;
-		to = lto;
-		from = lfrom;
-	}
-	if (n & 2)
-	{
-		short *sto = to;
-		const short *sfrom = from;
-		*sto++ = *sfrom++;
-		to = sto;
-		from = sfrom;
-	}
-	if (n & 1)
-	{
-		char *cto = to;
-		const char *cfrom = from;
-		*cto = *cfrom;
-	}
-	return xto;
-#else
-	const char *c_from = from;
-	char *c_to = to;
-	while (n-- > 0)
-		*c_to++ = *c_from++;
-	return((void *) to);
-#endif
-}
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+void *memcpy(void *to, const void *from, size_t n)
+{
+	void *xto = to;
+	size_t temp, temp1;
+
+	if (!n)
+		return xto;
+	if ((long)to & 1) {
+		char *cto = to;
+		const char *cfrom = from;
+		*cto++ = *cfrom++;
+		to = cto;
+		from = cfrom;
+		n--;
+	}
+	if (n > 2 && (long)to & 2) {
+		short *sto = to;
+		const short *sfrom = from;
+		*sto++ = *sfrom++;
+		to = sto;
+		from = sfrom;
+		n -= 2;
+	}
+	temp = n >> 2;
+	if (temp) {
+		long *lto = to;
+		const long *lfrom = from;
+#if defined(__mc68020__) || defined(__mc68030__) || \
+    defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
+		asm volatile (
+			"	movel %2,%3\n"
+			"	andw  #7,%3\n"
+			"	lsrl  #3,%2\n"
+			"	negw  %3\n"
+			"	jmp   %%pc@(1f,%3:w:2)\n"
+			"4:	movel %0@+,%1@+\n"
+			"	movel %0@+,%1@+\n"
+			"	movel %0@+,%1@+\n"
+			"	movel %0@+,%1@+\n"
+			"	movel %0@+,%1@+\n"
+			"	movel %0@+,%1@+\n"
+			"	movel %0@+,%1@+\n"
+			"	movel %0@+,%1@+\n"
+			"1:	dbra  %2,4b\n"
+			"	clrw  %2\n"
+			"	subql #1,%2\n"
+			"	jpl   4b"
+			: "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1)
+			: "0" (lfrom), "1" (lto), "2" (temp));
+#else
+		for (; temp; temp--)
+			*lto++ = *lfrom++;
+#endif
+		to = lto;
+		from = lfrom;
+	}
+	if (n & 2) {
+		short *sto = to;
+		const short *sfrom = from;
+		*sto++ = *sfrom++;
+		to = sto;
+		from = sfrom;
+	}
+	if (n & 1) {
+		char *cto = to;
+		const char *cfrom = from;
+		*cto = *cfrom;
+	}
+	return xto;
+}
+EXPORT_SYMBOL(memcpy);
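
The new inline asm is an eight-way unrolled longword copy that enters the unrolled block part-way through (a computed jump off %pc, Duff's-device style) to dispose of the temp % 8 leftovers on the first pass; dbra only counts 16 bits, so the clrw/subql/jpl tail extends the counter to 32 bits. The memset() rewrite below uses the same skeleton with a constant source operand. A C rendering of the control flow, for illustration only:

/* Sketch of what the asm computes: copy 'temp' longwords, eight per
 * iteration, the remainder handled by jumping into the middle of the
 * unrolled block first. */
static void copy_longs(long *lto, const long *lfrom, unsigned long temp)
{
	unsigned long tail = temp & 7;	/* asm: andw #7 */

	temp >>= 3;			/* asm: lsrl #3 */
	switch (tail) {			/* asm: jmp %pc@(1f,%3:w:2) */
	case 7: *lto++ = *lfrom++;	/* fall through */
	case 6: *lto++ = *lfrom++;
	case 5: *lto++ = *lfrom++;
	case 4: *lto++ = *lfrom++;
	case 3: *lto++ = *lfrom++;
	case 2: *lto++ = *lfrom++;
	case 1: *lto++ = *lfrom++;
	case 0: break;
	}
	while (temp--) {		/* asm: dbra/clrw/subql/jpl */
		*lto++ = *lfrom++; *lto++ = *lfrom++;
		*lto++ = *lfrom++; *lto++ = *lfrom++;
		*lto++ = *lfrom++; *lto++ = *lfrom++;
		*lto++ = *lfrom++; *lto++ = *lfrom++;
	}
}
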
diff --git a/arch/m68k/lib/memmove.c b/arch/m68k/lib/memmove.c
index b3dcfe9dab7e..6519f7f349f6 100644
--- a/arch/m68k/lib/memmove.c
+++ b/arch/m68k/lib/memmove.c
@@ -4,8 +4,6 @@
  * for more details.
  */
 
-#define __IN_STRING_C
-
 #include <linux/module.h>
 #include <linux/string.h>
 
diff --git a/arch/m68k/lib/memset.c b/arch/m68k/lib/memset.c
index 1389bf455633..f649e6a2e644 100644
--- a/arch/m68k/lib/memset.c
+++ b/arch/m68k/lib/memset.c
@@ -1,47 +1,75 @@
-#include <linux/types.h>
-
-void * memset(void * s, int c, size_t count)
-{
-	void *xs = s;
-	size_t temp;
-
-	if (!count)
-		return xs;
-	c &= 0xff;
-	c |= c << 8;
-	c |= c << 16;
-	if ((long) s & 1)
-	{
-		char *cs = s;
-		*cs++ = c;
-		s = cs;
-		count--;
-	}
-	if (count > 2 && (long) s & 2)
-	{
-		short *ss = s;
-		*ss++ = c;
-		s = ss;
-		count -= 2;
-	}
-	temp = count >> 2;
-	if (temp)
-	{
-		long *ls = s;
-		for (; temp; temp--)
-			*ls++ = c;
-		s = ls;
-	}
-	if (count & 2)
-	{
-		short *ss = s;
-		*ss++ = c;
-		s = ss;
-	}
-	if (count & 1)
-	{
-		char *cs = s;
-		*cs = c;
-	}
-	return xs;
-}
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+void *memset(void *s, int c, size_t count)
+{
+	void *xs = s;
+	size_t temp;
+
+	if (!count)
+		return xs;
+	c &= 0xff;
+	c |= c << 8;
+	c |= c << 16;
+	if ((long)s & 1) {
+		char *cs = s;
+		*cs++ = c;
+		s = cs;
+		count--;
+	}
+	if (count > 2 && (long)s & 2) {
+		short *ss = s;
+		*ss++ = c;
+		s = ss;
+		count -= 2;
+	}
+	temp = count >> 2;
+	if (temp) {
+		long *ls = s;
+#if defined(__mc68020__) || defined(__mc68030__) || \
+    defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
+		size_t temp1;
+		asm volatile (
+			"	movel %1,%2\n"
+			"	andw  #7,%2\n"
+			"	lsrl  #3,%1\n"
+			"	negw  %2\n"
+			"	jmp   %%pc@(2f,%2:w:2)\n"
+			"1:	movel %3,%0@+\n"
+			"	movel %3,%0@+\n"
+			"	movel %3,%0@+\n"
+			"	movel %3,%0@+\n"
+			"	movel %3,%0@+\n"
+			"	movel %3,%0@+\n"
+			"	movel %3,%0@+\n"
+			"	movel %3,%0@+\n"
+			"2:	dbra  %1,1b\n"
+			"	clrw  %1\n"
+			"	subql #1,%1\n"
+			"	jpl   1b"
+			: "=a" (ls), "=d" (temp), "=&d" (temp1)
+			: "d" (c), "0" (ls), "1" (temp));
+#else
+		for (; temp; temp--)
+			*ls++ = c;
+#endif
+		s = ls;
+	}
+	if (count & 2) {
+		short *ss = s;
+		*ss++ = c;
+		s = ss;
+	}
+	if (count & 1) {
+		char *cs = s;
+		*cs = c;
+	}
+	return xs;
+}
+EXPORT_SYMBOL(memset);
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 16e0eb338ee0..079bafca073e 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -1,5 +1,98 @@
1#ifdef CONFIG_MMU
2#include "muldi3_mm.c"
1/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
2 gcc-2.7.2.3/longlong.h which is: */
3/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22#if defined(__mc68020__) || defined(__mc68030__) || \
23 defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
24
25#define umul_ppmm(w1, w0, u, v) \
26 __asm__ ("mulu%.l %3,%1:%0" \
27 : "=d" ((USItype)(w0)), \
28 "=d" ((USItype)(w1)) \
29 : "%0" ((USItype)(u)), \
30 "dmi" ((USItype)(v)))
31
3#else
4#include "muldi3_no.c"
32#else
33
34#define SI_TYPE_SIZE 32
35#define __BITS4 (SI_TYPE_SIZE / 4)
36#define __ll_B (1L << (SI_TYPE_SIZE / 2))
37#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
38#define __ll_highpart(t) ((USItype) (t) / __ll_B)
39
40#define umul_ppmm(w1, w0, u, v) \
41 do { \
42 USItype __x0, __x1, __x2, __x3; \
43 USItype __ul, __vl, __uh, __vh; \
44 \
45 __ul = __ll_lowpart (u); \
46 __uh = __ll_highpart (u); \
47 __vl = __ll_lowpart (v); \
48 __vh = __ll_highpart (v); \
49 \
50 __x0 = (USItype) __ul * __vl; \
51 __x1 = (USItype) __ul * __vh; \
52 __x2 = (USItype) __uh * __vl; \
53 __x3 = (USItype) __uh * __vh; \
54 \
55 __x1 += __ll_highpart (__x0);/* this can't give carry */ \
56 __x1 += __x2; /* but this indeed can */ \
57 if (__x1 < __x2) /* did we get it? */ \
58 __x3 += __ll_B; /* yes, add it in the proper pos. */ \
59 \
60 (w1) = __x3 + __ll_highpart (__x1); \
61 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
62 } while (0)
63
5#endif
64#endif
65
66#define __umulsidi3(u, v) \
67 ({DIunion __w; \
68 umul_ppmm (__w.s.high, __w.s.low, u, v); \
69 __w.ll; })
70
71typedef int SItype __attribute__ ((mode (SI)));
72typedef unsigned int USItype __attribute__ ((mode (SI)));
73typedef int DItype __attribute__ ((mode (DI)));
74typedef int word_type __attribute__ ((mode (__word__)));
75
76struct DIstruct {SItype high, low;};
77
78typedef union
79{
80 struct DIstruct s;
81 DItype ll;
82} DIunion;
83
84DItype
85__muldi3 (DItype u, DItype v)
86{
87 DIunion w;
88 DIunion uu, vv;
89
90 uu.ll = u,
91 vv.ll = v;
92
93 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
94 w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
95 + (USItype) uu.s.high * (USItype) vv.s.low);
96
97 return w.ll;
98}
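
The #else branch reimplements umul_ppmm() in portable C for parts outside the CPU list in the #if above, which lack a 32x32->64 mulu.l: it splits each operand into 16-bit halves, forms the four partial products, and propagates the single possible carry noted in the comments. A host-side sanity check of that decomposition (illustrative only, not part of the kernel build):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void umul_ppmm(uint32_t *w1, uint32_t *w0, uint32_t u, uint32_t v)
{
	uint32_t ul = u & 0xffff, uh = u >> 16;
	uint32_t vl = v & 0xffff, vh = v >> 16;
	uint32_t x0 = ul * vl, x1 = ul * vh;
	uint32_t x2 = uh * vl, x3 = uh * vh;

	x1 += x0 >> 16;			/* this can't give carry */
	x1 += x2;			/* but this indeed can */
	if (x1 < x2)			/* carry into the high word */
		x3 += 0x10000;
	*w1 = x3 + (x1 >> 16);
	*w0 = (x1 << 16) + (x0 & 0xffff);
}

int main(void)
{
	uint32_t u = 0xdeadbeef, v = 0xcafef00d, w1, w0;

	umul_ppmm(&w1, &w0, u, v);
	assert((((uint64_t)w1 << 32) | w0) == (uint64_t)u * v);
	printf("0x%08x * 0x%08x = 0x%08x%08x\n",
	       (unsigned)u, (unsigned)v, (unsigned)w1, (unsigned)w0);
	return 0;
}
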
diff --git a/arch/m68k/lib/muldi3_mm.c b/arch/m68k/lib/muldi3_mm.c
deleted file mode 100644
index be4f275649e3..000000000000
--- a/arch/m68k/lib/muldi3_mm.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
2 gcc-2.7.2.3/longlong.h which is: */
3/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22#define BITS_PER_UNIT 8
23
24#define umul_ppmm(w1, w0, u, v) \
25 __asm__ ("mulu%.l %3,%1:%0" \
26 : "=d" ((USItype)(w0)), \
27 "=d" ((USItype)(w1)) \
28 : "%0" ((USItype)(u)), \
29 "dmi" ((USItype)(v)))
30
31#define __umulsidi3(u, v) \
32 ({DIunion __w; \
33 umul_ppmm (__w.s.high, __w.s.low, u, v); \
34 __w.ll; })
35
36typedef int SItype __attribute__ ((mode (SI)));
37typedef unsigned int USItype __attribute__ ((mode (SI)));
38typedef int DItype __attribute__ ((mode (DI)));
39typedef int word_type __attribute__ ((mode (__word__)));
40
41struct DIstruct {SItype high, low;};
42
43typedef union
44{
45 struct DIstruct s;
46 DItype ll;
47} DIunion;
48
49DItype
50__muldi3 (DItype u, DItype v)
51{
52 DIunion w;
53 DIunion uu, vv;
54
55 uu.ll = u,
56 vv.ll = v;
57
58 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
59 w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
60 + (USItype) uu.s.high * (USItype) vv.s.low);
61
62 return w.ll;
63}
diff --git a/arch/m68k/lib/muldi3_no.c b/arch/m68k/lib/muldi3_no.c
deleted file mode 100644
index 34af72c30303..000000000000
--- a/arch/m68k/lib/muldi3_no.c
+++ /dev/null
@@ -1,86 +0,0 @@
1/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
2 gcc-2.7.2.3/longlong.h which is: */
3/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22#define BITS_PER_UNIT 8
23#define SI_TYPE_SIZE 32
24
25#define __BITS4 (SI_TYPE_SIZE / 4)
26#define __ll_B (1L << (SI_TYPE_SIZE / 2))
27#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
28#define __ll_highpart(t) ((USItype) (t) / __ll_B)
29
30#define umul_ppmm(w1, w0, u, v) \
31 do { \
32 USItype __x0, __x1, __x2, __x3; \
33 USItype __ul, __vl, __uh, __vh; \
34 \
35 __ul = __ll_lowpart (u); \
36 __uh = __ll_highpart (u); \
37 __vl = __ll_lowpart (v); \
38 __vh = __ll_highpart (v); \
39 \
40 __x0 = (USItype) __ul * __vl; \
41 __x1 = (USItype) __ul * __vh; \
42 __x2 = (USItype) __uh * __vl; \
43 __x3 = (USItype) __uh * __vh; \
44 \
45 __x1 += __ll_highpart (__x0);/* this can't give carry */ \
46 __x1 += __x2; /* but this indeed can */ \
47 if (__x1 < __x2) /* did we get it? */ \
48 __x3 += __ll_B; /* yes, add it in the proper pos. */ \
49 \
50 (w1) = __x3 + __ll_highpart (__x1); \
51 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
52 } while (0)
53
54#define __umulsidi3(u, v) \
55 ({DIunion __w; \
56 umul_ppmm (__w.s.high, __w.s.low, u, v); \
57 __w.ll; })
58
59typedef int SItype __attribute__ ((mode (SI)));
60typedef unsigned int USItype __attribute__ ((mode (SI)));
61typedef int DItype __attribute__ ((mode (DI)));
62typedef int word_type __attribute__ ((mode (__word__)));
63
64struct DIstruct {SItype high, low;};
65
66typedef union
67{
68 struct DIstruct s;
69 DItype ll;
70} DIunion;
71
72DItype
73__muldi3 (DItype u, DItype v)
74{
75 DIunion w;
76 DIunion uu, vv;
77
78 uu.ll = u,
79 vv.ll = v;
80
81 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
82 w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
83 + (USItype) uu.s.high * (USItype) vv.s.low);
84
85 return w.ll;
86}
diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c
index d399c5f25636..b9a57abfad08 100644
--- a/arch/m68k/lib/string.c
+++ b/arch/m68k/lib/string.c
@@ -20,226 +20,3 @@ char *strcat(char *dest, const char *src)
20	return __kernel_strcpy(dest + __kernel_strlen(dest), src);
21}
22EXPORT_SYMBOL(strcat);
23
24void *memset(void *s, int c, size_t count)
25{
26 void *xs = s;
27 size_t temp, temp1;
28
29 if (!count)
30 return xs;
31 c &= 0xff;
32 c |= c << 8;
33 c |= c << 16;
34 if ((long)s & 1) {
35 char *cs = s;
36 *cs++ = c;
37 s = cs;
38 count--;
39 }
40 if (count > 2 && (long)s & 2) {
41 short *ss = s;
42 *ss++ = c;
43 s = ss;
44 count -= 2;
45 }
46 temp = count >> 2;
47 if (temp) {
48 long *ls = s;
49
50 asm volatile (
51 " movel %1,%2\n"
52 " andw #7,%2\n"
53 " lsrl #3,%1\n"
54 " negw %2\n"
55 " jmp %%pc@(2f,%2:w:2)\n"
56 "1: movel %3,%0@+\n"
57 " movel %3,%0@+\n"
58 " movel %3,%0@+\n"
59 " movel %3,%0@+\n"
60 " movel %3,%0@+\n"
61 " movel %3,%0@+\n"
62 " movel %3,%0@+\n"
63 " movel %3,%0@+\n"
64 "2: dbra %1,1b\n"
65 " clrw %1\n"
66 " subql #1,%1\n"
67 " jpl 1b"
68 : "=a" (ls), "=d" (temp), "=&d" (temp1)
69 : "d" (c), "0" (ls), "1" (temp));
70 s = ls;
71 }
72 if (count & 2) {
73 short *ss = s;
74 *ss++ = c;
75 s = ss;
76 }
77 if (count & 1) {
78 char *cs = s;
79 *cs = c;
80 }
81 return xs;
82}
83EXPORT_SYMBOL(memset);
84
85void *memcpy(void *to, const void *from, size_t n)
86{
87 void *xto = to;
88 size_t temp, temp1;
89
90 if (!n)
91 return xto;
92 if ((long)to & 1) {
93 char *cto = to;
94 const char *cfrom = from;
95 *cto++ = *cfrom++;
96 to = cto;
97 from = cfrom;
98 n--;
99 }
100 if (n > 2 && (long)to & 2) {
101 short *sto = to;
102 const short *sfrom = from;
103 *sto++ = *sfrom++;
104 to = sto;
105 from = sfrom;
106 n -= 2;
107 }
108 temp = n >> 2;
109 if (temp) {
110 long *lto = to;
111 const long *lfrom = from;
112
113 asm volatile (
114 " movel %2,%3\n"
115 " andw #7,%3\n"
116 " lsrl #3,%2\n"
117 " negw %3\n"
118 " jmp %%pc@(1f,%3:w:2)\n"
119 "4: movel %0@+,%1@+\n"
120 " movel %0@+,%1@+\n"
121 " movel %0@+,%1@+\n"
122 " movel %0@+,%1@+\n"
123 " movel %0@+,%1@+\n"
124 " movel %0@+,%1@+\n"
125 " movel %0@+,%1@+\n"
126 " movel %0@+,%1@+\n"
127 "1: dbra %2,4b\n"
128 " clrw %2\n"
129 " subql #1,%2\n"
130 " jpl 4b"
131 : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1)
132 : "0" (lfrom), "1" (lto), "2" (temp));
133 to = lto;
134 from = lfrom;
135 }
136 if (n & 2) {
137 short *sto = to;
138 const short *sfrom = from;
139 *sto++ = *sfrom++;
140 to = sto;
141 from = sfrom;
142 }
143 if (n & 1) {
144 char *cto = to;
145 const char *cfrom = from;
146 *cto = *cfrom;
147 }
148 return xto;
149}
150EXPORT_SYMBOL(memcpy);
151
152void *memmove(void *dest, const void *src, size_t n)
153{
154 void *xdest = dest;
155 size_t temp;
156
157 if (!n)
158 return xdest;
159
160 if (dest < src) {
161 if ((long)dest & 1) {
162 char *cdest = dest;
163 const char *csrc = src;
164 *cdest++ = *csrc++;
165 dest = cdest;
166 src = csrc;
167 n--;
168 }
169 if (n > 2 && (long)dest & 2) {
170 short *sdest = dest;
171 const short *ssrc = src;
172 *sdest++ = *ssrc++;
173 dest = sdest;
174 src = ssrc;
175 n -= 2;
176 }
177 temp = n >> 2;
178 if (temp) {
179 long *ldest = dest;
180 const long *lsrc = src;
181 temp--;
182 do
183 *ldest++ = *lsrc++;
184 while (temp--);
185 dest = ldest;
186 src = lsrc;
187 }
188 if (n & 2) {
189 short *sdest = dest;
190 const short *ssrc = src;
191 *sdest++ = *ssrc++;
192 dest = sdest;
193 src = ssrc;
194 }
195 if (n & 1) {
196 char *cdest = dest;
197 const char *csrc = src;
198 *cdest = *csrc;
199 }
200 } else {
201 dest = (char *)dest + n;
202 src = (const char *)src + n;
203 if ((long)dest & 1) {
204 char *cdest = dest;
205 const char *csrc = src;
206 *--cdest = *--csrc;
207 dest = cdest;
208 src = csrc;
209 n--;
210 }
211 if (n > 2 && (long)dest & 2) {
212 short *sdest = dest;
213 const short *ssrc = src;
214 *--sdest = *--ssrc;
215 dest = sdest;
216 src = ssrc;
217 n -= 2;
218 }
219 temp = n >> 2;
220 if (temp) {
221 long *ldest = dest;
222 const long *lsrc = src;
223 temp--;
224 do
225 *--ldest = *--lsrc;
226 while (temp--);
227 dest = ldest;
228 src = lsrc;
229 }
230 if (n & 2) {
231 short *sdest = dest;
232 const short *ssrc = src;
233 *--sdest = *--ssrc;
234 dest = sdest;
235 src = ssrc;
236 }
237 if (n & 1) {
238 char *cdest = dest;
239 const char *csrc = src;
240 *--cdest = *--csrc;
241 }
242 }
243 return xdest;
244}
245EXPORT_SYMBOL(memmove);
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index b60270e4954b..09cadf1058d5 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -1,5 +1,9 @@
-ifdef CONFIG_MMU
-include arch/m68k/mm/Makefile_mm
-else
-include arch/m68k/mm/Makefile_no
-endif
+#
+# Makefile for the linux m68k-specific parts of the memory manager.
+#
+
+obj-y	:= init.o
+
+obj-$(CONFIG_MMU)		+= cache.o fault.o hwtest.o
+obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o
+obj-$(CONFIG_MMU_SUN3)		+= sun3kmap.o sun3mmu.o
diff --git a/arch/m68k/mm/Makefile_mm b/arch/m68k/mm/Makefile_mm
deleted file mode 100644
index 5eaa43c4cb3c..000000000000
--- a/arch/m68k/mm/Makefile_mm
+++ /dev/null
@@ -1,8 +0,0 @@
1#
2# Makefile for the linux m68k-specific parts of the memory manager.
3#
4
5obj-y := cache.o init.o fault.o hwtest.o
6
7obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
8obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o
diff --git a/arch/m68k/mm/Makefile_no b/arch/m68k/mm/Makefile_no
deleted file mode 100644
index b54ab6b4b523..000000000000
--- a/arch/m68k/mm/Makefile_no
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the linux m68knommu specific parts of the memory manager.
3#
4
5obj-y += init.o kmap.o
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
index 8a6653f56bd8..7cbd7bd1f8bc 100644
--- a/arch/m68k/mm/init_no.c
+++ b/arch/m68k/mm/init_no.c
@@ -38,28 +38,10 @@
 #include <asm/system.h>
 #include <asm/machdep.h>
 
-#undef DEBUG
-
-extern void die_if_kernel(char *,struct pt_regs *,long);
-extern void free_initmem(void);
-
 /*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving a inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
  * ZERO_PAGE is a special page that is used for zero-initialized
  * data and COW.
  */
-static unsigned long empty_bad_page_table;
-
-static unsigned long empty_bad_page;
-
 unsigned long empty_zero_page;
 
 extern unsigned long memory_start;
@@ -77,22 +59,9 @@ void __init paging_init(void)
 	 * Make sure start_mem is page aligned, otherwise bootmem and
 	 * page_alloc get different views of the world.
 	 */
-#ifdef DEBUG
-	unsigned long start_mem = PAGE_ALIGN(memory_start);
-#endif
 	unsigned long end_mem = memory_end & PAGE_MASK;
+	unsigned long zones_size[MAX_NR_ZONES] = {0, };
 
-#ifdef DEBUG
-	printk (KERN_DEBUG "start_mem is %#lx\nvirtual_end is %#lx\n",
-		start_mem, end_mem);
-#endif
-
-	/*
-	 * Initialize the bad page table and bad page to point
-	 * to a couple of allocated pages.
-	 */
-	empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-	empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
 	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
 	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
@@ -101,19 +70,8 @@ void __init paging_init(void)
 	 */
 	set_fs (USER_DS);
 
-#ifdef DEBUG
-	printk (KERN_DEBUG "before free_area_init\n");
-
-	printk (KERN_DEBUG "free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
-		start_mem, end_mem);
-#endif
-
-	{
-		unsigned long zones_size[MAX_NR_ZONES] = {0, };
-
-		zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
-		free_area_init(zones_size);
-	}
+	zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
+	free_area_init(zones_size);
 }
 
 void __init mem_init(void)
@@ -166,8 +124,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-void
-free_initmem()
+void free_initmem(void)
 {
 #ifdef CONFIG_RAMKERNEL
 	unsigned long addr;
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index a373d136b2b2..69345849454b 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -1,5 +1,367 @@
1#ifdef CONFIG_MMU
2#include "kmap_mm.c"
1/*
2 * linux/arch/m68k/mm/kmap.c
3 *
4 * Copyright (C) 1997 Roman Hodek
5 *
6 * 10/01/99 cleaned up the code and changing to the same interface
7 * used by other architectures /Roman Zippel
8 */
9
10#include <linux/module.h>
11#include <linux/mm.h>
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/vmalloc.h>
17
18#include <asm/setup.h>
19#include <asm/segment.h>
20#include <asm/page.h>
21#include <asm/pgalloc.h>
22#include <asm/io.h>
23#include <asm/system.h>
24
25#undef DEBUG
26
27#define PTRTREESIZE (256*1024)
28
29/*
30 * For 040/060 we can use the virtual memory area like other architectures,
31 * but for 020/030 we want to use early termination page descriptors and we
32 * can't mix these with normal page descriptors, so we have to copy that code
33 * (mm/vmalloc.c) and return appropriately aligned addresses.
34 */
35
36#ifdef CPU_M68040_OR_M68060_ONLY
37
38#define IO_SIZE PAGE_SIZE
39
40static inline struct vm_struct *get_io_area(unsigned long size)
41{
42 return get_vm_area(size, VM_IOREMAP);
43}
44
45
46static inline void free_io_area(void *addr)
47{
48 vfree((void *)(PAGE_MASK & (unsigned long)addr));
49}
50
3#else
4#include "kmap_no.c"
51#else
52
53#define IO_SIZE (256*1024)
54
55static struct vm_struct *iolist;
56
57static struct vm_struct *get_io_area(unsigned long size)
58{
59 unsigned long addr;
60 struct vm_struct **p, *tmp, *area;
61
62 area = kmalloc(sizeof(*area), GFP_KERNEL);
63 if (!area)
64 return NULL;
65 addr = KMAP_START;
66 for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
67 if (size + addr < (unsigned long)tmp->addr)
68 break;
69 if (addr > KMAP_END-size) {
70 kfree(area);
71 return NULL;
72 }
73 addr = tmp->size + (unsigned long)tmp->addr;
74 }
75 area->addr = (void *)addr;
76 area->size = size + IO_SIZE;
77 area->next = *p;
78 *p = area;
79 return area;
80}
81
82static inline void free_io_area(void *addr)
83{
84 struct vm_struct **p, *tmp;
85
86 if (!addr)
87 return;
88 addr = (void *)((unsigned long)addr & -IO_SIZE);
89 for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
90 if (tmp->addr == addr) {
91 *p = tmp->next;
92 __iounmap(tmp->addr, tmp->size);
93 kfree(tmp);
94 return;
95 }
96 }
97}
98
5#endif
99#endif
100
101/*
102 * Map some physical address range into the kernel address space.
103 */
104/* Rewritten by Andreas Schwab to remove all races. */
105
106void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
107{
108 struct vm_struct *area;
109 unsigned long virtaddr, retaddr;
110 long offset;
111 pgd_t *pgd_dir;
112 pmd_t *pmd_dir;
113 pte_t *pte_dir;
114
115 /*
116 * Don't allow mappings that wrap..
117 */
118 if (!size || physaddr > (unsigned long)(-size))
119 return NULL;
120
121#ifdef CONFIG_AMIGA
122 if (MACH_IS_AMIGA) {
123 if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
124 && (cacheflag == IOMAP_NOCACHE_SER))
125 return (void __iomem *)physaddr;
126 }
127#endif
128
129#ifdef DEBUG
130 printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
131#endif
132 /*
133 * Mappings have to be aligned
134 */
135 offset = physaddr & (IO_SIZE - 1);
136 physaddr &= -IO_SIZE;
137 size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
138
139 /*
140 * Ok, go for it..
141 */
142 area = get_io_area(size);
143 if (!area)
144 return NULL;
145
146 virtaddr = (unsigned long)area->addr;
147 retaddr = virtaddr + offset;
148#ifdef DEBUG
149 printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
150#endif
151
152 /*
153 * add cache and table flags to physical address
154 */
155 if (CPU_IS_040_OR_060) {
156 physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
157 _PAGE_ACCESSED | _PAGE_DIRTY);
158 switch (cacheflag) {
159 case IOMAP_FULL_CACHING:
160 physaddr |= _PAGE_CACHE040;
161 break;
162 case IOMAP_NOCACHE_SER:
163 default:
164 physaddr |= _PAGE_NOCACHE_S;
165 break;
166 case IOMAP_NOCACHE_NONSER:
167 physaddr |= _PAGE_NOCACHE;
168 break;
169 case IOMAP_WRITETHROUGH:
170 physaddr |= _PAGE_CACHE040W;
171 break;
172 }
173 } else {
174 physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
175 switch (cacheflag) {
176 case IOMAP_NOCACHE_SER:
177 case IOMAP_NOCACHE_NONSER:
178 default:
179 physaddr |= _PAGE_NOCACHE030;
180 break;
181 case IOMAP_FULL_CACHING:
182 case IOMAP_WRITETHROUGH:
183 break;
184 }
185 }
186
187 while ((long)size > 0) {
188#ifdef DEBUG
189 if (!(virtaddr & (PTRTREESIZE-1)))
190 printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
191#endif
192 pgd_dir = pgd_offset_k(virtaddr);
193 pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
194 if (!pmd_dir) {
195 printk("ioremap: no mem for pmd_dir\n");
196 return NULL;
197 }
198
199 if (CPU_IS_020_OR_030) {
200 pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
201 physaddr += PTRTREESIZE;
202 virtaddr += PTRTREESIZE;
203 size -= PTRTREESIZE;
204 } else {
205 pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
206 if (!pte_dir) {
207 printk("ioremap: no mem for pte_dir\n");
208 return NULL;
209 }
210
211 pte_val(*pte_dir) = physaddr;
212 virtaddr += PAGE_SIZE;
213 physaddr += PAGE_SIZE;
214 size -= PAGE_SIZE;
215 }
216 }
217#ifdef DEBUG
218 printk("\n");
219#endif
220 flush_tlb_all();
221
222 return (void __iomem *)retaddr;
223}
224EXPORT_SYMBOL(__ioremap);
225
226/*
227 * Unmap an ioremap()ed region again
228 */
229void iounmap(void __iomem *addr)
230{
231#ifdef CONFIG_AMIGA
232 if ((!MACH_IS_AMIGA) ||
233 (((unsigned long)addr < 0x40000000) ||
234 ((unsigned long)addr > 0x60000000)))
235 free_io_area((__force void *)addr);
236#else
237 free_io_area((__force void *)addr);
238#endif
239}
240EXPORT_SYMBOL(iounmap);
241
242/*
243 * __iounmap unmaps nearly everything, so be careful.
244 * It no longer frees the pointer/page tables, but that
245 * wasn't used anyway and might be added back later.
246 */
247void __iounmap(void *addr, unsigned long size)
248{
249 unsigned long virtaddr = (unsigned long)addr;
250 pgd_t *pgd_dir;
251 pmd_t *pmd_dir;
252 pte_t *pte_dir;
253
254 while ((long)size > 0) {
255 pgd_dir = pgd_offset_k(virtaddr);
256 if (pgd_bad(*pgd_dir)) {
257 printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
258 pgd_clear(pgd_dir);
259 return;
260 }
261 pmd_dir = pmd_offset(pgd_dir, virtaddr);
262
263 if (CPU_IS_020_OR_030) {
264 int pmd_off = (virtaddr/PTRTREESIZE) & 15;
265 int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
266
267 if (pmd_type == _PAGE_PRESENT) {
268 pmd_dir->pmd[pmd_off] = 0;
269 virtaddr += PTRTREESIZE;
270 size -= PTRTREESIZE;
271 continue;
272 } else if (pmd_type == 0)
273 continue;
274 }
275
276 if (pmd_bad(*pmd_dir)) {
277 printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
278 pmd_clear(pmd_dir);
279 return;
280 }
281 pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
282
283 pte_val(*pte_dir) = 0;
284 virtaddr += PAGE_SIZE;
285 size -= PAGE_SIZE;
286 }
287
288 flush_tlb_all();
289}
290
291/*
292 * Set new cache mode for some kernel address space.
293 * The caller must push data for that range itself, if such data may already
294 * be in the cache.
295 */
296void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
297{
298 unsigned long virtaddr = (unsigned long)addr;
299 pgd_t *pgd_dir;
300 pmd_t *pmd_dir;
301 pte_t *pte_dir;
302
303 if (CPU_IS_040_OR_060) {
304 switch (cmode) {
305 case IOMAP_FULL_CACHING:
306 cmode = _PAGE_CACHE040;
307 break;
308 case IOMAP_NOCACHE_SER:
309 default:
310 cmode = _PAGE_NOCACHE_S;
311 break;
312 case IOMAP_NOCACHE_NONSER:
313 cmode = _PAGE_NOCACHE;
314 break;
315 case IOMAP_WRITETHROUGH:
316 cmode = _PAGE_CACHE040W;
317 break;
318 }
319 } else {
320 switch (cmode) {
321 case IOMAP_NOCACHE_SER:
322 case IOMAP_NOCACHE_NONSER:
323 default:
324 cmode = _PAGE_NOCACHE030;
325 break;
326 case IOMAP_FULL_CACHING:
327 case IOMAP_WRITETHROUGH:
328 cmode = 0;
329 }
330 }
331
332 while ((long)size > 0) {
333 pgd_dir = pgd_offset_k(virtaddr);
334 if (pgd_bad(*pgd_dir)) {
335 printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
336 pgd_clear(pgd_dir);
337 return;
338 }
339 pmd_dir = pmd_offset(pgd_dir, virtaddr);
340
341 if (CPU_IS_020_OR_030) {
342 int pmd_off = (virtaddr/PTRTREESIZE) & 15;
343
344 if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
345 pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
346 _CACHEMASK040) | cmode;
347 virtaddr += PTRTREESIZE;
348 size -= PTRTREESIZE;
349 continue;
350 }
351 }
352
353 if (pmd_bad(*pmd_dir)) {
354 printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
355 pmd_clear(pmd_dir);
356 return;
357 }
358 pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
359
360 pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
361 virtaddr += PAGE_SIZE;
362 size -= PAGE_SIZE;
363 }
364
365 flush_tlb_all();
366}
367EXPORT_SYMBOL(kernel_set_cachemode);
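
On 020/030 the mapping granularity is therefore PTRTREESIZE (256 KiB early-termination descriptors) rather than a page, which is why get_io_area() pads every area by IO_SIZE. A hypothetical driver-side use of the interface (device address and flag choice invented for illustration; the IOMAP_* constants come from the m68k <asm/io.h> family):

#include <linux/errno.h>
#include <asm/io.h>

#define DEMO_PHYS	0x02000000UL	/* assumed device base */
#define DEMO_LEN	0x1000UL

static void __iomem *demo_regs;

static int demo_map(void)
{
	/* serialised, uncached access for device registers */
	demo_regs = __ioremap(DEMO_PHYS, DEMO_LEN, IOMAP_NOCACHE_SER);
	return demo_regs ? 0 : -ENOMEM;
}

static void demo_unmap(void)
{
	iounmap(demo_regs);	/* rounds back down to the IO_SIZE block */
}
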
diff --git a/arch/m68k/mm/kmap_mm.c b/arch/m68k/mm/kmap_mm.c
deleted file mode 100644
index 69345849454b..000000000000
--- a/arch/m68k/mm/kmap_mm.c
+++ /dev/null
@@ -1,367 +0,0 @@
1/*
2 * linux/arch/m68k/mm/kmap.c
3 *
4 * Copyright (C) 1997 Roman Hodek
5 *
6 * 10/01/99 cleaned up the code and changing to the same interface
7 * used by other architectures /Roman Zippel
8 */
9
10#include <linux/module.h>
11#include <linux/mm.h>
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/vmalloc.h>
17
18#include <asm/setup.h>
19#include <asm/segment.h>
20#include <asm/page.h>
21#include <asm/pgalloc.h>
22#include <asm/io.h>
23#include <asm/system.h>
24
25#undef DEBUG
26
27#define PTRTREESIZE (256*1024)
28
29/*
30 * For 040/060 we can use the virtual memory area like other architectures,
31 * but for 020/030 we want to use early termination page descriptor and we
32 * can't mix this with normal page descriptors, so we have to copy that code
33 * (mm/vmalloc.c) and return appriorate aligned addresses.
34 */
35
36#ifdef CPU_M68040_OR_M68060_ONLY
37
38#define IO_SIZE PAGE_SIZE
39
40static inline struct vm_struct *get_io_area(unsigned long size)
41{
42 return get_vm_area(size, VM_IOREMAP);
43}
44
45
46static inline void free_io_area(void *addr)
47{
48 vfree((void *)(PAGE_MASK & (unsigned long)addr));
49}
50
51#else
52
53#define IO_SIZE (256*1024)
54
55static struct vm_struct *iolist;
56
57static struct vm_struct *get_io_area(unsigned long size)
58{
59 unsigned long addr;
60 struct vm_struct **p, *tmp, *area;
61
62 area = kmalloc(sizeof(*area), GFP_KERNEL);
63 if (!area)
64 return NULL;
65 addr = KMAP_START;
66 for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
67 if (size + addr < (unsigned long)tmp->addr)
68 break;
69 if (addr > KMAP_END-size) {
70 kfree(area);
71 return NULL;
72 }
73 addr = tmp->size + (unsigned long)tmp->addr;
74 }
75 area->addr = (void *)addr;
76 area->size = size + IO_SIZE;
77 area->next = *p;
78 *p = area;
79 return area;
80}
81
82static inline void free_io_area(void *addr)
83{
84 struct vm_struct **p, *tmp;
85
86 if (!addr)
87 return;
88 addr = (void *)((unsigned long)addr & -IO_SIZE);
89 for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
90 if (tmp->addr == addr) {
91 *p = tmp->next;
92 __iounmap(tmp->addr, tmp->size);
93 kfree(tmp);
94 return;
95 }
96 }
97}
98
99#endif
100
101/*
102 * Map some physical address range into the kernel address space.
103 */
104/* Rewritten by Andreas Schwab to remove all races. */
105
106void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
107{
108 struct vm_struct *area;
109 unsigned long virtaddr, retaddr;
110 long offset;
111 pgd_t *pgd_dir;
112 pmd_t *pmd_dir;
113 pte_t *pte_dir;
114
115 /*
116 * Don't allow mappings that wrap..
117 */
118 if (!size || physaddr > (unsigned long)(-size))
119 return NULL;
120
121#ifdef CONFIG_AMIGA
122 if (MACH_IS_AMIGA) {
123 if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
124 && (cacheflag == IOMAP_NOCACHE_SER))
125 return (void __iomem *)physaddr;
126 }
127#endif
128
129#ifdef DEBUG
130 printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
131#endif
132 /*
133 * Mappings have to be aligned
134 */
135 offset = physaddr & (IO_SIZE - 1);
136 physaddr &= -IO_SIZE;
137 size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
138
139 /*
140 * Ok, go for it..
141 */
142 area = get_io_area(size);
143 if (!area)
144 return NULL;
145
146 virtaddr = (unsigned long)area->addr;
147 retaddr = virtaddr + offset;
148#ifdef DEBUG
149 printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
150#endif
151
152 /*
153 * add cache and table flags to physical address
154 */
155 if (CPU_IS_040_OR_060) {
156 physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
157 _PAGE_ACCESSED | _PAGE_DIRTY);
158 switch (cacheflag) {
159 case IOMAP_FULL_CACHING:
160 physaddr |= _PAGE_CACHE040;
161 break;
162 case IOMAP_NOCACHE_SER:
163 default:
164 physaddr |= _PAGE_NOCACHE_S;
165 break;
166 case IOMAP_NOCACHE_NONSER:
167 physaddr |= _PAGE_NOCACHE;
168 break;
169 case IOMAP_WRITETHROUGH:
170 physaddr |= _PAGE_CACHE040W;
171 break;
172 }
173 } else {
174 physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
175 switch (cacheflag) {
176 case IOMAP_NOCACHE_SER:
177 case IOMAP_NOCACHE_NONSER:
178 default:
179 physaddr |= _PAGE_NOCACHE030;
180 break;
181 case IOMAP_FULL_CACHING:
182 case IOMAP_WRITETHROUGH:
183 break;
184 }
185 }
186
187 while ((long)size > 0) {
188#ifdef DEBUG
189 if (!(virtaddr & (PTRTREESIZE-1)))
190 printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
191#endif
192 pgd_dir = pgd_offset_k(virtaddr);
193 pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
194 if (!pmd_dir) {
195 printk("ioremap: no mem for pmd_dir\n");
196 return NULL;
197 }
198
199 if (CPU_IS_020_OR_030) {
200 pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
201 physaddr += PTRTREESIZE;
202 virtaddr += PTRTREESIZE;
203 size -= PTRTREESIZE;
204 } else {
205 pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
206 if (!pte_dir) {
207 printk("ioremap: no mem for pte_dir\n");
208 return NULL;
209 }
210
211 pte_val(*pte_dir) = physaddr;
212 virtaddr += PAGE_SIZE;
213 physaddr += PAGE_SIZE;
214 size -= PAGE_SIZE;
215 }
216 }
217#ifdef DEBUG
218 printk("\n");
219#endif
220 flush_tlb_all();
221
222 return (void __iomem *)retaddr;
223}
224EXPORT_SYMBOL(__ioremap);
225
226/*
227 * Unmap a ioremap()ed region again
228 */
229void iounmap(void __iomem *addr)
230{
231#ifdef CONFIG_AMIGA
232 if ((!MACH_IS_AMIGA) ||
233 (((unsigned long)addr < 0x40000000) ||
234 ((unsigned long)addr > 0x60000000)))
235 free_io_area((__force void *)addr);
236#else
237 free_io_area((__force void *)addr);
238#endif
239}
240EXPORT_SYMBOL(iounmap);
241
242/*
243 * __iounmap unmaps nearly everything, so be careful
244 * it doesn't free currently pointer/page tables anymore but it
245 * wans't used anyway and might be added later.
246 */
247void __iounmap(void *addr, unsigned long size)
248{
249 unsigned long virtaddr = (unsigned long)addr;
250 pgd_t *pgd_dir;
251 pmd_t *pmd_dir;
252 pte_t *pte_dir;
253
254 while ((long)size > 0) {
255 pgd_dir = pgd_offset_k(virtaddr);
256 if (pgd_bad(*pgd_dir)) {
257 printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
258 pgd_clear(pgd_dir);
259 return;
260 }
261 pmd_dir = pmd_offset(pgd_dir, virtaddr);
262
263 if (CPU_IS_020_OR_030) {
264 int pmd_off = (virtaddr/PTRTREESIZE) & 15;
265 int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
266
267 if (pmd_type == _PAGE_PRESENT) {
268 pmd_dir->pmd[pmd_off] = 0;
269 virtaddr += PTRTREESIZE;
270 size -= PTRTREESIZE;
271 continue;
272 } else if (pmd_type == 0)
273 continue;
274 }
275
276 if (pmd_bad(*pmd_dir)) {
277 printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
278 pmd_clear(pmd_dir);
279 return;
280 }
281 pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
282
283 pte_val(*pte_dir) = 0;
284 virtaddr += PAGE_SIZE;
285 size -= PAGE_SIZE;
286 }
287
288 flush_tlb_all();
289}
290
291/*
292 * Set new cache mode for some kernel address space.
293 * The caller must push data for that range itself, if such data may already
294 * be in the cache.
295 */
296void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
297{
298 unsigned long virtaddr = (unsigned long)addr;
299 pgd_t *pgd_dir;
300 pmd_t *pmd_dir;
301 pte_t *pte_dir;
302
303 if (CPU_IS_040_OR_060) {
304 switch (cmode) {
305 case IOMAP_FULL_CACHING:
306 cmode = _PAGE_CACHE040;
307 break;
308 case IOMAP_NOCACHE_SER:
309 default:
310 cmode = _PAGE_NOCACHE_S;
311 break;
312 case IOMAP_NOCACHE_NONSER:
313 cmode = _PAGE_NOCACHE;
314 break;
315 case IOMAP_WRITETHROUGH:
316 cmode = _PAGE_CACHE040W;
317 break;
318 }
319 } else {
320 switch (cmode) {
321 case IOMAP_NOCACHE_SER:
322 case IOMAP_NOCACHE_NONSER:
323 default:
324 cmode = _PAGE_NOCACHE030;
325 break;
326 case IOMAP_FULL_CACHING:
327 case IOMAP_WRITETHROUGH:
328 cmode = 0;
329 }
330 }
331
332 while ((long)size > 0) {
333 pgd_dir = pgd_offset_k(virtaddr);
334 if (pgd_bad(*pgd_dir)) {
335 printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
336 pgd_clear(pgd_dir);
337 return;
338 }
339 pmd_dir = pmd_offset(pgd_dir, virtaddr);
340
341 if (CPU_IS_020_OR_030) {
342 int pmd_off = (virtaddr/PTRTREESIZE) & 15;
343
344 if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
345 pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
346 _CACHEMASK040) | cmode;
347 virtaddr += PTRTREESIZE;
348 size -= PTRTREESIZE;
349 continue;
350 }
351 }
352
353 if (pmd_bad(*pmd_dir)) {
354 printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
355 pmd_clear(pmd_dir);
356 return;
357 }
358 pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
359
360 pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
361 virtaddr += PAGE_SIZE;
362 size -= PAGE_SIZE;
363 }
364
365 flush_tlb_all();
366}
367EXPORT_SYMBOL(kernel_set_cachemode);
diff --git a/arch/m68k/mm/kmap_no.c b/arch/m68k/mm/kmap_no.c
deleted file mode 100644
index ece8d5ad4e6c..000000000000
--- a/arch/m68k/mm/kmap_no.c
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * linux/arch/m68knommu/mm/kmap.c
3 *
4 * Copyright (C) 2000 Lineo, <davidm@snapgear.com>
5 * Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com>
6 */
7
8#include <linux/mm.h>
9#include <linux/kernel.h>
10#include <linux/string.h>
11#include <linux/types.h>
12#include <linux/vmalloc.h>
13
14#include <asm/setup.h>
15#include <asm/segment.h>
16#include <asm/page.h>
17#include <asm/pgalloc.h>
18#include <asm/io.h>
19#include <asm/system.h>
20
21#undef DEBUG
22
23/*
24 * Map some physical address range into the kernel address space.
25 */
26void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
27{
28 return (void *)physaddr;
29}
30
31/*
32 * Unmap a ioremap()ed region again.
33 */
34void iounmap(void *addr)
35{
36}
37
38/*
39 * Set new cache mode for some kernel address space.
40 * The caller must push data for that range itself, if such data may already
41 * be in the cache.
42 */
43void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
44{
45}
diff --git a/arch/m68k/platform/68328/entry.S b/arch/m68k/platform/68328/entry.S
index 676960cf022a..f68dce766c0a 100644
--- a/arch/m68k/platform/68328/entry.S
+++ b/arch/m68k/platform/68328/entry.S
@@ -10,7 +10,6 @@
  * Linux/m68k support by Hamish Macdonald
  */
 
-#include <linux/sys.h>
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
@@ -80,7 +79,7 @@ ENTRY(system_call)
 	movel	%sp,%d1			/* get thread_info pointer */
 	andl	#-THREAD_SIZE,%d1
 	movel	%d1,%a2
-	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
+	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
 	jne	do_trace
 	cmpl	#NR_syscalls,%d0
 	jcc	badsys
@@ -107,12 +106,12 @@ Luser_return:
 	andl	#-THREAD_SIZE,%d1
 	movel	%d1,%a2
 1:
-	move	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
+	move	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
 	jne	Lwork_to_do
 	RESTORE_ALL
 
 Lwork_to_do:
-	movel	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
+	movel	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
 	btst	#TIF_NEED_RESCHED,%d1
 	jne	reschedule
 
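
These TI_FLAGS/TI_PREEMPTCOUNT -> TINFO_FLAGS/TINFO_PREEMPT renames (here and in the 68360 and ColdFire entry code below) follow the asm-offsets.c merge listed in the diffstat: the assembly constants are generated at build time from the C struct layout, so the rename only has to happen in one generator. A sketch of the generating side (illustrative; the real entries live in arch/m68k/kernel/asm-offsets.c):

/* asm-offsets sketch: kbuild turns these DEFINEs into the
 * asm-offsets.h constants that entry.S reads. */
#include <linux/kbuild.h>
#include <linux/sched.h>

int main(void)
{
	DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
	return 0;
}
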
diff --git a/arch/m68k/platform/68360/entry.S b/arch/m68k/platform/68360/entry.S
index 46c1b18c9dcb..a07b14feed92 100644
--- a/arch/m68k/platform/68360/entry.S
+++ b/arch/m68k/platform/68360/entry.S
@@ -12,7 +12,6 @@
  * M68360 Port by SED Systems, and Lineo.
  */
 
-#include <linux/sys.h>
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
@@ -76,7 +75,7 @@ ENTRY(system_call)
 	movel	%sp,%d1			/* get thread_info pointer */
 	andl	#-THREAD_SIZE,%d1
 	movel	%d1,%a2
-	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
+	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
 	jne	do_trace
 	cmpl	#NR_syscalls,%d0
 	jcc	badsys
@@ -103,12 +102,12 @@ Luser_return:
 	andl	#-THREAD_SIZE,%d1
 	movel	%d1,%a2
 1:
-	move	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
+	move	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
 	jne	Lwork_to_do
 	RESTORE_ALL
 
 Lwork_to_do:
-	movel	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
+	movel	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
 	btst	#TIF_NEED_RESCHED,%d1
 	jne	reschedule
 
diff --git a/arch/m68k/platform/coldfire/dma.c b/arch/m68k/platform/coldfire/dma.c
index e88b95e2cc62..df5ce20d181c 100644
--- a/arch/m68k/platform/coldfire/dma.c
+++ b/arch/m68k/platform/coldfire/dma.c
@@ -9,6 +9,7 @@
 /***************************************************************************/
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <asm/dma.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
@@ -33,7 +34,9 @@ unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS] = {
 	MCFDMA_BASE3,
 #endif
 };
+EXPORT_SYMBOL(dma_base_addr);
 
 unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
+EXPORT_SYMBOL(dma_device_address);
 
 /***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/entry.S b/arch/m68k/platform/coldfire/entry.S
index eab63f09965b..27c2b001161e 100644
--- a/arch/m68k/platform/coldfire/entry.S
+++ b/arch/m68k/platform/coldfire/entry.S
@@ -26,7 +26,6 @@
  * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
  */
 
-#include <linux/sys.h>
 #include <linux/linkage.h>
 #include <asm/unistd.h>
 #include <asm/thread_info.h>
@@ -78,7 +77,7 @@ ENTRY(system_call)
 	movel	%d2,%a0
 	movel	%a0@,%a1		/* save top of frame */
 	movel	%sp,%a1@(TASK_THREAD+THREAD_ESP0)
-	btst	#(TIF_SYSCALL_TRACE%8),%a0@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
+	btst	#(TIF_SYSCALL_TRACE%8),%a0@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
 	bnes	1f
 
 	movel	%d3,%a0
@@ -113,11 +112,11 @@ ret_from_exception:
 	movel	%sp,%d1			/* get thread_info pointer */
 	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
 	movel	%d1,%a0
-	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
+	movel	%a0@(TINFO_FLAGS),%d1	/* get thread_info->flags */
 	andl	#(1<<TIF_NEED_RESCHED),%d1
 	jeq	Lkernel_return
 
-	movel	%a0@(TI_PREEMPTCOUNT),%d1
+	movel	%a0@(TINFO_PREEMPT),%d1
 	cmpl	#0,%d1
 	jne	Lkernel_return
 
@@ -137,14 +136,14 @@ Luser_return:
 	movel	%sp,%d1			/* get thread_info pointer */
 	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
 	movel	%d1,%a0
-	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
+	movel	%a0@(TINFO_FLAGS),%d1	/* get thread_info->flags */
 	jne	Lwork_to_do		/* still work to do */
 
 Lreturn:
 	RESTORE_USER
 
 Lwork_to_do:
-	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
+	movel	%a0@(TINFO_FLAGS),%d1	/* get thread_info->flags */
 	move	#0x2000,%sr		/* enable intrs again */
 	btst	#TIF_NEED_RESCHED,%d1
 	jne	reschedule
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S
index 6ae91a499184..c33483824a2e 100644
--- a/arch/m68k/platform/coldfire/head.S
+++ b/arch/m68k/platform/coldfire/head.S
@@ -8,7 +8,6 @@
 
 /*****************************************************************************/
 
-#include <linux/sys.h>
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/asm-offsets.h>
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 01af3876cf90..a81176f44c74 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -118,7 +118,7 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
+	PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 968bcd2cb022..6f702a6ab395 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -70,7 +70,7 @@ SECTIONS
 	.exit.text : { EXIT_TEXT; }
 	.exit.data : { EXIT_DATA; }
 
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index e1a55849bfa7..fa6f2b8163e0 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -149,7 +149,7 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b9150f07d266..920276c0f6a1 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -160,7 +160,7 @@ SECTIONS
 		INIT_RAM_FS
 	}
 
-	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 
 	. = ALIGN(8);
 	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 4a7f14079e03..ff2d2371b2e9 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -230,17 +230,6 @@ config SYSVIPC_COMPAT
230config AUDIT_ARCH 230config AUDIT_ARCH
231 def_bool y 231 def_bool y
232 232
233config S390_EXEC_PROTECT
234 def_bool y
235 prompt "Data execute protection"
236 help
237 This option allows to enable a buffer overflow protection for user
238 space programs and it also selects the addressing mode option above.
239 The kernel parameter noexec=on will enable this feature and also
240 switch the addressing modes, default is disabled. Enabling this (via
241 kernel parameter) on machines earlier than IBM System z9 this will
242 reduce system performance.
243
244comment "Code generation options" 233comment "Code generation options"
245 234
246choice 235choice
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 5c91995b74e4..24bff4f1cc52 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -130,9 +130,7 @@ static void appldata_work_fn(struct work_struct *work)
130{ 130{
131 struct list_head *lh; 131 struct list_head *lh;
132 struct appldata_ops *ops; 132 struct appldata_ops *ops;
133 int i;
134 133
135 i = 0;
136 get_online_cpus(); 134 get_online_cpus();
137 mutex_lock(&appldata_ops_mutex); 135 mutex_lock(&appldata_ops_mutex);
138 list_for_each(lh, &appldata_ops_list) { 136 list_for_each(lh, &appldata_ops_list) {
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 7488e52efa97..81d7908416cf 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -167,7 +167,6 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
167#ifdef CONFIG_64BIT 167#ifdef CONFIG_64BIT
168#define cmpxchg64(ptr, o, n) \ 168#define cmpxchg64(ptr, o, n) \
169({ \ 169({ \
170 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
171 cmpxchg((ptr), (o), (n)); \ 170 cmpxchg((ptr), (o), (n)); \
172}) 171})
173#else /* CONFIG_64BIT */ 172#else /* CONFIG_64BIT */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 10c029cfcc7d..64b61bf72e93 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -196,18 +196,6 @@ do { \
196} while (0) 196} while (0)
197#endif /* __s390x__ */ 197#endif /* __s390x__ */
198 198
199/*
200 * An executable for which elf_read_implies_exec() returns TRUE will
201 * have the READ_IMPLIES_EXEC personality flag set automatically.
202 */
203#define elf_read_implies_exec(ex, executable_stack) \
204({ \
205 if (current->mm->context.noexec && \
206 executable_stack != EXSTACK_DISABLE_X) \
207 disable_noexec(current->mm, current); \
208 current->mm->context.noexec == 0; \
209})
210
211#define STACK_RND_MASK 0x7ffUL 199#define STACK_RND_MASK 0x7ffUL
212 200
213#define ARCH_DLINFO \ 201#define ARCH_DLINFO \
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index b56403c2df28..799ed0f1643d 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -111,21 +111,10 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
111{ 111{
112 pmd_t *pmdp = (pmd_t *) ptep; 112 pmd_t *pmdp = (pmd_t *) ptep;
113 113
114 if (!MACHINE_HAS_IDTE) { 114 if (MACHINE_HAS_IDTE)
115 __pmd_csp(pmdp);
116 if (mm->context.noexec) {
117 pmdp = get_shadow_table(pmdp);
118 __pmd_csp(pmdp);
119 }
120 return;
121 }
122
123 __pmd_idte(address, pmdp);
124 if (mm->context.noexec) {
125 pmdp = get_shadow_table(pmdp);
126 __pmd_idte(address, pmdp); 115 __pmd_idte(address, pmdp);
127 } 116 else
128 return; 117 __pmd_csp(pmdp);
129} 118}
130 119
131#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ 120#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index db14a311f1d2..1544b90bd6d6 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -15,6 +15,7 @@ enum interruption_class {
15 EXTINT_VRT, 15 EXTINT_VRT,
16 EXTINT_SCP, 16 EXTINT_SCP,
17 EXTINT_IUC, 17 EXTINT_IUC,
18 EXTINT_CPM,
18 IOINT_QAI, 19 IOINT_QAI,
19 IOINT_QDI, 20 IOINT_QDI,
20 IOINT_DAS, 21 IOINT_DAS,
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 65e172f8209d..228cf0b295db 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -124,7 +124,7 @@ struct _lowcore {
124 /* Address space pointer. */ 124 /* Address space pointer. */
125 __u32 kernel_asce; /* 0x02ac */ 125 __u32 kernel_asce; /* 0x02ac */
126 __u32 user_asce; /* 0x02b0 */ 126 __u32 user_asce; /* 0x02b0 */
127 __u32 user_exec_asce; /* 0x02b4 */ 127 __u32 current_pid; /* 0x02b4 */
128 128
129 /* SMP info area */ 129 /* SMP info area */
130 __u32 cpu_nr; /* 0x02b8 */ 130 __u32 cpu_nr; /* 0x02b8 */
@@ -255,7 +255,7 @@ struct _lowcore {
255 /* Address space pointer. */ 255 /* Address space pointer. */
256 __u64 kernel_asce; /* 0x0310 */ 256 __u64 kernel_asce; /* 0x0310 */
257 __u64 user_asce; /* 0x0318 */ 257 __u64 user_asce; /* 0x0318 */
258 __u64 user_exec_asce; /* 0x0320 */ 258 __u64 current_pid; /* 0x0320 */
259 259
260 /* SMP info area */ 260 /* SMP info area */
261 __u32 cpu_nr; /* 0x0328 */ 261 __u32 cpu_nr; /* 0x0328 */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 78522cdefdd4..82d0847896a0 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,19 +5,18 @@ typedef struct {
5 atomic_t attach_count; 5 atomic_t attach_count;
6 unsigned int flush_mm; 6 unsigned int flush_mm;
7 spinlock_t list_lock; 7 spinlock_t list_lock;
8 struct list_head crst_list;
9 struct list_head pgtable_list; 8 struct list_head pgtable_list;
10 unsigned long asce_bits; 9 unsigned long asce_bits;
11 unsigned long asce_limit; 10 unsigned long asce_limit;
12 unsigned long vdso_base; 11 unsigned long vdso_base;
13 int noexec; 12 /* Cloned contexts will be created with extended page tables. */
14 int has_pgste; /* The mmu context has extended page tables */ 13 unsigned int alloc_pgste:1;
15 int alloc_pgste; /* cloned contexts will have extended page tables */ 14 /* The mmu context has extended page tables. */
15 unsigned int has_pgste:1;
16} mm_context_t; 16} mm_context_t;
17 17
18#define INIT_MM_CONTEXT(name) \ 18#define INIT_MM_CONTEXT(name) \
19 .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \ 19 .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
20 .context.crst_list = LIST_HEAD_INIT(name.context.crst_list), \
21 .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), 20 .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
22 21
23#endif 22#endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8c277caa8d3a..5682f160ff82 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -35,11 +35,9 @@ static inline int init_new_context(struct task_struct *tsk,
35 * and if has_pgste is set, it will create extended page 35 * and if has_pgste is set, it will create extended page
36 * tables. 36 * tables.
37 */ 37 */
38 mm->context.noexec = 0;
39 mm->context.has_pgste = 1; 38 mm->context.has_pgste = 1;
40 mm->context.alloc_pgste = 1; 39 mm->context.alloc_pgste = 1;
41 } else { 40 } else {
42 mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
43 mm->context.has_pgste = 0; 41 mm->context.has_pgste = 0;
44 mm->context.alloc_pgste = 0; 42 mm->context.alloc_pgste = 0;
45 } 43 }
@@ -63,10 +61,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
63 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); 61 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
64 if (user_mode != HOME_SPACE_MODE) { 62 if (user_mode != HOME_SPACE_MODE) {
65 /* Load primary space page table origin. */ 63 /* Load primary space page table origin. */
66 pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
67 S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
68 asm volatile(LCTL_OPCODE" 1,1,%0\n" 64 asm volatile(LCTL_OPCODE" 1,1,%0\n"
69 : : "m" (S390_lowcore.user_exec_asce) ); 65 : : "m" (S390_lowcore.user_asce) );
70 } else 66 } else
71 /* Load home space page table origin. */ 67 /* Load home space page table origin. */
72 asm volatile(LCTL_OPCODE" 13,13,%0" 68 asm volatile(LCTL_OPCODE" 13,13,%0"
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 3c987e9ec8d6..accb372ddc7e 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -90,6 +90,7 @@ static inline void copy_page(void *to, void *from)
90 */ 90 */
91 91
92typedef struct { unsigned long pgprot; } pgprot_t; 92typedef struct { unsigned long pgprot; } pgprot_t;
93typedef struct { unsigned long pgste; } pgste_t;
93typedef struct { unsigned long pte; } pte_t; 94typedef struct { unsigned long pte; } pte_t;
94typedef struct { unsigned long pmd; } pmd_t; 95typedef struct { unsigned long pmd; } pmd_t;
95typedef struct { unsigned long pud; } pud_t; 96typedef struct { unsigned long pud; } pud_t;
@@ -97,18 +98,21 @@ typedef struct { unsigned long pgd; } pgd_t;
97typedef pte_t *pgtable_t; 98typedef pte_t *pgtable_t;
98 99
99#define pgprot_val(x) ((x).pgprot) 100#define pgprot_val(x) ((x).pgprot)
101#define pgste_val(x) ((x).pgste)
100#define pte_val(x) ((x).pte) 102#define pte_val(x) ((x).pte)
101#define pmd_val(x) ((x).pmd) 103#define pmd_val(x) ((x).pmd)
102#define pud_val(x) ((x).pud) 104#define pud_val(x) ((x).pud)
103#define pgd_val(x) ((x).pgd) 105#define pgd_val(x) ((x).pgd)
104 106
107#define __pgste(x) ((pgste_t) { (x) } )
105#define __pte(x) ((pte_t) { (x) } ) 108#define __pte(x) ((pte_t) { (x) } )
106#define __pmd(x) ((pmd_t) { (x) } ) 109#define __pmd(x) ((pmd_t) { (x) } )
110#define __pud(x) ((pud_t) { (x) } )
107#define __pgd(x) ((pgd_t) { (x) } ) 111#define __pgd(x) ((pgd_t) { (x) } )
108#define __pgprot(x) ((pgprot_t) { (x) } ) 112#define __pgprot(x) ((pgprot_t) { (x) } )
109 113
110static inline void 114static inline void page_set_storage_key(unsigned long addr,
111page_set_storage_key(unsigned long addr, unsigned int skey, int mapped) 115 unsigned char skey, int mapped)
112{ 116{
113 if (!mapped) 117 if (!mapped)
114 asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0" 118 asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
@@ -117,15 +121,59 @@ page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
117 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); 121 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
118} 122}
119 123
120static inline unsigned int 124static inline unsigned char page_get_storage_key(unsigned long addr)
121page_get_storage_key(unsigned long addr)
122{ 125{
123 unsigned int skey; 126 unsigned char skey;
124 127
125 asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0)); 128 asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
126 return skey; 129 return skey;
127} 130}
128 131
132static inline int page_reset_referenced(unsigned long addr)
133{
134 unsigned int ipm;
135
136 asm volatile(
137 " rrbe 0,%1\n"
138 " ipm %0\n"
139 : "=d" (ipm) : "a" (addr) : "cc");
140 return !!(ipm & 0x20000000);
141}
142
143/* Bits in the storage key */
144#define _PAGE_CHANGED 0x02 /* HW changed bit */
145#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
146#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
147#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
148
149/*
150 * Test and clear dirty bit in storage key.
151 * We can't clear the changed bit atomically. This is a potential
152 * race against modification of the referenced bit. This function
153 * should therefore only be called if it is not mapped in any
154 * address space.
155 */
156#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
157static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
158{
159 unsigned char skey;
160
161 skey = page_get_storage_key(pfn << PAGE_SHIFT);
162 if (!(skey & _PAGE_CHANGED))
163 return 0;
164 page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
165 return 1;
166}
167
168/*
169 * Test and clear referenced bit in storage key.
170 */
171#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
172static inline int page_test_and_clear_young(unsigned long pfn)
173{
174 return page_reset_referenced(pfn << PAGE_SHIFT);
175}
176
129struct page; 177struct page;
130void arch_free_page(struct page *page, int order); 178void arch_free_page(struct page *page, int order);
131void arch_alloc_page(struct page *page, int order); 179void arch_alloc_page(struct page *page, int order);
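The storage-key helpers consolidated into page.h above make the dirty/referenced bookkeeping visible in one place: read the key, collect the changed/referenced bits, and write the key back without them. A user-space toy of that collect-and-clear step (a sketch only; the key value is simulated instead of being read with iske):

#include <stdio.h>

#define _PAGE_CHANGED		0x02	/* HW changed bit */
#define _PAGE_REFERENCED	0x04	/* HW referenced bit */

int main(void)
{
	/* Simulated storage key: access-control bits 0x3, changed and
	 * referenced both set by hardware. */
	unsigned char skey = 0x30 | _PAGE_CHANGED | _PAGE_REFERENCED;
	unsigned long bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);

	/* skey ^= bits clears exactly the bits just collected; this is
	 * the same one-store reset that pgste_update_all() in the
	 * pgtable.h hunks further down performs. */
	if (bits)
		skey ^= bits;
	printf("collected 0x%lx, key now 0x%x\n", bits, (unsigned)skey);
	return 0;
}

As the comment above page_test_and_clear_dirty() warns, this read-modify-write of the key is not atomic, so the real helpers are only safe on pages that are not mapped in any address space (or, in the pgste case, under the pgste lock).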
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index f7ad8719d02d..5325c89a5843 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -1,6 +1,9 @@
1#ifndef __ARCH_S390_PERCPU__ 1#ifndef __ARCH_S390_PERCPU__
2#define __ARCH_S390_PERCPU__ 2#define __ARCH_S390_PERCPU__
3 3
4#include <linux/preempt.h>
5#include <asm/cmpxchg.h>
6
4/* 7/*
5 * s390 uses its own implementation for per cpu data, the offset of 8 * s390 uses its own implementation for per cpu data, the offset of
6 * the cpu local data area is cached in the cpu's lowcore memory. 9 * the cpu local data area is cached in the cpu's lowcore memory.
@@ -16,6 +19,71 @@
16#define ARCH_NEEDS_WEAK_PER_CPU 19#define ARCH_NEEDS_WEAK_PER_CPU
17#endif 20#endif
18 21
22#define arch_irqsafe_cpu_to_op(pcp, val, op) \
23do { \
24 typedef typeof(pcp) pcp_op_T__; \
25 pcp_op_T__ old__, new__, prev__; \
26 pcp_op_T__ *ptr__; \
27 preempt_disable(); \
28 ptr__ = __this_cpu_ptr(&(pcp)); \
29 prev__ = *ptr__; \
30 do { \
31 old__ = prev__; \
32 new__ = old__ op (val); \
33 switch (sizeof(*ptr__)) { \
34 case 8: \
35 prev__ = cmpxchg64(ptr__, old__, new__); \
36 break; \
37 default: \
38 prev__ = cmpxchg(ptr__, old__, new__); \
39 } \
40 } while (prev__ != old__); \
41 preempt_enable(); \
42} while (0)
43
44#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
45#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
46#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
47#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
48
49#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
50#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
51#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
52#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
53
54#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
55#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
56#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
57#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
58
59#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
60#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
61#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
62#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
63
64#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) \
65({ \
66 typedef typeof(pcp) pcp_op_T__; \
67 pcp_op_T__ ret__; \
68 pcp_op_T__ *ptr__; \
69 preempt_disable(); \
70 ptr__ = __this_cpu_ptr(&(pcp)); \
71 switch (sizeof(*ptr__)) { \
72 case 8: \
73 ret__ = cmpxchg64(ptr__, oval, nval); \
74 break; \
75 default: \
76 ret__ = cmpxchg(ptr__, oval, nval); \
77 } \
78 preempt_enable(); \
79 ret__; \
80})
81
82#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
83#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
84#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
85#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
86
19#include <asm-generic/percpu.h> 87#include <asm-generic/percpu.h>
20 88
21#endif /* __ARCH_S390_PERCPU__ */ 89#endif /* __ARCH_S390_PERCPU__ */
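The irqsafe_cpu_* family above gets its interrupt safety from a compare-and-swap retry loop rather than from masking interrupts: if an interrupt (or another context) changes the value between the read and the cmpxchg, the cmpxchg fails and the loop retries. The same pattern in portable user-space C (a sketch using the GCC/Clang __atomic builtins, not the kernel's cmpxchg()):

#include <stdio.h>

static void add_with_cas_loop(unsigned long *ptr, unsigned long val)
{
	unsigned long old = __atomic_load_n(ptr, __ATOMIC_RELAXED);
	unsigned long new;

	do {
		new = old + val;
		/* On failure, 'old' is refreshed with the current value,
		 * mirroring the prev__ != old__ retry in the macro above. */
	} while (!__atomic_compare_exchange_n(ptr, &old, new, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));
}

int main(void)
{
	unsigned long counter = 40;

	add_with_cas_loop(&counter, 2);
	printf("%lu\n", counter);	/* 42 */
	return 0;
}

The preempt_disable()/preempt_enable() pair in the macro only pins the task to one cpu so that __this_cpu_ptr() stays valid; the atomicity itself comes entirely from the cmpxchg.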
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 082eb4e50e8b..f6314af3b354 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -19,14 +19,13 @@
19 19
20#define check_pgt_cache() do {} while (0) 20#define check_pgt_cache() do {} while (0)
21 21
22unsigned long *crst_table_alloc(struct mm_struct *, int); 22unsigned long *crst_table_alloc(struct mm_struct *);
23void crst_table_free(struct mm_struct *, unsigned long *); 23void crst_table_free(struct mm_struct *, unsigned long *);
24void crst_table_free_rcu(struct mm_struct *, unsigned long *); 24void crst_table_free_rcu(struct mm_struct *, unsigned long *);
25 25
26unsigned long *page_table_alloc(struct mm_struct *); 26unsigned long *page_table_alloc(struct mm_struct *);
27void page_table_free(struct mm_struct *, unsigned long *); 27void page_table_free(struct mm_struct *, unsigned long *);
28void page_table_free_rcu(struct mm_struct *, unsigned long *); 28void page_table_free_rcu(struct mm_struct *, unsigned long *);
29void disable_noexec(struct mm_struct *, struct task_struct *);
30 29
31static inline void clear_table(unsigned long *s, unsigned long val, size_t n) 30static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
32{ 31{
@@ -50,9 +49,6 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
50static inline void crst_table_init(unsigned long *crst, unsigned long entry) 49static inline void crst_table_init(unsigned long *crst, unsigned long entry)
51{ 50{
52 clear_table(crst, entry, sizeof(unsigned long)*2048); 51 clear_table(crst, entry, sizeof(unsigned long)*2048);
53 crst = get_shadow_table(crst);
54 if (crst)
55 clear_table(crst, entry, sizeof(unsigned long)*2048);
56} 52}
57 53
58#ifndef __s390x__ 54#ifndef __s390x__
@@ -69,10 +65,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
69#define pmd_free(mm, x) do { } while (0) 65#define pmd_free(mm, x) do { } while (0)
70 66
71#define pgd_populate(mm, pgd, pud) BUG() 67#define pgd_populate(mm, pgd, pud) BUG()
72#define pgd_populate_kernel(mm, pgd, pud) BUG()
73
74#define pud_populate(mm, pud, pmd) BUG() 68#define pud_populate(mm, pud, pmd) BUG()
75#define pud_populate_kernel(mm, pud, pmd) BUG()
76 69
77#else /* __s390x__ */ 70#else /* __s390x__ */
78 71
@@ -90,7 +83,7 @@ void crst_table_downgrade(struct mm_struct *, unsigned long limit);
90 83
91static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) 84static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
92{ 85{
93 unsigned long *table = crst_table_alloc(mm, mm->context.noexec); 86 unsigned long *table = crst_table_alloc(mm);
94 if (table) 87 if (table)
95 crst_table_init(table, _REGION3_ENTRY_EMPTY); 88 crst_table_init(table, _REGION3_ENTRY_EMPTY);
96 return (pud_t *) table; 89 return (pud_t *) table;
@@ -99,43 +92,21 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
99 92
100static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) 93static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
101{ 94{
102 unsigned long *table = crst_table_alloc(mm, mm->context.noexec); 95 unsigned long *table = crst_table_alloc(mm);
103 if (table) 96 if (table)
104 crst_table_init(table, _SEGMENT_ENTRY_EMPTY); 97 crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
105 return (pmd_t *) table; 98 return (pmd_t *) table;
106} 99}
107#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd) 100#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
108 101
109static inline void pgd_populate_kernel(struct mm_struct *mm,
110 pgd_t *pgd, pud_t *pud)
111{
112 pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
113}
114
115static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) 102static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
116{ 103{
117 pgd_populate_kernel(mm, pgd, pud); 104 pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
118 if (mm->context.noexec) {
119 pgd = get_shadow_table(pgd);
120 pud = get_shadow_table(pud);
121 pgd_populate_kernel(mm, pgd, pud);
122 }
123}
124
125static inline void pud_populate_kernel(struct mm_struct *mm,
126 pud_t *pud, pmd_t *pmd)
127{
128 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
129} 105}
130 106
131static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) 107static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
132{ 108{
133 pud_populate_kernel(mm, pud, pmd); 109 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
134 if (mm->context.noexec) {
135 pud = get_shadow_table(pud);
136 pmd = get_shadow_table(pmd);
137 pud_populate_kernel(mm, pud, pmd);
138 }
139} 110}
140 111
141#endif /* __s390x__ */ 112#endif /* __s390x__ */
@@ -143,29 +114,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
143static inline pgd_t *pgd_alloc(struct mm_struct *mm) 114static inline pgd_t *pgd_alloc(struct mm_struct *mm)
144{ 115{
145 spin_lock_init(&mm->context.list_lock); 116 spin_lock_init(&mm->context.list_lock);
146 INIT_LIST_HEAD(&mm->context.crst_list);
147 INIT_LIST_HEAD(&mm->context.pgtable_list); 117 INIT_LIST_HEAD(&mm->context.pgtable_list);
148 return (pgd_t *) 118 return (pgd_t *) crst_table_alloc(mm);
149 crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
150} 119}
151#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd) 120#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
152 121
153static inline void pmd_populate_kernel(struct mm_struct *mm,
154 pmd_t *pmd, pte_t *pte)
155{
156 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
157}
158
159static inline void pmd_populate(struct mm_struct *mm, 122static inline void pmd_populate(struct mm_struct *mm,
160 pmd_t *pmd, pgtable_t pte) 123 pmd_t *pmd, pgtable_t pte)
161{ 124{
162 pmd_populate_kernel(mm, pmd, pte); 125 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
163 if (mm->context.noexec) {
164 pmd = get_shadow_table(pmd);
165 pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
166 }
167} 126}
168 127
128#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
129
169#define pmd_pgtable(pmd) \ 130#define pmd_pgtable(pmd) \
170 (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE) 131 (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
171 132
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 02ace3491c51..c4773a2ef3d3 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -31,9 +31,8 @@
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/mm_types.h> 33#include <linux/mm_types.h>
34#include <asm/bitops.h>
35#include <asm/bug.h> 34#include <asm/bug.h>
36#include <asm/processor.h> 35#include <asm/page.h>
37 36
38extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); 37extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
39extern void paging_init(void); 38extern void paging_init(void);
@@ -243,11 +242,13 @@ extern unsigned long VMALLOC_START;
243/* Software bits in the page table entry */ 242/* Software bits in the page table entry */
244#define _PAGE_SWT 0x001 /* SW pte type bit t */ 243#define _PAGE_SWT 0x001 /* SW pte type bit t */
245#define _PAGE_SWX 0x002 /* SW pte type bit x */ 244#define _PAGE_SWX 0x002 /* SW pte type bit x */
246#define _PAGE_SPECIAL 0x004 /* SW associated with special page */ 245#define _PAGE_SWC 0x004 /* SW pte changed bit (for KVM) */
246#define _PAGE_SWR 0x008 /* SW pte referenced bit (for KVM) */
247#define _PAGE_SPECIAL 0x010 /* SW associated with special page */
247#define __HAVE_ARCH_PTE_SPECIAL 248#define __HAVE_ARCH_PTE_SPECIAL
248 249
249/* Set of bits not changed in pte_modify */ 250/* Set of bits not changed in pte_modify */
250#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL) 251#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)
251 252
252/* Six different types of pages. */ 253/* Six different types of pages. */
253#define _PAGE_TYPE_EMPTY 0x400 254#define _PAGE_TYPE_EMPTY 0x400
@@ -256,8 +257,6 @@ extern unsigned long VMALLOC_START;
256#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ 257#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
257#define _PAGE_TYPE_RO 0x200 258#define _PAGE_TYPE_RO 0x200
258#define _PAGE_TYPE_RW 0x000 259#define _PAGE_TYPE_RW 0x000
259#define _PAGE_TYPE_EX_RO 0x202
260#define _PAGE_TYPE_EX_RW 0x002
261 260
262/* 261/*
263 * Only four types for huge pages, using the invalid bit and protection bit 262 * Only four types for huge pages, using the invalid bit and protection bit
@@ -287,8 +286,6 @@ extern unsigned long VMALLOC_START;
287 * _PAGE_TYPE_FILE 11?1 -> 11?1 286 * _PAGE_TYPE_FILE 11?1 -> 11?1
288 * _PAGE_TYPE_RO 0100 -> 1100 287 * _PAGE_TYPE_RO 0100 -> 1100
289 * _PAGE_TYPE_RW 0000 -> 1000 288 * _PAGE_TYPE_RW 0000 -> 1000
290 * _PAGE_TYPE_EX_RO 0110 -> 1110
291 * _PAGE_TYPE_EX_RW 0010 -> 1010
292 * 289 *
293 * pte_none is true for bits combinations 1000, 1010, 1100, 1110 290 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
294 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 291 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
@@ -297,14 +294,17 @@ extern unsigned long VMALLOC_START;
297 */ 294 */
298 295
299/* Page status table bits for virtualization */ 296/* Page status table bits for virtualization */
300#define RCP_PCL_BIT 55 297#define RCP_ACC_BITS 0xf000000000000000UL
301#define RCP_HR_BIT 54 298#define RCP_FP_BIT 0x0800000000000000UL
302#define RCP_HC_BIT 53 299#define RCP_PCL_BIT 0x0080000000000000UL
303#define RCP_GR_BIT 50 300#define RCP_HR_BIT 0x0040000000000000UL
304#define RCP_GC_BIT 49 301#define RCP_HC_BIT 0x0020000000000000UL
305 302#define RCP_GR_BIT 0x0004000000000000UL
306/* User dirty bit for KVM's migration feature */ 303#define RCP_GC_BIT 0x0002000000000000UL
307#define KVM_UD_BIT 47 304
305/* User dirty / referenced bit for KVM's migration feature */
306#define KVM_UR_BIT 0x0000800000000000UL
307#define KVM_UC_BIT 0x0000400000000000UL
308 308
309#ifndef __s390x__ 309#ifndef __s390x__
310 310
@@ -377,85 +377,54 @@ extern unsigned long VMALLOC_START;
377#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ 377#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
378 _ASCE_ALT_EVENT) 378 _ASCE_ALT_EVENT)
379 379
380/* Bits in the storage key */
381#define _PAGE_CHANGED 0x02 /* HW changed bit */
382#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
383
384/* 380/*
385 * Page protection definitions. 381 * Page protection definitions.
386 */ 382 */
387#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) 383#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
388#define PAGE_RO __pgprot(_PAGE_TYPE_RO) 384#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
389#define PAGE_RW __pgprot(_PAGE_TYPE_RW) 385#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
390#define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO)
391#define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW)
392 386
393#define PAGE_KERNEL PAGE_RW 387#define PAGE_KERNEL PAGE_RW
394#define PAGE_COPY PAGE_RO 388#define PAGE_COPY PAGE_RO
395 389
396/* 390/*
397 * Dependent on the EXEC_PROTECT option s390 can do execute protection. 391 * On s390 the page table entry has an invalid bit and a read-only bit.
398 * Write permission always implies read permission. In theory with a 392 * Read permission implies execute permission and write permission
399 * primary/secondary page table execute only can be implemented but 393 * implies read permission.
400 * it would cost an additional bit in the pte to distinguish all the
401 * different pte types. To avoid that execute permission currently
402 * implies read permission as well.
403 */ 394 */
404 /*xwr*/ 395 /*xwr*/
405#define __P000 PAGE_NONE 396#define __P000 PAGE_NONE
406#define __P001 PAGE_RO 397#define __P001 PAGE_RO
407#define __P010 PAGE_RO 398#define __P010 PAGE_RO
408#define __P011 PAGE_RO 399#define __P011 PAGE_RO
409#define __P100 PAGE_EX_RO 400#define __P100 PAGE_RO
410#define __P101 PAGE_EX_RO 401#define __P101 PAGE_RO
411#define __P110 PAGE_EX_RO 402#define __P110 PAGE_RO
412#define __P111 PAGE_EX_RO 403#define __P111 PAGE_RO
413 404
414#define __S000 PAGE_NONE 405#define __S000 PAGE_NONE
415#define __S001 PAGE_RO 406#define __S001 PAGE_RO
416#define __S010 PAGE_RW 407#define __S010 PAGE_RW
417#define __S011 PAGE_RW 408#define __S011 PAGE_RW
418#define __S100 PAGE_EX_RO 409#define __S100 PAGE_RO
419#define __S101 PAGE_EX_RO 410#define __S101 PAGE_RO
420#define __S110 PAGE_EX_RW 411#define __S110 PAGE_RW
421#define __S111 PAGE_EX_RW 412#define __S111 PAGE_RW
422
423#ifndef __s390x__
424# define PxD_SHADOW_SHIFT 1
425#else /* __s390x__ */
426# define PxD_SHADOW_SHIFT 2
427#endif /* __s390x__ */
428 413
429static inline void *get_shadow_table(void *table) 414static inline int mm_exclusive(struct mm_struct *mm)
430{ 415{
431 unsigned long addr, offset; 416 return likely(mm == current->active_mm &&
432 struct page *page; 417 atomic_read(&mm->context.attach_count) <= 1);
433
434 addr = (unsigned long) table;
435 offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
436 page = virt_to_page((void *)(addr ^ offset));
437 return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
438} 418}
439 419
440/* 420static inline int mm_has_pgste(struct mm_struct *mm)
441 * Certain architectures need to do special things when PTEs
442 * within a page table are directly modified. Thus, the following
443 * hook is made available.
444 */
445static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
446 pte_t *ptep, pte_t entry)
447{ 421{
448 *ptep = entry; 422#ifdef CONFIG_PGSTE
449 if (mm->context.noexec) { 423 if (unlikely(mm->context.has_pgste))
450 if (!(pte_val(entry) & _PAGE_INVALID) && 424 return 1;
451 (pte_val(entry) & _PAGE_SWX)) 425#endif
452 pte_val(entry) |= _PAGE_RO; 426 return 0;
453 else
454 pte_val(entry) = _PAGE_TYPE_EMPTY;
455 ptep[PTRS_PER_PTE] = entry;
456 }
457} 427}
458
459/* 428/*
460 * pgd/pmd/pte query functions 429 * pgd/pmd/pte query functions
461 */ 430 */
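With _PAGE_TYPE_EX_RO/_PAGE_TYPE_EX_RW gone, the __P/__S tables above fold every execute combination into the plain RO/RW types, which is exactly what the rewritten comment says: read permission implies execute permission. A tiny lookup demo of the new private-mapping table (user-space sketch; strings stand in for the pgprot values):

#include <stdio.h>

/* Indexed by the xwr bit pattern, like __P000..__P111 above. */
static const char *prot_private[8] = {
	"PAGE_NONE", "PAGE_RO", "PAGE_RO", "PAGE_RO",
	"PAGE_RO",   "PAGE_RO", "PAGE_RO", "PAGE_RO",
};

int main(void)
{
	/* xwr = 100: a PROT_EXEC-only private mapping now gets an
	 * ordinary read-only pte instead of PAGE_EX_RO. */
	printf("__P100 -> %s\n", prot_private[4]);
	return 0;
}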
@@ -568,52 +537,127 @@ static inline int pte_special(pte_t pte)
568} 537}
569 538
570#define __HAVE_ARCH_PTE_SAME 539#define __HAVE_ARCH_PTE_SAME
571#define pte_same(a,b) (pte_val(a) == pte_val(b)) 540static inline int pte_same(pte_t a, pte_t b)
541{
542 return pte_val(a) == pte_val(b);
543}
572 544
573static inline void rcp_lock(pte_t *ptep) 545static inline pgste_t pgste_get_lock(pte_t *ptep)
574{ 546{
547 unsigned long new = 0;
575#ifdef CONFIG_PGSTE 548#ifdef CONFIG_PGSTE
576 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 549 unsigned long old;
550
577 preempt_disable(); 551 preempt_disable();
578 while (test_and_set_bit(RCP_PCL_BIT, pgste)) 552 asm(
579 ; 553 " lg %0,%2\n"
554 "0: lgr %1,%0\n"
555 " nihh %0,0xff7f\n" /* clear RCP_PCL_BIT in old */
556 " oihh %1,0x0080\n" /* set RCP_PCL_BIT in new */
557 " csg %0,%1,%2\n"
558 " jl 0b\n"
559 : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
560 : "Q" (ptep[PTRS_PER_PTE]) : "cc");
580#endif 561#endif
562 return __pgste(new);
581} 563}
582 564
583static inline void rcp_unlock(pte_t *ptep) 565static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
584{ 566{
585#ifdef CONFIG_PGSTE 567#ifdef CONFIG_PGSTE
586 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 568 asm(
587 clear_bit(RCP_PCL_BIT, pgste); 569 " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */
570 " stg %1,%0\n"
571 : "=Q" (ptep[PTRS_PER_PTE])
572 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
588 preempt_enable(); 573 preempt_enable();
589#endif 574#endif
590} 575}
591 576
592/* forward declaration for SetPageUptodate in page-flags.h */ 577static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
593static inline void page_clear_dirty(struct page *page, int mapped);
594#include <linux/page-flags.h>
595
596static inline void ptep_rcp_copy(pte_t *ptep)
597{ 578{
598#ifdef CONFIG_PGSTE 579#ifdef CONFIG_PGSTE
599 struct page *page = virt_to_page(pte_val(*ptep)); 580 unsigned long pfn, bits;
600 unsigned int skey; 581 unsigned char skey;
601 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 582
602 583 pfn = pte_val(*ptep) >> PAGE_SHIFT;
603 skey = page_get_storage_key(page_to_phys(page)); 584 skey = page_get_storage_key(pfn);
604 if (skey & _PAGE_CHANGED) { 585 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
605 set_bit_simple(RCP_GC_BIT, pgste); 586 /* Clear page changed & referenced bit in the storage key */
606 set_bit_simple(KVM_UD_BIT, pgste); 587 if (bits) {
588 skey ^= bits;
589 page_set_storage_key(pfn, skey, 1);
607 } 590 }
608 if (skey & _PAGE_REFERENCED) 591 /* Transfer page changed & referenced bit to guest bits in pgste */
609 set_bit_simple(RCP_GR_BIT, pgste); 592 pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
610 if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) { 593 /* Get host changed & referenced bits from pgste */
611 SetPageDirty(page); 594 bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
612 set_bit_simple(KVM_UD_BIT, pgste); 595 /* Clear host bits in pgste. */
613 } 596 pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
614 if (test_and_clear_bit_simple(RCP_HR_BIT, pgste)) 597 pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
615 SetPageReferenced(page); 598 /* Copy page access key and fetch protection bit to pgste */
599 pgste_val(pgste) |=
600 (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
601 /* Transfer changed and referenced to kvm user bits */
602 pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */
603 /* Transfer changed & referenced to pte software bits */
604 pte_val(*ptep) |= bits << 1; /* _PAGE_SWR & _PAGE_SWC */
616#endif 605#endif
606 return pgste;
607
608}
609
610static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
611{
612#ifdef CONFIG_PGSTE
613 int young;
614
615 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
616 /* Transfer page referenced bit to pte software bit (host view) */
617 if (young || (pgste_val(pgste) & RCP_HR_BIT))
618 pte_val(*ptep) |= _PAGE_SWR;
619 /* Clear host referenced bit in pgste. */
620 pgste_val(pgste) &= ~RCP_HR_BIT;
621 /* Transfer page referenced bit to guest bit in pgste */
622 pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
623#endif
624 return pgste;
625
626}
627
628static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
629{
630#ifdef CONFIG_PGSTE
631 unsigned long pfn;
632 unsigned long okey, nkey;
633
634 pfn = pte_val(*ptep) >> PAGE_SHIFT;
635 okey = nkey = page_get_storage_key(pfn);
636 nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
637 /* Set page access key and fetch protection bit from pgste */
638 nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
639 if (okey != nkey)
640 page_set_storage_key(pfn, nkey, 1);
641#endif
642}
643
644/*
645 * Certain architectures need to do special things when PTEs
646 * within a page table are directly modified. Thus, the following
647 * hook is made available.
648 */
649static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
650 pte_t *ptep, pte_t entry)
651{
652 pgste_t pgste;
653
654 if (mm_has_pgste(mm)) {
655 pgste = pgste_get_lock(ptep);
656 pgste_set_pte(ptep, pgste);
657 *ptep = entry;
658 pgste_set_unlock(ptep, pgste);
659 } else
660 *ptep = entry;
617} 661}
618 662
619/* 663/*
@@ -627,19 +671,19 @@ static inline int pte_write(pte_t pte)
627 671
628static inline int pte_dirty(pte_t pte) 672static inline int pte_dirty(pte_t pte)
629{ 673{
630 /* A pte is neither clean nor dirty on s/390. The dirty bit 674#ifdef CONFIG_PGSTE
631 * is in the storage key. See page_test_and_clear_dirty for 675 if (pte_val(pte) & _PAGE_SWC)
632 * details. 676 return 1;
633 */ 677#endif
634 return 0; 678 return 0;
635} 679}
636 680
637static inline int pte_young(pte_t pte) 681static inline int pte_young(pte_t pte)
638{ 682{
639 /* A pte is neither young nor old on s/390. The young bit 683#ifdef CONFIG_PGSTE
640 * is in the storage key. See page_test_and_clear_young for 684 if (pte_val(pte) & _PAGE_SWR)
641 * details. 685 return 1;
642 */ 686#endif
643 return 0; 687 return 0;
644} 688}
645 689
@@ -647,64 +691,30 @@ static inline int pte_young(pte_t pte)
647 * pgd/pmd/pte modification functions 691 * pgd/pmd/pte modification functions
648 */ 692 */
649 693
650#ifndef __s390x__ 694static inline void pgd_clear(pgd_t *pgd)
651
652#define pgd_clear(pgd) do { } while (0)
653#define pud_clear(pud) do { } while (0)
654
655#else /* __s390x__ */
656
657static inline void pgd_clear_kernel(pgd_t * pgd)
658{ 695{
696#ifdef __s390x__
659 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 697 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
660 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; 698 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
699#endif
661} 700}
662 701
663static inline void pgd_clear(pgd_t * pgd) 702static inline void pud_clear(pud_t *pud)
664{
665 pgd_t *shadow = get_shadow_table(pgd);
666
667 pgd_clear_kernel(pgd);
668 if (shadow)
669 pgd_clear_kernel(shadow);
670}
671
672static inline void pud_clear_kernel(pud_t *pud)
673{ 703{
704#ifdef __s390x__
674 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 705 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
675 pud_val(*pud) = _REGION3_ENTRY_EMPTY; 706 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
707#endif
676} 708}
677 709
678static inline void pud_clear(pud_t *pud) 710static inline void pmd_clear(pmd_t *pmdp)
679{
680 pud_t *shadow = get_shadow_table(pud);
681
682 pud_clear_kernel(pud);
683 if (shadow)
684 pud_clear_kernel(shadow);
685}
686
687#endif /* __s390x__ */
688
689static inline void pmd_clear_kernel(pmd_t * pmdp)
690{ 711{
691 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; 712 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
692} 713}
693 714
694static inline void pmd_clear(pmd_t *pmd)
695{
696 pmd_t *shadow = get_shadow_table(pmd);
697
698 pmd_clear_kernel(pmd);
699 if (shadow)
700 pmd_clear_kernel(shadow);
701}
702
703static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 715static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
704{ 716{
705 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 717 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
706 if (mm->context.noexec)
707 pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
708} 718}
709 719
710/* 720/*
@@ -734,35 +744,27 @@ static inline pte_t pte_mkwrite(pte_t pte)
734 744
735static inline pte_t pte_mkclean(pte_t pte) 745static inline pte_t pte_mkclean(pte_t pte)
736{ 746{
737 /* The only user of pte_mkclean is the fork() code. 747#ifdef CONFIG_PGSTE
738 We must *not* clear the *physical* page dirty bit 748 pte_val(pte) &= ~_PAGE_SWC;
739 just because fork() wants to clear the dirty bit in 749#endif
740 *one* of the page's mappings. So we just do nothing. */
741 return pte; 750 return pte;
742} 751}
743 752
744static inline pte_t pte_mkdirty(pte_t pte) 753static inline pte_t pte_mkdirty(pte_t pte)
745{ 754{
746 /* We do not explicitly set the dirty bit because the
747 * sske instruction is slow. It is faster to let the
748 * next instruction set the dirty bit.
749 */
750 return pte; 755 return pte;
751} 756}
752 757
753static inline pte_t pte_mkold(pte_t pte) 758static inline pte_t pte_mkold(pte_t pte)
754{ 759{
755 /* S/390 doesn't keep its dirty/referenced bit in the pte. 760#ifdef CONFIG_PGSTE
756 * There is no point in clearing the real referenced bit. 761 pte_val(pte) &= ~_PAGE_SWR;
757 */ 762#endif
758 return pte; 763 return pte;
759} 764}
760 765
761static inline pte_t pte_mkyoung(pte_t pte) 766static inline pte_t pte_mkyoung(pte_t pte)
762{ 767{
763 /* S/390 doesn't keep its dirty/referenced bit in the pte.
764 * There is no point in setting the real referenced bit.
765 */
766 return pte; 768 return pte;
767} 769}
768 770
@@ -800,62 +802,60 @@ static inline pte_t pte_mkhuge(pte_t pte)
800} 802}
801#endif 803#endif
802 804
803#ifdef CONFIG_PGSTE
804/* 805/*
805 * Get (and clear) the user dirty bit for a PTE. 806 * Get (and clear) the user dirty bit for a pte.
806 */ 807 */
807static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm, 808static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
808 pte_t *ptep) 809 pte_t *ptep)
809{ 810{
810 int dirty; 811 pgste_t pgste;
811 unsigned long *pgste; 812 int dirty = 0;
812 struct page *page; 813
813 unsigned int skey; 814 if (mm_has_pgste(mm)) {
814 815 pgste = pgste_get_lock(ptep);
815 if (!mm->context.has_pgste) 816 pgste = pgste_update_all(ptep, pgste);
816 return -EINVAL; 817 dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
817 rcp_lock(ptep); 818 pgste_val(pgste) &= ~KVM_UC_BIT;
818 pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 819 pgste_set_unlock(ptep, pgste);
819 page = virt_to_page(pte_val(*ptep)); 820 return dirty;
820 skey = page_get_storage_key(page_to_phys(page));
821 if (skey & _PAGE_CHANGED) {
822 set_bit_simple(RCP_GC_BIT, pgste);
823 set_bit_simple(KVM_UD_BIT, pgste);
824 } 821 }
825 if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
826 SetPageDirty(page);
827 set_bit_simple(KVM_UD_BIT, pgste);
828 }
829 dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
830 if (skey & _PAGE_CHANGED)
831 page_clear_dirty(page, 1);
832 rcp_unlock(ptep);
833 return dirty; 822 return dirty;
834} 823}
835#endif 824
825/*
826 * Get (and clear) the user referenced bit for a pte.
827 */
828static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
829 pte_t *ptep)
830{
831 pgste_t pgste;
832 int young = 0;
833
834 if (mm_has_pgste(mm)) {
835 pgste = pgste_get_lock(ptep);
836 pgste = pgste_update_young(ptep, pgste);
837 young = !!(pgste_val(pgste) & KVM_UR_BIT);
838 pgste_val(pgste) &= ~KVM_UR_BIT;
839 pgste_set_unlock(ptep, pgste);
840 }
841 return young;
842}
836 843
837#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 844#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
838static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 845static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
839 unsigned long addr, pte_t *ptep) 846 unsigned long addr, pte_t *ptep)
840{ 847{
841#ifdef CONFIG_PGSTE 848 pgste_t pgste;
842 unsigned long physpage; 849 pte_t pte;
843 int young;
844 unsigned long *pgste;
845 850
846 if (!vma->vm_mm->context.has_pgste) 851 if (mm_has_pgste(vma->vm_mm)) {
847 return 0; 852 pgste = pgste_get_lock(ptep);
848 physpage = pte_val(*ptep) & PAGE_MASK; 853 pgste = pgste_update_young(ptep, pgste);
849 pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 854 pte = *ptep;
850 855 *ptep = pte_mkold(pte);
851 young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0); 856 pgste_set_unlock(ptep, pgste);
852 rcp_lock(ptep); 857 return pte_young(pte);
853 if (young) 858 }
854 set_bit_simple(RCP_GR_BIT, pgste);
855 young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
856 rcp_unlock(ptep);
857 return young;
858#endif
859 return 0; 859 return 0;
860} 860}
861 861
@@ -867,10 +867,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
867 * On s390 reference bits are in the storage key and never in the TLB. 867 * On s390 reference bits are in the storage key and never in the TLB.
868 * With virtualization we handle the reference bit; without it 868 * With virtualization we handle the reference bit; without it
869 * we can simply return */ 869 * we can simply return */
870#ifdef CONFIG_PGSTE
871 return ptep_test_and_clear_young(vma, address, ptep); 870 return ptep_test_and_clear_young(vma, address, ptep);
872#endif
873 return 0;
874} 871}
875 872
876static inline void __ptep_ipte(unsigned long address, pte_t *ptep) 873static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -890,25 +887,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
890 } 887 }
891} 888}
892 889
893static inline void ptep_invalidate(struct mm_struct *mm,
894 unsigned long address, pte_t *ptep)
895{
896 if (mm->context.has_pgste) {
897 rcp_lock(ptep);
898 __ptep_ipte(address, ptep);
899 ptep_rcp_copy(ptep);
900 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
901 rcp_unlock(ptep);
902 return;
903 }
904 __ptep_ipte(address, ptep);
905 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
906 if (mm->context.noexec) {
907 __ptep_ipte(address, ptep + PTRS_PER_PTE);
908 pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
909 }
910}
911
912/* 890/*
913 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush 891 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
914 * both clear the TLB for the unmapped pte. The reason is that 892 * both clear the TLB for the unmapped pte. The reason is that
@@ -923,24 +901,72 @@ static inline void ptep_invalidate(struct mm_struct *mm,
923 * is a nop. 901 * is a nop.
924 */ 902 */
925#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 903#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
926#define ptep_get_and_clear(__mm, __address, __ptep) \ 904static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
927({ \ 905 unsigned long address, pte_t *ptep)
928 pte_t __pte = *(__ptep); \ 906{
929 (__mm)->context.flush_mm = 1; \ 907 pgste_t pgste;
930 if (atomic_read(&(__mm)->context.attach_count) > 1 || \ 908 pte_t pte;
931 (__mm) != current->active_mm) \ 909
932 ptep_invalidate(__mm, __address, __ptep); \ 910 mm->context.flush_mm = 1;
933 else \ 911 if (mm_has_pgste(mm))
934 pte_clear((__mm), (__address), (__ptep)); \ 912 pgste = pgste_get_lock(ptep);
935 __pte; \ 913
936}) 914 pte = *ptep;
915 if (!mm_exclusive(mm))
916 __ptep_ipte(address, ptep);
917 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
918
919 if (mm_has_pgste(mm)) {
920 pgste = pgste_update_all(&pte, pgste);
921 pgste_set_unlock(ptep, pgste);
922 }
923 return pte;
924}
925
926#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
927static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
928 unsigned long address,
929 pte_t *ptep)
930{
931 pte_t pte;
932
933 mm->context.flush_mm = 1;
934 if (mm_has_pgste(mm))
935 pgste_get_lock(ptep);
936
937 pte = *ptep;
938 if (!mm_exclusive(mm))
939 __ptep_ipte(address, ptep);
940 return pte;
941}
942
943static inline void ptep_modify_prot_commit(struct mm_struct *mm,
944 unsigned long address,
945 pte_t *ptep, pte_t pte)
946{
947 *ptep = pte;
948 if (mm_has_pgste(mm))
949 pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
950}
937 951
938#define __HAVE_ARCH_PTEP_CLEAR_FLUSH 952#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
939static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, 953static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
940 unsigned long address, pte_t *ptep) 954 unsigned long address, pte_t *ptep)
941{ 955{
942 pte_t pte = *ptep; 956 pgste_t pgste;
943 ptep_invalidate(vma->vm_mm, address, ptep); 957 pte_t pte;
958
959 if (mm_has_pgste(vma->vm_mm))
960 pgste = pgste_get_lock(ptep);
961
962 pte = *ptep;
963 __ptep_ipte(address, ptep);
964 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
965
966 if (mm_has_pgste(vma->vm_mm)) {
967 pgste = pgste_update_all(&pte, pgste);
968 pgste_set_unlock(ptep, pgste);
969 }
944 return pte; 970 return pte;
945} 971}
946 972
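The rewritten ptep_get_and_clear() and ptep_modify_prot_start() above both hang their flush decision on the mm_exclusive() helper from earlier in this file: when the mm is the current task's active mm with at most one attachment, no other cpu can hold a stale TLB entry, so the IPTE can be skipped and the flush deferred via the context.flush_mm flag set just before. A toy model of that decision (user-space sketch; a printf stands in for __ptep_ipte()):

#include <stdio.h>

struct toy_mm {
	int attach_count;
	int is_active_mm;
};

/* Mirrors mm_exclusive(): only the current task uses this mm. */
static int mm_exclusive(struct toy_mm *mm)
{
	return mm->is_active_mm && mm->attach_count <= 1;
}

static void get_and_clear(struct toy_mm *mm, unsigned long *pte)
{
	if (!mm_exclusive(mm))
		printf("IPTE: flush pte %p now\n", (void *)pte);
	else
		printf("defer flush (flush_mm already set)\n");
	*pte = 0;	/* _PAGE_TYPE_EMPTY in the real code */
}

int main(void)
{
	unsigned long pte = 0x1000;
	struct toy_mm shared = { .attach_count = 2, .is_active_mm = 1 };
	struct toy_mm priv   = { .attach_count = 1, .is_active_mm = 1 };

	get_and_clear(&shared, &pte);
	pte = 0x1000;
	get_and_clear(&priv, &pte);
	return 0;
}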
@@ -953,76 +979,67 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
953 */ 979 */
954#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL 980#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
955static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, 981static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
956 unsigned long addr, 982 unsigned long address,
957 pte_t *ptep, int full) 983 pte_t *ptep, int full)
958{ 984{
959 pte_t pte = *ptep; 985 pgste_t pgste;
986 pte_t pte;
987
988 if (mm_has_pgste(mm))
989 pgste = pgste_get_lock(ptep);
990
991 pte = *ptep;
992 if (!full)
993 __ptep_ipte(address, ptep);
994 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
960 995
961 if (full) 996 if (mm_has_pgste(mm)) {
962 pte_clear(mm, addr, ptep); 997 pgste = pgste_update_all(&pte, pgste);
963 else 998 pgste_set_unlock(ptep, pgste);
964 ptep_invalidate(mm, addr, ptep); 999 }
965 return pte; 1000 return pte;
966} 1001}
967 1002
968#define __HAVE_ARCH_PTEP_SET_WRPROTECT 1003#define __HAVE_ARCH_PTEP_SET_WRPROTECT
969#define ptep_set_wrprotect(__mm, __addr, __ptep) \ 1004static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
970({ \ 1005 unsigned long address, pte_t *ptep)
971 pte_t __pte = *(__ptep); \ 1006{
972 if (pte_write(__pte)) { \ 1007 pgste_t pgste;
973 (__mm)->context.flush_mm = 1; \ 1008 pte_t pte = *ptep;
974 if (atomic_read(&(__mm)->context.attach_count) > 1 || \
975 (__mm) != current->active_mm) \
976 ptep_invalidate(__mm, __addr, __ptep); \
977 set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
978 } \
979})
980 1009
981#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 1010 if (pte_write(pte)) {
982#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ 1011 mm->context.flush_mm = 1;
983({ \ 1012 if (mm_has_pgste(mm))
984 int __changed = !pte_same(*(__ptep), __entry); \ 1013 pgste = pgste_get_lock(ptep);
985 if (__changed) { \
986 ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
987 set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
988 } \
989 __changed; \
990})
991 1014
992/* 1015 if (!mm_exclusive(mm))
993 * Test and clear dirty bit in storage key. 1016 __ptep_ipte(address, ptep);
994 * We can't clear the changed bit atomically. This is a potential 1017 *ptep = pte_wrprotect(pte);
995 * race against modification of the referenced bit. This function
996 * should therefore only be called if it is not mapped in any
997 * address space.
998 */
999#define __HAVE_ARCH_PAGE_TEST_DIRTY
1000static inline int page_test_dirty(struct page *page)
1001{
1002 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
1003}
1004 1018
1005#define __HAVE_ARCH_PAGE_CLEAR_DIRTY 1019 if (mm_has_pgste(mm))
1006static inline void page_clear_dirty(struct page *page, int mapped) 1020 pgste_set_unlock(ptep, pgste);
1007{ 1021 }
1008 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped); 1022 return pte;
1009} 1023}
1010 1024
1011/* 1025#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1012 * Test and clear referenced bit in storage key. 1026static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1013 */ 1027 unsigned long address, pte_t *ptep,
1014#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 1028 pte_t entry, int dirty)
1015static inline int page_test_and_clear_young(struct page *page)
1016{ 1029{
1017 unsigned long physpage = page_to_phys(page); 1030 pgste_t pgste;
1018 int ccode; 1031
1019 1032 if (pte_same(*ptep, entry))
1020 asm volatile( 1033 return 0;
1021 " rrbe 0,%1\n" 1034 if (mm_has_pgste(vma->vm_mm))
1022 " ipm %0\n" 1035 pgste = pgste_get_lock(ptep);
1023 " srl %0,28\n" 1036
1024 : "=d" (ccode) : "a" (physpage) : "cc" ); 1037 __ptep_ipte(address, ptep);
1025 return ccode & 2; 1038 *ptep = entry;
1039
1040 if (mm_has_pgste(vma->vm_mm))
1041 pgste_set_unlock(ptep, pgste);
1042 return 1;
1026} 1043}
1027 1044
1028/* 1045/*
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 2c79b6416271..1300c3025334 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -84,6 +84,7 @@ struct thread_struct {
84 struct per_event per_event; /* Cause of the last PER trap */ 84 struct per_event per_event; /* Cause of the last PER trap */
85 /* pfault_wait is used to block the process on a pfault event */ 85 /* pfault_wait is used to block the process on a pfault event */
86 unsigned long pfault_wait; 86 unsigned long pfault_wait;
87 struct list_head list;
87}; 88};
88 89
89typedef struct thread_struct thread_struct; 90typedef struct thread_struct thread_struct;
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 29d5d6d4becc..b7a4f2eb0057 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -50,7 +50,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
50 /* 50 /*
51 * If the process only ran on the local cpu, do a local flush. 51 * If the process only ran on the local cpu, do a local flush.
52 */ 52 */
53 local_cpumask = cpumask_of_cpu(smp_processor_id()); 53 cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
54 if (cpumask_equal(mm_cpumask(mm), &local_cpumask)) 54 if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
55 __tlb_flush_local(); 55 __tlb_flush_local();
56 else 56 else
@@ -80,16 +80,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
80 * on all cpus instead of doing a local flush if the mm 80 * on all cpus instead of doing a local flush if the mm
81 * only ran on the local cpu. 81 * only ran on the local cpu.
82 */ 82 */
83 if (MACHINE_HAS_IDTE) { 83 if (MACHINE_HAS_IDTE)
84 if (mm->context.noexec)
85 __tlb_flush_idte((unsigned long)
86 get_shadow_table(mm->pgd) |
87 mm->context.asce_bits);
88 __tlb_flush_idte((unsigned long) mm->pgd | 84 __tlb_flush_idte((unsigned long) mm->pgd |
89 mm->context.asce_bits); 85 mm->context.asce_bits);
90 return; 86 else
91 } 87 __tlb_flush_full(mm);
92 __tlb_flush_full(mm);
93} 88}
94 89
95static inline void __tlb_flush_mm_cond(struct mm_struct * mm) 90static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
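The one-line change in __tlb_flush_full() above replaces the by-value cpumask_of_cpu(), whose on-stack struct copy grows with NR_CPUS, with cpumask_copy() from the constant mask that cpumask_of() points to; the local-versus-global decision is unchanged. A toy of that decision with a single word standing in for the cpumask (a sketch, not the kernel API):

#include <stdio.h>

/* One bit per cpu, like a one-word cpumask. */
static unsigned long mask_of(int cpu)
{
	return 1UL << cpu;
}

static void tlb_flush_full(unsigned long mm_ran_on, int this_cpu)
{
	if (mm_ran_on == mask_of(this_cpu))
		printf("local flush only\n");
	else
		printf("flush on all cpus\n");
}

int main(void)
{
	tlb_flush_full(0x1, 0);	/* ran only on cpu 0 -> local */
	tlb_flush_full(0x3, 0);	/* ran on cpus 0 and 1 -> global */
	return 0;
}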
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index e82152572377..9208e69245a0 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -385,6 +385,7 @@
385 385
386/* Ignore system calls that are also reachable via sys_socket */ 386/* Ignore system calls that are also reachable via sys_socket */
387#define __IGNORE_recvmmsg 387#define __IGNORE_recvmmsg
388#define __IGNORE_sendmmsg
388 389
389#define __ARCH_WANT_IPC_PARSE_VERSION 390#define __ARCH_WANT_IPC_PARSE_VERSION
390#define __ARCH_WANT_OLD_READDIR 391#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fe03c140002a..edfbd17d7082 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -124,13 +124,11 @@ int main(void)
124 DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer)); 124 DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
125 DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock)); 125 DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
126 DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task)); 126 DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
127 DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid));
127 DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info)); 128 DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
128 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); 129 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
129 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); 130 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
130 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); 131 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
131 DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
132 DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
133 DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
134 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); 132 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
135 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); 133 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
136 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); 134 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1b67fc6ebdc2..0476174dfff5 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -212,6 +212,7 @@ __switch_to:
212 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 212 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
213 lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 213 lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
214 st %r3,__LC_CURRENT # store task struct of next 214 st %r3,__LC_CURRENT # store task struct of next
215 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
215 st %r5,__LC_THREAD_INFO # store thread info of next 216 st %r5,__LC_THREAD_INFO # store thread info of next
216 ahi %r5,STACK_SIZE # end of kernel stack of next 217 ahi %r5,STACK_SIZE # end of kernel stack of next
217 st %r5,__LC_KERNEL_STACK # store end of kernel stack 218 st %r5,__LC_KERNEL_STACK # store end of kernel stack
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9fd864563499..d61967e2eab0 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -220,6 +220,7 @@ __switch_to:
 	lctl %c4,%c4,__TASK_pid(%r3)	# load pid to control reg. 4
 	lmg %r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	stg %r3,__LC_CURRENT		# store task struct of next
+	mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
 	stg %r5,__LC_THREAD_INFO	# store thread info of next
 	aghi %r5,STACK_SIZE		# end of kernel stack of next
 	stg %r5,__LC_KERNEL_STACK	# store end of kernel stack
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index ea5099c9709c..e204f9597aaf 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -32,6 +32,7 @@ static const struct irq_class intrclass_names[] = {
 	{.name = "VRT", .desc = "[EXT] Virtio" },
 	{.name = "SCP", .desc = "[EXT] Service Call" },
 	{.name = "IUC", .desc = "[EXT] IUCV" },
+	{.name = "CPM", .desc = "[EXT] CPU Measurement" },
 	{.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
 	{.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
 	{.name = "DAS", .desc = "[I/O] DASD" },
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index a895e69379f7..541a7509faeb 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -9,41 +9,26 @@
 
 #include <linux/compiler.h>
 #include <linux/cpu.h>
-#include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/fs.h>
 #include <linux/smp.h>
-#include <linux/stddef.h>
 #include <linux/slab.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/vmalloc.h>
-#include <linux/user.h>
 #include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
 #include <linux/tick.h>
-#include <linux/elfcore.h>
-#include <linux/kernel_stat.h>
 #include <linux/personality.h>
 #include <linux/syscalls.h>
 #include <linux/compat.h>
 #include <linux/kprobes.h>
 #include <linux/random.h>
-#include <asm/compat.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
+#include <linux/module.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/irq.h>
 #include <asm/timer.h>
 #include <asm/nmi.h>
+#include <asm/compat.h>
 #include <asm/smp.h>
 #include "entry.h"
 
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f5434d1ecb31..0c35dee10b00 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -305,8 +305,7 @@ static int set_amode_and_uaccess(unsigned long user_amode,
  */
 static int __init early_parse_switch_amode(char *p)
 {
-	if (user_mode != SECONDARY_SPACE_MODE)
-		user_mode = PRIMARY_SPACE_MODE;
+	user_mode = PRIMARY_SPACE_MODE;
 	return 0;
 }
 early_param("switch_amode", early_parse_switch_amode);
@@ -315,10 +314,6 @@ static int __init early_parse_user_mode(char *p)
 {
 	if (p && strcmp(p, "primary") == 0)
 		user_mode = PRIMARY_SPACE_MODE;
-#ifdef CONFIG_S390_EXEC_PROTECT
-	else if (p && strcmp(p, "secondary") == 0)
-		user_mode = SECONDARY_SPACE_MODE;
-#endif
 	else if (!p || strcmp(p, "home") == 0)
 		user_mode = HOME_SPACE_MODE;
 	else
@@ -327,31 +322,9 @@ static int __init early_parse_user_mode(char *p)
 }
 early_param("user_mode", early_parse_user_mode);
 
-#ifdef CONFIG_S390_EXEC_PROTECT
-/*
- * Enable execute protection?
- */
-static int __init early_parse_noexec(char *p)
-{
-	if (!strncmp(p, "off", 3))
-		return 0;
-	user_mode = SECONDARY_SPACE_MODE;
-	return 0;
-}
-early_param("noexec", early_parse_noexec);
-#endif /* CONFIG_S390_EXEC_PROTECT */
-
 static void setup_addressing_mode(void)
 {
-	if (user_mode == SECONDARY_SPACE_MODE) {
-		if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
-					  PSW32_ASC_SECONDARY))
-			pr_info("Execute protection active, "
-				"mvcos available\n");
-		else
-			pr_info("Execute protection active, "
-				"mvcos not available\n");
-	} else if (user_mode == PRIMARY_SPACE_MODE) {
+	if (user_mode == PRIMARY_SPACE_MODE) {
 		if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
 			pr_info("Address spaces switched, "
 				"mvcos available\n");
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 63c7d9ff220d..f8e85ecbc459 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -335,7 +335,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
 		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
 		if (!cpu_stopped(logical_cpu))
 			continue;
-		cpu_set(logical_cpu, cpu_present_map);
+		set_cpu_present(logical_cpu, true);
 		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
 		logical_cpu = cpumask_next(logical_cpu, &avail);
 		if (logical_cpu >= nr_cpu_ids)
@@ -367,7 +367,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
 			continue;
 		__cpu_logical_map[logical_cpu] = cpu_id;
 		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
-		cpu_set(logical_cpu, cpu_present_map);
+		set_cpu_present(logical_cpu, true);
 		if (cpu >= info->configured)
 			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
 		else
@@ -385,7 +385,7 @@ static int __smp_rescan_cpus(void)
 {
 	cpumask_t avail;
 
-	cpus_xor(avail, cpu_possible_map, cpu_present_map);
+	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
 	if (smp_use_sigp_detection)
 		return smp_rescan_cpus_sigp(avail);
 	else
@@ -467,7 +467,7 @@ int __cpuinit start_secondary(void *cpuvoid)
 	notify_cpu_starting(smp_processor_id());
 	/* Mark this cpu as online */
 	ipi_call_lock();
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 	ipi_call_unlock();
 	/* Switch on interrupts */
 	local_irq_enable();
@@ -644,7 +644,7 @@ int __cpu_disable(void)
 	struct ec_creg_mask_parms cr_parms;
 	int cpu = smp_processor_id();
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	/* Disable pfault pseudo page faults on this cpu. */
 	pfault_fini();
@@ -654,8 +654,8 @@ int __cpu_disable(void)
 
 	/* disable all external interrupts */
 	cr_parms.orvals[0] = 0;
-	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
-				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
+	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
+				1 << 10 | 1 << 9 | 1 << 6 | 1 << 4);
 	/* disable all I/O interrupts */
 	cr_parms.orvals[6] = 0;
 	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
@@ -681,7 +681,7 @@ void __cpu_die(unsigned int cpu)
 	atomic_dec(&init_mm.context.attach_count);
 }
 
-void cpu_die(void)
+void __noreturn cpu_die(void)
 {
 	idle_task_exit();
 	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
@@ -738,8 +738,8 @@ void __init smp_prepare_boot_cpu(void)
 	BUG_ON(smp_processor_id() != 0);
 
 	current_thread_info()->cpu = 0;
-	cpu_set(0, cpu_present_map);
-	cpu_set(0, cpu_online_map);
+	set_cpu_present(0, true);
+	set_cpu_online(0, true);
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
 	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
@@ -1016,21 +1016,21 @@ int __ref smp_rescan_cpus(void)
 
 	get_online_cpus();
 	mutex_lock(&smp_cpu_state_mutex);
-	newcpus = cpu_present_map;
+	cpumask_copy(&newcpus, cpu_present_mask);
 	rc = __smp_rescan_cpus();
 	if (rc)
 		goto out;
-	cpus_andnot(newcpus, cpu_present_map, newcpus);
-	for_each_cpu_mask(cpu, newcpus) {
+	cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
+	for_each_cpu(cpu, &newcpus) {
 		rc = smp_add_present_cpu(cpu);
 		if (rc)
-			cpu_clear(cpu, cpu_present_map);
+			set_cpu_present(cpu, false);
 	}
 	rc = 0;
 out:
 	mutex_unlock(&smp_cpu_state_mutex);
 	put_online_cpus();
-	if (!cpus_empty(newcpus))
+	if (!cpumask_empty(&newcpus))
 		topology_schedule_update();
 	return rc;
 }
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 87be655557aa..a59557f1fb5f 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -810,7 +810,7 @@ static int etr_sync_clock_stop(struct etr_aib *aib, int port)
 	etr_sync.etr_port = port;
 	get_online_cpus();
 	atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
-	rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map);
+	rc = stop_machine(etr_sync_clock, &etr_sync, cpu_online_mask);
 	put_online_cpus();
 	return rc;
 }
@@ -1579,7 +1579,7 @@ static void stp_work_fn(struct work_struct *work)
 	memset(&stp_sync, 0, sizeof(stp_sync));
 	get_online_cpus();
 	atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
-	stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
+	stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask);
 	put_online_cpus();
 
 	if (!check_sync_clock())
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 94b06c31fc8a..2eafb8c7a746 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -52,20 +52,20 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
 	cpumask_t mask;
 
-	cpus_clear(mask);
+	cpumask_clear(&mask);
 	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
 		cpumask_copy(&mask, cpumask_of(cpu));
 		return mask;
 	}
 	while (info) {
-		if (cpu_isset(cpu, info->mask)) {
+		if (cpumask_test_cpu(cpu, &info->mask)) {
 			mask = info->mask;
 			break;
 		}
 		info = info->next;
 	}
-	if (cpus_empty(mask))
-		mask = cpumask_of_cpu(cpu);
+	if (cpumask_empty(&mask))
+		cpumask_copy(&mask, cpumask_of(cpu));
 	return mask;
 }
71 71
@@ -85,10 +85,10 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
 		if (cpu_logical_map(lcpu) != rcpu)
 			continue;
 #ifdef CONFIG_SCHED_BOOK
-		cpu_set(lcpu, book->mask);
+		cpumask_set_cpu(lcpu, &book->mask);
 		cpu_book_id[lcpu] = book->id;
 #endif
-		cpu_set(lcpu, core->mask);
+		cpumask_set_cpu(lcpu, &core->mask);
 		cpu_core_id[lcpu] = core->id;
 		smp_cpu_polarization[lcpu] = tl_cpu->pp;
 	}
@@ -101,13 +101,13 @@ static void clear_masks(void)
 
 	info = &core_info;
 	while (info) {
-		cpus_clear(info->mask);
+		cpumask_clear(&info->mask);
 		info = info->next;
 	}
 #ifdef CONFIG_SCHED_BOOK
 	info = &book_info;
 	while (info) {
-		cpus_clear(info->mask);
+		cpumask_clear(&info->mask);
 		info = info->next;
 	}
 #endif
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index d13e8755a8cc..8ad2b34ad151 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -22,6 +22,9 @@ obj-y += vdso32_wrapper.o
 extra-y += vdso32.lds
 CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
 
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
 # Force dependency (incbin is bad)
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
 
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 449352dda9cd..2a8ddfd12a5b 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -22,6 +22,9 @@ obj-y += vdso64_wrapper.o
 extra-y += vdso64.lds
 CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
 
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
 # Force dependency (incbin is bad)
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
 
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 1bc18cdb525b..56fe6bc81fee 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -77,7 +77,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(0x100)
 
-	PERCPU(0x100, PAGE_SIZE)
+	PERCPU_SECTION(0x100)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .; /* freed after init ends here */
 
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 3cc95dd0a3a6..075ddada4911 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -412,6 +412,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 	struct dcss_segment *seg;
 	int rc, diag_cc;
 
+	start_addr = end_addr = 0;
 	seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
 	if (seg == NULL) {
 		rc = -ENOMEM;
@@ -573,6 +574,7 @@ segment_modify_shared (char *name, int do_nonshared)
 	unsigned long start_addr, end_addr, dummy;
 	int rc, diag_cc;
 
+	start_addr = end_addr = 0;
 	mutex_lock(&dcss_lock);
 	seg = segment_by_name (name);
 	if (seg == NULL) {
@@ -681,8 +683,6 @@ void
 segment_save(char *name)
 {
 	struct dcss_segment *seg;
-	int startpfn = 0;
-	int endpfn = 0;
 	char cmd1[160];
 	char cmd2[80];
 	int i, response;
@@ -698,8 +698,6 @@ segment_save(char *name)
 		goto out;
 	}
 
-	startpfn = seg->start_addr >> PAGE_SHIFT;
-	endpfn = (seg->end) >> PAGE_SHIFT;
 	sprintf(cmd1, "DEFSEG %s", name);
 	for (i=0; i<seg->segcnt; i++) {
 		sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ab988135e5c6..a0f9e730f26a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -225,33 +225,6 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code,
 	force_sig_info(SIGBUS, &si, tsk);
 }
 
-#ifdef CONFIG_S390_EXEC_PROTECT
-static noinline int signal_return(struct pt_regs *regs, long int_code,
-				  unsigned long trans_exc_code)
-{
-	u16 instruction;
-	int rc;
-
-	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
-
-	if (!rc && instruction == 0x0a77) {
-		clear_tsk_thread_flag(current, TIF_PER_TRAP);
-		if (is_compat_task())
-			sys32_sigreturn();
-		else
-			sys_sigreturn();
-	} else if (!rc && instruction == 0x0aad) {
-		clear_tsk_thread_flag(current, TIF_PER_TRAP);
-		if (is_compat_task())
-			sys32_rt_sigreturn();
-		else
-			sys_rt_sigreturn();
-	} else
-		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
-	return 0;
-}
-#endif /* CONFIG_S390_EXEC_PROTECT */
-
 static noinline void do_fault_error(struct pt_regs *regs, long int_code,
 				    unsigned long trans_exc_code, int fault)
 {
@@ -259,13 +232,6 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
 
 	switch (fault) {
 	case VM_FAULT_BADACCESS:
-#ifdef CONFIG_S390_EXEC_PROTECT
-		if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
-		    (trans_exc_code & 3) == 0) {
-			signal_return(regs, int_code, trans_exc_code);
-			break;
-		}
-#endif /* CONFIG_S390_EXEC_PROTECT */
 	case VM_FAULT_BADMAP:
 		/* Bad memory access. Check if it is kernel or user space. */
 		if (regs->psw.mask & PSW_MASK_PSTATE) {
@@ -414,11 +380,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
 	int access, fault;
 
 	access = VM_READ | VM_EXEC | VM_WRITE;
-#ifdef CONFIG_S390_EXEC_PROTECT
-	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
-	    (trans_exc_code & 3) == 0)
-		access = VM_EXEC;
-#endif
 	fault = do_exception(regs, access, trans_exc_code);
 	if (unlikely(fault))
 		do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
@@ -491,22 +452,28 @@ static int __init nopfault(char *str)
 
 __setup("nopfault", nopfault);
 
-typedef struct {
-	__u16 refdiagc;
-	__u16 reffcode;
-	__u16 refdwlen;
-	__u16 refversn;
-	__u64 refgaddr;
-	__u64 refselmk;
-	__u64 refcmpmk;
-	__u64 reserved;
-} __attribute__ ((packed, aligned(8))) pfault_refbk_t;
+struct pfault_refbk {
+	u16 refdiagc;
+	u16 reffcode;
+	u16 refdwlen;
+	u16 refversn;
+	u64 refgaddr;
+	u64 refselmk;
+	u64 refcmpmk;
+	u64 reserved;
+} __attribute__ ((packed, aligned(8)));
 
 int pfault_init(void)
 {
-	pfault_refbk_t refbk =
-		{ 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
-		  __PF_RES_FIELD };
+	struct pfault_refbk refbk = {
+		.refdiagc = 0x258,
+		.reffcode = 0,
+		.refdwlen = 5,
+		.refversn = 2,
+		.refgaddr = __LC_CURRENT_PID,
+		.refselmk = 1ULL << 48,
+		.refcmpmk = 1ULL << 48,
+		.reserved = __PF_RES_FIELD };
 	int rc;
 
 	if (!MACHINE_IS_VM || pfault_disable)
@@ -524,8 +491,12 @@ int pfault_init(void)
 
 void pfault_fini(void)
 {
-	pfault_refbk_t refbk =
-		{ 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
+	struct pfault_refbk refbk = {
+		.refdiagc = 0x258,
+		.reffcode = 1,
+		.refdwlen = 5,
+		.refversn = 2,
+	};
 
 	if (!MACHINE_IS_VM || pfault_disable)
 		return;
@@ -537,11 +508,15 @@ void pfault_fini(void)
 		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
+static DEFINE_SPINLOCK(pfault_lock);
+static LIST_HEAD(pfault_list);
+
 static void pfault_interrupt(unsigned int ext_int_code,
 			     unsigned int param32, unsigned long param64)
 {
 	struct task_struct *tsk;
 	__u16 subcode;
+	pid_t pid;
 
 	/*
 	 * Get the external interruption subcode & pfault
@@ -553,44 +528,79 @@ static void pfault_interrupt(unsigned int ext_int_code,
 	if ((subcode & 0xff00) != __SUBCODE_MASK)
 		return;
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
-
-	/*
-	 * Get the token (= address of the task structure of the affected task).
-	 */
-#ifdef CONFIG_64BIT
-	tsk = (struct task_struct *) param64;
-#else
-	tsk = (struct task_struct *) param32;
-#endif
-
+	if (subcode & 0x0080) {
+		/* Get the token (= pid of the affected task). */
+		pid = sizeof(void *) == 4 ? param32 : param64;
+		rcu_read_lock();
+		tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+		if (tsk)
+			get_task_struct(tsk);
+		rcu_read_unlock();
+		if (!tsk)
+			return;
+	} else {
+		tsk = current;
+	}
+	spin_lock(&pfault_lock);
 	if (subcode & 0x0080) {
 		/* signal bit is set -> a page has been swapped in by VM */
-		if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
+		if (tsk->thread.pfault_wait == 1) {
 			/* Initial interrupt was faster than the completion
 			 * interrupt. pfault_wait is valid. Set pfault_wait
 			 * back to zero and wake up the process. This can
 			 * safely be done because the task is still sleeping
 			 * and can't produce new pfaults. */
 			tsk->thread.pfault_wait = 0;
+			list_del(&tsk->thread.list);
 			wake_up_process(tsk);
-			put_task_struct(tsk);
+		} else {
+			/* Completion interrupt was faster than initial
+			 * interrupt. Set pfault_wait to -1 so the initial
+			 * interrupt doesn't put the task to sleep. */
+			tsk->thread.pfault_wait = -1;
 		}
+		put_task_struct(tsk);
 	} else {
 		/* signal bit not set -> a real page is missing. */
-		get_task_struct(tsk);
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
+		if (tsk->thread.pfault_wait == -1) {
 			/* Completion interrupt was faster than the initial
-			 * interrupt (swapped in a -1 for pfault_wait). Set
-			 * pfault_wait back to zero and exit. This can be
-			 * done safely because tsk is running in kernel
-			 * mode and can't produce new pfaults. */
+			 * interrupt (pfault_wait == -1). Set pfault_wait
+			 * back to zero and exit. */
 			tsk->thread.pfault_wait = 0;
-			set_task_state(tsk, TASK_RUNNING);
-			put_task_struct(tsk);
-		} else
+		} else {
+			/* Initial interrupt arrived before completion
+			 * interrupt. Let the task sleep. */
+			tsk->thread.pfault_wait = 1;
+			list_add(&tsk->thread.list, &pfault_list);
+			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 			set_tsk_need_resched(tsk);
+		}
+	}
+	spin_unlock(&pfault_lock);
+}
+
+static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
+				       unsigned long action, void *hcpu)
+{
+	struct thread_struct *thread, *next;
+	struct task_struct *tsk;
+
+	switch (action) {
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		spin_lock_irq(&pfault_lock);
+		list_for_each_entry_safe(thread, next, &pfault_list, list) {
+			thread->pfault_wait = 0;
+			list_del(&thread->list);
+			tsk = container_of(thread, struct task_struct, thread);
+			wake_up_process(tsk);
+		}
+		spin_unlock_irq(&pfault_lock);
+		break;
+	default:
+		break;
 	}
+	return NOTIFY_OK;
 }
 
 static int __init pfault_irq_init(void)
@@ -599,22 +609,21 @@ static int __init pfault_irq_init(void)
 
 	if (!MACHINE_IS_VM)
 		return 0;
-	/*
-	 * Try to get pfault pseudo page faults going.
-	 */
 	rc = register_external_interrupt(0x2603, pfault_interrupt);
-	if (rc) {
-		pfault_disable = 1;
-		return rc;
-	}
-	if (pfault_init() == 0)
-		return 0;
+	if (rc)
+		goto out_extint;
+	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
+	if (rc)
+		goto out_pfault;
+	hotcpu_notifier(pfault_cpu_notify, 0);
+	return 0;
 
-	/* Tough luck, no pfault. */
-	pfault_disable = 1;
+out_pfault:
 	unregister_external_interrupt(0x2603, pfault_interrupt);
-	return 0;
+out_extint:
+	pfault_disable = 1;
+	return rc;
 }
 early_initcall(pfault_irq_init);
 
-#endif
+#endif /* CONFIG_PFAULT */
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 639cd21f2218..a4d856db9154 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -13,7 +13,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
13 pte_t *pteptr, pte_t pteval) 13 pte_t *pteptr, pte_t pteval)
14{ 14{
15 pmd_t *pmdp = (pmd_t *) pteptr; 15 pmd_t *pmdp = (pmd_t *) pteptr;
16 pte_t shadow_pteval = pteval;
17 unsigned long mask; 16 unsigned long mask;
18 17
19 if (!MACHINE_HAS_HPAGE) { 18 if (!MACHINE_HAS_HPAGE) {
@@ -21,18 +20,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		mask = pte_val(pteval) &
 			(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
 		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
-		if (mm->context.noexec) {
-			pteptr += PTRS_PER_PTE;
-			pte_val(shadow_pteval) =
-				(_SEGMENT_ENTRY + __pa(pteptr)) | mask;
-		}
 	}
 
 	pmd_val(*pmdp) = pte_val(pteval);
-	if (mm->context.noexec) {
-		pmdp = get_shadow_table(pmdp);
-		pmd_val(*pmdp) = pte_val(shadow_pteval);
-	}
 }
 
 int arch_prepare_hugepage(struct page *page)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index bb409332a484..dfefc2171691 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -175,7 +175,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		pmd = pmd_offset(pud, address);
 		pte = pte_offset_kernel(pmd, address);
 		if (!enable) {
-			ptep_invalidate(&init_mm, address, pte);
+			__ptep_ipte(address, pte);
+			pte_val(*pte) = _PAGE_TYPE_EMPTY;
 			continue;
 		}
 		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index f05edcc3beff..d013ed39743b 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -28,7 +28,7 @@ static void change_page_attr(unsigned long addr, int numpages,
 
 		pte = *ptep;
 		pte = set(pte);
-		ptep_invalidate(&init_mm, addr, ptep);
+		__ptep_ipte(addr, ptep);
 		*ptep = pte;
 		addr += PAGE_SIZE;
 	}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index e1850c28cd68..8d4330642512 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -40,7 +40,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
 
 static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
 
 static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
 {
@@ -67,7 +66,7 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
 	while (batch->pgt_index > 0)
 		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
 	while (batch->crst_index < RCU_FREELIST_SIZE)
-		__crst_table_free(batch->mm, batch->table[batch->crst_index++]);
+		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
 	free_page((unsigned long) batch);
 }
 
@@ -125,63 +124,33 @@ static int __init parse_vmalloc(char *arg)
 }
 early_param("vmalloc", parse_vmalloc);
 
-unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
+unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
 	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
 
 	if (!page)
 		return NULL;
-	page->index = 0;
-	if (noexec) {
-		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
-		if (!shadow) {
-			__free_pages(page, ALLOC_ORDER);
-			return NULL;
-		}
-		page->index = page_to_phys(shadow);
-	}
-	spin_lock_bh(&mm->context.list_lock);
-	list_add(&page->lru, &mm->context.crst_list);
-	spin_unlock_bh(&mm->context.list_lock);
 	return (unsigned long *) page_to_phys(page);
 }
 
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
-{
-	unsigned long *shadow = get_shadow_table(table);
-
-	if (shadow)
-		free_pages((unsigned long) shadow, ALLOC_ORDER);
-	free_pages((unsigned long) table, ALLOC_ORDER);
-}
-
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
-	struct page *page = virt_to_page(table);
-
-	spin_lock_bh(&mm->context.list_lock);
-	list_del(&page->lru);
-	spin_unlock_bh(&mm->context.list_lock);
-	__crst_table_free(mm, table);
+	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
 void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 {
 	struct rcu_table_freelist *batch;
-	struct page *page = virt_to_page(table);
 
-	spin_lock_bh(&mm->context.list_lock);
-	list_del(&page->lru);
-	spin_unlock_bh(&mm->context.list_lock);
 	if (atomic_read(&mm->mm_users) < 2 &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		__crst_table_free(mm, table);
+		crst_table_free(mm, table);
 		return;
 	}
 	batch = rcu_table_freelist_get(mm);
 	if (!batch) {
 		smp_call_function(smp_sync, NULL, 1);
-		__crst_table_free(mm, table);
+		crst_table_free(mm, table);
 		return;
 	}
 	batch->table[--batch->crst_index] = table;
@@ -197,7 +166,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 
 	BUG_ON(limit > (1UL << 53));
 repeat:
-	table = crst_table_alloc(mm, mm->context.noexec);
+	table = crst_table_alloc(mm);
 	if (!table)
 		return -ENOMEM;
 	spin_lock_bh(&mm->page_table_lock);
@@ -273,7 +242,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	unsigned long *table;
 	unsigned long bits;
 
-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	spin_lock_bh(&mm->context.list_lock);
 	page = NULL;
 	if (!list_empty(&mm->context.pgtable_list)) {
@@ -329,7 +298,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	struct page *page;
 	unsigned long bits;
 
-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock_bh(&mm->context.list_lock);
@@ -366,7 +335,7 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 		page_table_free(mm, table);
 		return;
 	}
-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock_bh(&mm->context.list_lock);
@@ -379,25 +348,6 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 	rcu_table_freelist_finish();
 }
 
-void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
-{
-	struct page *page;
-
-	spin_lock_bh(&mm->context.list_lock);
-	/* Free shadow region and segment tables. */
-	list_for_each_entry(page, &mm->context.crst_list, lru)
-		if (page->index) {
-			free_pages((unsigned long) page->index, ALLOC_ORDER);
-			page->index = 0;
-		}
-	/* "Free" second halves of page tables. */
-	list_for_each_entry(page, &mm->context.pgtable_list, lru)
-		page->flags &= ~SECOND_HALVES;
-	spin_unlock_bh(&mm->context.list_lock);
-	mm->context.noexec = 0;
-	update_mm(mm, tsk);
-}
-
 /*
  * switch on pgstes for its userspace process (for kvm)
  */
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 34c43f23b28c..8c1970d1dd91 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -95,7 +95,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		pu_dir = vmem_pud_alloc();
 		if (!pu_dir)
 			goto out;
-		pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+		pgd_populate(&init_mm, pg_dir, pu_dir);
 	}
 
 	pu_dir = pud_offset(pg_dir, address);
@@ -103,7 +103,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		pm_dir = vmem_pmd_alloc();
 		if (!pm_dir)
 			goto out;
-		pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+		pud_populate(&init_mm, pu_dir, pm_dir);
 	}
 
 	pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
@@ -123,7 +123,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			pt_dir = vmem_pte_alloc();
 			if (!pt_dir)
 				goto out;
-			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+			pmd_populate(&init_mm, pm_dir, pt_dir);
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
@@ -159,7 +159,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 			continue;
 
 		if (pmd_huge(*pm_dir)) {
-			pmd_clear_kernel(pm_dir);
+			pmd_clear(pm_dir);
 			address += HPAGE_SIZE - PAGE_SIZE;
 			continue;
 		}
@@ -192,7 +192,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		pu_dir = vmem_pud_alloc();
 		if (!pu_dir)
 			goto out;
-		pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+		pgd_populate(&init_mm, pg_dir, pu_dir);
 	}
 
 	pu_dir = pud_offset(pg_dir, address);
@@ -200,7 +200,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		pm_dir = vmem_pmd_alloc();
 		if (!pm_dir)
 			goto out;
-		pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+		pud_populate(&init_mm, pu_dir, pm_dir);
 	}
 
 	pm_dir = pmd_offset(pu_dir, address);
@@ -208,7 +208,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		pt_dir = vmem_pte_alloc();
 		if (!pt_dir)
 			goto out;
-		pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+		pmd_populate(&init_mm, pm_dir, pt_dir);
 	}
 
 	pt_dir = pte_offset_kernel(pm_dir, address);
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 33cbd373cce4..053caa0fd276 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -5,6 +5,7 @@
  * Author: Heinz Graalfs <graalfs@de.ibm.com>
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/smp.h>
@@ -674,17 +675,11 @@ int hwsampler_activate(unsigned int cpu)
 static void hws_ext_handler(unsigned int ext_int_code,
 			    unsigned int param32, unsigned long param64)
 {
-	int cpu;
 	struct hws_cpu_buffer *cb;
 
-	cpu = smp_processor_id();
-	cb = &per_cpu(sampler_cpu_buffer, cpu);
-
-	atomic_xchg(
-		&cb->ext_params,
-		atomic_read(&cb->ext_params)
-		| S390_lowcore.ext_params);
-
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
+	cb = &__get_cpu_var(sampler_cpu_buffer);
+	atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
 	if (hws_wq)
 		queue_work(hws_wq, &cb->worker);
 }
@@ -764,7 +759,7 @@ static int worker_check_error(unsigned int cpu, int ext_params)
 	if (!sdbt || !*sdbt)
 		return -EINVAL;
 
-	if (ext_params & EI_IEA)
+	if (ext_params & EI_PRA)
 		cb->req_alert++;
 
 	if (ext_params & EI_LSDA)
@@ -1009,7 +1004,7 @@ int hwsampler_deallocate()
 	if (hws_state != HWS_STOPPED)
 		goto deallocate_exit;
 
-	smp_ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
+	ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
 	deallocate_sdbt();
 
 	hws_state = HWS_DEALLOCATED;
@@ -1123,7 +1118,7 @@ int hwsampler_shutdown()
 	mutex_lock(&hws_sem);
 
 	if (hws_state == HWS_STOPPED) {
-		smp_ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
+		ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
 		deallocate_sdbt();
 	}
 	if (hws_wq) {
@@ -1198,7 +1193,7 @@ start_all_exit:
 	hws_oom = 1;
 	hws_flush_all = 0;
 	/* now let them in, 1407 CPUMF external interrupts */
-	smp_ctl_set_bit(0, 5); /* set CR0 bit 58 */
+	ctl_set_bit(0, 5); /* set CR0 bit 58 */
 
 	return 0;
 }
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index af4d46187a79..731c10ce67b5 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -66,7 +66,7 @@ SECTIONS
 		__machvec_end = .;
 	}
 
-	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 
 	/*
 	 * .exit.text is discarded at runtime, not link time, to deal with
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 92b557afe535..c0220759003e 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -108,7 +108,7 @@ SECTIONS
 		__sun4v_2insn_patch_end = .;
 	}
 
-	PERCPU(SMP_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(SMP_CACHE_BYTES)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 38f64fafdc10..631f10de12fe 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -60,7 +60,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	VMLINUX_SYMBOL(_sinitdata) = .;
 	INIT_DATA_SECTION(16) :data =0
-	PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L2_CACHE_BYTES)
 	. = ALIGN(PAGE_SIZE);
 	VMLINUX_SYMBOL(_einitdata) = .;
 
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86
index a9da516a5274..795ea8e869f4 100644
--- a/arch/um/Kconfig.x86
+++ b/arch/um/Kconfig.x86
@@ -29,10 +29,10 @@ config X86_64
 	def_bool 64BIT
 
 config RWSEM_XCHGADD_ALGORITHM
-	def_bool X86_XADD
+	def_bool X86_XADD && 64BIT
 
 config RWSEM_GENERIC_SPINLOCK
-	def_bool !X86_XADD
+	def_bool !RWSEM_XCHGADD_ALGORITHM
 
 config 3_LEVEL_PGTABLES
 	bool "Three-level pagetables (EXPERIMENTAL)" if !64BIT
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index 34bede8aad4a..4938de5512d2 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -42,7 +42,7 @@
 	INIT_SETUP(0)
   }
 
-  PERCPU(32, 32)
+  PERCPU_SECTION(32)
 
   .initcall.init : {
 	INIT_CALLS
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 12d55e773eb6..48142971b25d 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -8,11 +8,6 @@
 
 #ifdef CONFIG_X86_32
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
-/*
- * For 32-bit UML - mark functions implemented in assembly that use
- * regparm input parameters:
- */
-#define asmregparm __attribute__((regparm(3)))
 
 /*
  * Make sure the compiler doesn't do anything stupid with the
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 53278b0dfdf6..a0a9779084d1 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -509,6 +509,11 @@ do { \
  * it in software. The address used in the cmpxchg16 instruction must be
  * aligned to a 16 byte boundary.
  */
+#ifdef CONFIG_SMP
+#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP3
+#else
+#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP2
+#endif
 #define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)			\
 ({									\
 	char __ret;							\
@@ -517,7 +522,7 @@ do { \
 	typeof(o2) __o2 = o2;						\
 	typeof(o2) __n2 = n2;						\
 	typeof(o2) __dummy;						\
-	alternative_io("call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP4,	\
+	alternative_io(CMPXCHG16B_EMU_CALL,				\
 		       "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t",	\
 		       X86_FEATURE_CX16,				\
 		       ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),		\
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 9488dcff7aec..e5293394b548 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -676,7 +676,7 @@ void mask_ioapic_entries(void)
 	int apic, pin;
 
 	for (apic = 0; apic < nr_ioapics; apic++) {
-		if (ioapics[apic].saved_registers)
+		if (!ioapics[apic].saved_registers)
 			continue;
 
 		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
@@ -699,7 +699,7 @@ int restore_ioapic_entries(void)
 	int apic, pin;
 
 	for (apic = 0; apic < nr_ioapics; apic++) {
-		if (ioapics[apic].saved_registers)
+		if (!ioapics[apic].saved_registers)
 			continue;
 
 		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index f65e5b521dbd..807c2a2b80f1 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
  * We must return the syscall number to actually look up in the table.
  * This can be -1L to skip running any syscall at all.
  */
-asmregparm long syscall_trace_enter(struct pt_regs *regs)
+long syscall_trace_enter(struct pt_regs *regs)
 {
 	long ret = 0;
 
@@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
 	return ret ?: regs->orig_ax;
 }
 
-asmregparm void syscall_trace_leave(struct pt_regs *regs)
+void syscall_trace_leave(struct pt_regs *regs)
 {
 	bool step;
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 49927a863cc1..61682f0ac264 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -326,7 +326,7 @@ SECTIONS
 	}
 
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-	PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
 #endif
 
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index a2820065927e..88ecea3facb4 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -155,7 +155,7 @@ SECTIONS
 		INIT_RAM_FS
 	}
 
-	PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE)
+	PERCPU_SECTION(XCHAL_ICACHE_LINESIZE)
 
 	/* We need this dummy segment here */
 