author     Linus Torvalds <torvalds@linux-foundation.org>    2012-07-31 18:32:05 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-07-31 18:32:05 -0400
commit     26847fa6eb4fd653171f86d249caa761ce1e87c7 (patch)
tree       8b315fd9fb903b1c3a942389a7be0580de5cd184 /arch
parent     08843b79fb35d33859e0f8f11a7318341076e4d1 (diff)
parent     7d25617597ff8dcfe4d0e1d0ac9214e7cc7ded92 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "This is the second batch of s390 patches for the 3.6 merge window.
  Included is enablement for two common code changes, killable page
  faults and sorted exception tables.  And the regular set of cleanup
  and bug fix patches."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390: make use of user_mode() macro where possible
  s390/mm: rename user_mode variable to addressing_mode
  s390/mm: fix fault handling for page table walk case
  s390/mm: make page faults killable
  s390: update defconfig
  s390/mm: downgrade page table after fork of a 31 bit process
  s390/ipl: Use diagnose 8 command separation
  s390/linker script: use RO_DATA_SECTION
  s390/exceptions: sort exception table at build time
  s390/debug: remove module_exit function / move EXPORT_SYMBOLs
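The most common pattern in the cleanup below is replacing open-coded tests of the PSW problem-state bit with the user_mode() helper. As orientation for those hunks, the check being wrapped amounts to a single bit test; the following is an illustrative sketch only, not the actual arch/s390/include/asm/ptrace.h definition:

    /* Illustrative sketch: the semantics of the user_mode(regs) test used
     * throughout this series -- "was the interrupted context running in
     * problem state (user space)?". The helper name and placement here are
     * assumptions for illustration, not the real header. */
    static inline int user_mode_sketch(struct pt_regs *regs)
    {
            return (regs->psw.mask & PSW_MASK_PSTATE) != 0;
    }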
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig                    |  1
-rw-r--r--  arch/s390/defconfig                  |  5
-rw-r--r--  arch/s390/include/asm/mmu_context.h  | 16
-rw-r--r--  arch/s390/include/asm/processor.h    |  2
-rw-r--r--  arch/s390/include/asm/setup.h        |  2
-rw-r--r--  arch/s390/kernel/debug.c             | 70
-rw-r--r--  arch/s390/kernel/dis.c               |  4
-rw-r--r--  arch/s390/kernel/early.c             |  1
-rw-r--r--  arch/s390/kernel/ipl.c               | 12
-rw-r--r--  arch/s390/kernel/setup.c             | 12
-rw-r--r--  arch/s390/kernel/traps.c             | 16
-rw-r--r--  arch/s390/kernel/vdso.c              |  9
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S       |  2
-rw-r--r--  arch/s390/mm/fault.c                 | 35
-rw-r--r--  arch/s390/mm/mmap.c                  | 12
-rw-r--r--  arch/s390/mm/pgtable.c               |  7
-rw-r--r--  arch/s390/oprofile/backtrace.c       |  2
17 files changed, 103 insertions, 105 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 296cd32466df..76de6b68487c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -90,6 +90,7 @@ config S390
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_CMPXCHG_LOCAL
 	select ARCH_DISCARD_MEMBLOCK
+	select BUILDTIME_EXTABLE_SORT
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 37d2bf267964..967923dea98d 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -7,6 +7,9 @@ CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
@@ -35,8 +38,6 @@ CONFIG_MODVERSIONS=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_PREEMPT=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5c63615f1349..b749c5733657 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -11,7 +11,6 @@
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/ctl_reg.h>
-#include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
@@ -58,7 +57,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (user_mode != HOME_SPACE_MODE) {
+	if (addressing_mode != HOME_SPACE_MODE) {
 		/* Load primary space page table origin. */
 		asm volatile(LCTL_OPCODE" 1,1,%0\n"
 			     : : "m" (S390_lowcore.user_asce) );
@@ -91,4 +90,17 @@ static inline void activate_mm(struct mm_struct *prev,
 	switch_mm(prev, next, current);
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+#ifdef CONFIG_64BIT
+	if (oldmm->context.asce_limit < mm->context.asce_limit)
+		crst_table_downgrade(mm, oldmm->context.asce_limit);
+#endif
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
 #endif /* __S390_MMU_CONTEXT_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c40fa91e38a8..11e4e3236937 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -120,7 +120,9 @@ struct stack_frame {
 	regs->psw.mask = psw_user_bits | PSW_MASK_BA;		\
 	regs->psw.addr = new_psw | PSW_ADDR_AMODE;		\
 	regs->gprs[15] = new_stackp;				\
+	__tlb_flush_mm(current->mm);				\
 	crst_table_downgrade(current->mm, 1UL << 31);		\
+	update_mm(current->mm, current);			\
 } while (0)
 
 /* Forward declaration, a strange C thing */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 57e80534375a..e6859d16ee2d 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -60,7 +60,7 @@ void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
 #define SECONDARY_SPACE_MODE	2
 #define HOME_SPACE_MODE		3
 
-extern unsigned int user_mode;
+extern unsigned int addressing_mode;
 
 /*
  * Machine features detected in head.S
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 21be961e8a43..ba500d8dc392 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -110,6 +110,7 @@ struct debug_view debug_raw_view = {
 	NULL,
 	NULL
 };
+EXPORT_SYMBOL(debug_raw_view);
 
 struct debug_view debug_hex_ascii_view = {
 	"hex_ascii",
@@ -119,6 +120,7 @@ struct debug_view debug_hex_ascii_view = {
 	NULL,
 	NULL
 };
+EXPORT_SYMBOL(debug_hex_ascii_view);
 
 static struct debug_view debug_level_view = {
 	"level",
@@ -155,6 +157,7 @@ struct debug_view debug_sprintf_view = {
 	NULL,
 	NULL
 };
+EXPORT_SYMBOL(debug_sprintf_view);
 
 /* used by dump analysis tools to determine version of debug feature */
 static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
@@ -730,6 +733,7 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
 	return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
 				   S_IRUSR | S_IWUSR, 0, 0);
 }
+EXPORT_SYMBOL(debug_register);
 
 /*
  * debug_unregister:
@@ -748,6 +752,7 @@ debug_unregister(debug_info_t * id)
 out:
 	return;
 }
+EXPORT_SYMBOL(debug_unregister);
 
 /*
  * debug_set_size:
@@ -810,7 +815,7 @@ debug_set_level(debug_info_t* id, int new_level)
 	}
 	spin_unlock_irqrestore(&id->lock,flags);
 }
-
+EXPORT_SYMBOL(debug_set_level);
 
 /*
  * proceed_active_entry:
@@ -930,7 +935,7 @@ debug_stop_all(void)
 	if (debug_stoppable)
 		debug_active = 0;
 }
-
+EXPORT_SYMBOL(debug_stop_all);
 
 void debug_set_critical(void)
 {
@@ -963,6 +968,7 @@ debug_event_common(debug_info_t * id, int level, const void *buf, int len)
 
 	return active;
 }
+EXPORT_SYMBOL(debug_event_common);
 
 /*
  * debug_exception_common:
@@ -990,6 +996,7 @@ debug_entry_t
 
 	return active;
 }
+EXPORT_SYMBOL(debug_exception_common);
 
 /*
  * counts arguments in format string for sprintf view
@@ -1043,6 +1050,7 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 
 	return active;
 }
+EXPORT_SYMBOL(debug_sprintf_event);
 
 /*
  * debug_sprintf_exception:
@@ -1081,25 +1089,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 
 	return active;
 }
-
-/*
- * debug_init:
- * - is called exactly once to initialize the debug feature
- */
-
-static int
-__init debug_init(void)
-{
-	int rc = 0;
-
-	s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
-	mutex_lock(&debug_mutex);
-	debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
-	initialized = 1;
-	mutex_unlock(&debug_mutex);
-
-	return rc;
-}
+EXPORT_SYMBOL(debug_sprintf_exception);
 
 /*
  * debug_register_view:
@@ -1147,6 +1137,7 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
 out:
 	return rc;
 }
+EXPORT_SYMBOL(debug_register_view);
 
 /*
  * debug_unregister_view:
@@ -1176,6 +1167,7 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
 out:
 	return rc;
 }
+EXPORT_SYMBOL(debug_unregister_view);
 
 static inline char *
 debug_get_user_string(const char __user *user_buf, size_t user_len)
@@ -1485,6 +1477,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
 		   except_str, entry->id.fields.cpuid, (void *) caller);
 	return rc;
 }
+EXPORT_SYMBOL(debug_dflt_header_fn);
 
 /*
  * prints debug data sprintf-formated:
@@ -1533,33 +1526,16 @@ out:
 }
 
 /*
- * clean up module
+ * debug_init:
+ * - is called exactly once to initialize the debug feature
  */
-static void __exit debug_exit(void)
+static int __init debug_init(void)
 {
-	debugfs_remove(debug_debugfs_root_entry);
-	unregister_sysctl_table(s390dbf_sysctl_header);
-	return;
+	s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
+	mutex_lock(&debug_mutex);
+	debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL);
+	initialized = 1;
+	mutex_unlock(&debug_mutex);
+	return 0;
 }
-
-/*
- * module definitions
- */
 postcore_initcall(debug_init);
-module_exit(debug_exit);
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(debug_register);
-EXPORT_SYMBOL(debug_unregister);
-EXPORT_SYMBOL(debug_set_level);
-EXPORT_SYMBOL(debug_stop_all);
-EXPORT_SYMBOL(debug_register_view);
-EXPORT_SYMBOL(debug_unregister_view);
-EXPORT_SYMBOL(debug_event_common);
-EXPORT_SYMBOL(debug_exception_common);
-EXPORT_SYMBOL(debug_hex_ascii_view);
-EXPORT_SYMBOL(debug_raw_view);
-EXPORT_SYMBOL(debug_dflt_header_fn);
-EXPORT_SYMBOL(debug_sprintf_view);
-EXPORT_SYMBOL(debug_sprintf_exception);
-EXPORT_SYMBOL(debug_sprintf_event);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 1f6b428e2762..619c5d350726 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1531,7 +1531,7 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
 
 void show_code(struct pt_regs *regs)
 {
-	char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
+	char *mode = user_mode(regs) ? "User" : "Krnl";
 	unsigned char code[64];
 	char buffer[64], *ptr;
 	mm_segment_t old_fs;
@@ -1540,7 +1540,7 @@ void show_code(struct pt_regs *regs)
 
 	/* Get a snapshot of the 64 bytes surrounding the fault address. */
 	old_fs = get_fs();
-	set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS);
+	set_fs(user_mode(regs) ? USER_DS : KERNEL_DS);
 	for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
 		addr = regs->psw.addr - 34 + start;
 		if (__copy_from_user(code + start - 2,
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index bc95a8ebd9cc..83c3271c442b 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -455,7 +455,6 @@ void __init startup_init(void)
 	init_kernel_storage_key();
 	lockdep_init();
 	lockdep_off();
-	sort_main_extable();
 	setup_lowcore_early();
 	setup_facility_list();
 	detect_machine_type();
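The BUILDTIME_EXTABLE_SORT select in the Kconfig hunk and the sort_main_extable() removal here are two sides of the same change: the exception table is sorted once at build time (by the sortextable host tool run on the linked image), so the early boot-time sort is no longer needed and fixup lookup can binary-search the table directly. Purely as an illustration of why a pre-sorted table suffices — generic names, not the s390 structures:

    /* Illustration only: fixup lookup over an already build-time-sorted
     * exception table, as enabled by BUILDTIME_EXTABLE_SORT. */
    struct ex_entry { unsigned long insn; unsigned long fixup; };

    static const struct ex_entry *
    search_sorted_extable(const struct ex_entry *tab, unsigned long n,
                          unsigned long addr)
    {
            unsigned long lo = 0, hi = n;

            while (lo < hi) {
                    unsigned long mid = lo + (hi - lo) / 2;

                    if (tab[mid].insn == addr)
                            return &tab[mid];  /* fixup registered for this insn */
                    if (tab[mid].insn < addr)
                            lo = mid + 1;
                    else
                            hi = mid;
            }
            return NULL;  /* no fixup: let the fault escalate */
    }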
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index e64d141555ce..6ffcd3203215 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1583,7 +1583,7 @@ static struct kset *vmcmd_kset;
 
 static void vmcmd_run(struct shutdown_trigger *trigger)
 {
-	char *cmd, *next_cmd;
+	char *cmd;
 
 	if (strcmp(trigger->name, ON_REIPL_STR) == 0)
 		cmd = vmcmd_on_reboot;
@@ -1600,15 +1600,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
 
 	if (strlen(cmd) == 0)
 		return;
-	do {
-		next_cmd = strchr(cmd, '\n');
-		if (next_cmd) {
-			next_cmd[0] = 0;
-			next_cmd += 1;
-		}
-		__cpcmd(cmd, NULL, 0, NULL);
-		cmd = next_cmd;
-	} while (cmd != NULL);
+	__cpcmd(cmd, NULL, 0, NULL);
 }
 
 static int vmcmd_init(void)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 743c0f32fe3b..f86c81e13c37 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -302,8 +302,8 @@ static int __init parse_vmalloc(char *arg)
 }
 early_param("vmalloc", parse_vmalloc);
 
-unsigned int user_mode = HOME_SPACE_MODE;
-EXPORT_SYMBOL_GPL(user_mode);
+unsigned int addressing_mode = HOME_SPACE_MODE;
+EXPORT_SYMBOL_GPL(addressing_mode);
 
 static int set_amode_primary(void)
 {
@@ -328,7 +328,7 @@ static int set_amode_primary(void)
  */
 static int __init early_parse_switch_amode(char *p)
 {
-	user_mode = PRIMARY_SPACE_MODE;
+	addressing_mode = PRIMARY_SPACE_MODE;
 	return 0;
 }
 early_param("switch_amode", early_parse_switch_amode);
@@ -336,9 +336,9 @@ early_param("switch_amode", early_parse_switch_amode);
 static int __init early_parse_user_mode(char *p)
 {
 	if (p && strcmp(p, "primary") == 0)
-		user_mode = PRIMARY_SPACE_MODE;
+		addressing_mode = PRIMARY_SPACE_MODE;
 	else if (!p || strcmp(p, "home") == 0)
-		user_mode = HOME_SPACE_MODE;
+		addressing_mode = HOME_SPACE_MODE;
 	else
 		return 1;
 	return 0;
@@ -347,7 +347,7 @@ early_param("user_mode", early_parse_user_mode);
 
 static void setup_addressing_mode(void)
 {
-	if (user_mode == PRIMARY_SPACE_MODE) {
+	if (addressing_mode == PRIMARY_SPACE_MODE) {
 		if (set_amode_primary())
 			pr_info("Address spaces switched, "
 				"mvcos available\n");
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index af2421a0f315..01775c04a90e 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -185,7 +185,7 @@ void show_registers(struct pt_regs *regs)
 {
 	char *mode;
 
-	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
+	mode = user_mode(regs) ? "User" : "Krnl";
 	printk("%s PSW : %p %p",
 	       mode, (void *) regs->psw.mask,
 	       (void *) regs->psw.addr);
@@ -225,7 +225,7 @@ void show_regs(struct pt_regs *regs)
 	       (void *) current->thread.ksp);
 	show_registers(regs);
 	/* Show stack backtrace if pt_regs is from kernel mode */
-	if (!(regs->psw.mask & PSW_MASK_PSTATE))
+	if (!user_mode(regs))
 		show_trace(NULL, (unsigned long *) regs->gprs[15]);
 	show_last_breaking_event(regs);
 }
@@ -300,7 +300,7 @@ static void __kprobes do_trap(struct pt_regs *regs,
 		       regs->int_code, si_signo) == NOTIFY_STOP)
 		return;
 
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		info.si_signo = si_signo;
 		info.si_errno = 0;
 		info.si_code = si_code;
@@ -341,7 +341,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
 
 static void default_trap_handler(struct pt_regs *regs)
 {
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		report_user_fault(regs, SIGSEGV);
 		do_exit(SIGSEGV);
 	} else
@@ -410,7 +410,7 @@ static void __kprobes illegal_op(struct pt_regs *regs)
 
 	location = get_psw_address(regs);
 
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
 			return;
 		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
@@ -478,7 +478,7 @@ void specification_exception(struct pt_regs *regs)
 
 	location = (__u16 __user *) get_psw_address(regs);
 
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		get_user(*((__u16 *) opcode), location);
 		switch (opcode[0]) {
 		case 0x28: /* LDR Rx,Ry */
@@ -531,7 +531,7 @@ static void data_exception(struct pt_regs *regs)
 	asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
 
 #ifdef CONFIG_MATHEMU
-	else if (regs->psw.mask & PSW_MASK_PSTATE) {
+	else if (user_mode(regs)) {
 		__u8 opcode[6];
 		get_user(*((__u16 *) opcode), location);
 		switch (opcode[0]) {
@@ -598,7 +598,7 @@ static void data_exception(struct pt_regs *regs)
 static void space_switch_exception(struct pt_regs *regs)
 {
 	/* Set user psw back to home space mode. */
-	if (regs->psw.mask & PSW_MASK_PSTATE)
+	if (user_mode(regs))
 		regs->psw.mask |= PSW_ASC_HOME;
 	/* Send SIGILL. */
 	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index ea5590fdca3b..9a19ca367c17 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -84,7 +84,8 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
  */
 static void vdso_init_data(struct vdso_data *vd)
 {
-	vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
+	vd->ectg_available =
+		addressing_mode != HOME_SPACE_MODE && test_facility(31);
 }
 
 #ifdef CONFIG_64BIT
@@ -101,7 +102,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 
 	lowcore->vdso_per_cpu_data = __LC_PASTE;
 
-	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+	if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return 0;
 
 	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -146,7 +147,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
 
-	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+	if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return;
 
 	psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -164,7 +165,7 @@ static void vdso_init_cr5(void)
 {
 	unsigned long cr5;
 
-	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+	if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return;
 	cr5 = offsetof(struct _lowcore, paste);
 	__ctl_load(cr5, 5, 5);
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 21109c63eb12..de8fa9bbd35e 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -45,7 +45,7 @@ SECTIONS
 
 	.dummy : { *(.dummy) } :data
 
-	RODATA
+	RO_DATA_SECTION(PAGE_SIZE)
 
 #ifdef CONFIG_SHARED_KERNEL
 	. = ALIGN(0x100000);	/* VM shared segments are 1MB aligned */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6a12d1bb6e09..6c013f544146 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -49,6 +49,7 @@
 #define VM_FAULT_BADCONTEXT	0x010000
 #define VM_FAULT_BADMAP		0x020000
 #define VM_FAULT_BADACCESS	0x040000
+#define VM_FAULT_SIGNAL		0x080000
 
 static unsigned long store_indication;
 
@@ -110,7 +111,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
 	if (trans_exc_code == 2)
 		/* Access via secondary space, set_fs setting decides */
 		return current->thread.mm_segment.ar4;
-	if (user_mode == HOME_SPACE_MODE)
+	if (addressing_mode == HOME_SPACE_MODE)
 		/* User space if the access has been done via home space. */
 		return trans_exc_code == 3;
 	/*
@@ -219,7 +220,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 	case VM_FAULT_BADACCESS:
 	case VM_FAULT_BADMAP:
 		/* Bad memory access. Check if it is kernel or user space. */
-		if (regs->psw.mask & PSW_MASK_PSTATE) {
+		if (user_mode(regs)) {
 			/* User mode accesses just cause a SIGSEGV */
 			si_code = (fault == VM_FAULT_BADMAP) ?
 				SEGV_MAPERR : SEGV_ACCERR;
@@ -229,15 +230,19 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 	case VM_FAULT_BADCONTEXT:
 		do_no_context(regs);
 		break;
+	case VM_FAULT_SIGNAL:
+		if (!user_mode(regs))
+			do_no_context(regs);
+		break;
 	default: /* fault & VM_FAULT_ERROR */
 		if (fault & VM_FAULT_OOM) {
-			if (!(regs->psw.mask & PSW_MASK_PSTATE))
+			if (!user_mode(regs))
 				do_no_context(regs);
 			else
 				pagefault_out_of_memory();
 		} else if (fault & VM_FAULT_SIGBUS) {
 			/* Kernel mode? Handle exceptions or die */
-			if (!(regs->psw.mask & PSW_MASK_PSTATE))
+			if (!user_mode(regs))
 				do_no_context(regs);
 			else
 				do_sigbus(regs);
@@ -286,7 +291,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-	flags = FAULT_FLAG_ALLOW_RETRY;
+	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
 		flags |= FAULT_FLAG_WRITE;
 	down_read(&mm->mmap_sem);
@@ -335,6 +340,11 @@ retry:
 	 * the fault.
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
+	/* No reason to continue if interrupted by SIGKILL. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+		fault = VM_FAULT_SIGNAL;
+		goto out;
+	}
 	if (unlikely(fault & VM_FAULT_ERROR))
 		goto out_up;
 
@@ -426,7 +436,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
 	}
 
 	/* User mode accesses just cause a SIGSEGV */
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		do_sigsegv(regs, SEGV_MAPERR);
 		return;
 	}
@@ -441,6 +451,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 	struct pt_regs regs;
 	int access, fault;
 
+	/* Emulate a uaccess fault from kernel mode. */
 	regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
 	if (!irqs_disabled())
 		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
@@ -450,12 +461,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 	regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
 	access = write ? VM_WRITE : VM_READ;
 	fault = do_exception(&regs, access);
-	if (unlikely(fault)) {
-		if (fault & VM_FAULT_OOM)
-			return -EFAULT;
-		else if (fault & VM_FAULT_SIGBUS)
-			do_sigbus(&regs);
-	}
+	/*
+	 * Since the fault happened in kernel mode while performing a uaccess
+	 * all we need to do now is emulating a fixup in case "fault" is not
+	 * zero.
+	 * For the calling uaccess functions this results always in -EFAULT.
+	 */
 	return fault ? -EFAULT : 0;
 }
 
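Taken together, the fault.c hunks above implement the "killable page faults" item from the merge text: FAULT_FLAG_KILLABLE lets handle_mm_fault() give up and return VM_FAULT_RETRY (with mmap_sem already released) when the faulting task has a fatal signal pending, and the new VM_FAULT_SIGNAL code keeps kernel-mode faults flowing into the usual exception fixup path. A condensed sketch of the control flow — not the literal source, retry and locking details omitted:

    /* Hedged sketch of the killable-fault pattern added in do_exception(). */
    flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
    fault = handle_mm_fault(mm, vma, address, flags);
    if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
            /* mmap_sem was dropped by handle_mm_fault(); skip the up_read() */
            fault = VM_FAULT_SIGNAL;
            /* do_fault_error(): a user task simply dies from the signal,
             * a kernel uaccess is routed into do_no_context() for fixup */
    }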
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 573384256c5c..c59a5efa58b1 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -103,9 +103,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 int s390_mmap_check(unsigned long addr, unsigned long len)
 {
+	int rc;
+
 	if (!is_compat_task() &&
-	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
-		return crst_table_upgrade(current->mm, 1UL << 53);
+	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+		rc = crst_table_upgrade(current->mm, 1UL << 53);
+		if (rc)
+			return rc;
+		update_mm(current->mm, current);
+	}
 	return 0;
 }
 
@@ -125,6 +131,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	}
 	return area;
@@ -147,6 +154,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
 						      pgoff, flags);
 	}
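A convention shift ties the mmap.c hunks above to the processor.h hunk earlier and the pgtable.c hunks below: crst_table_upgrade() and crst_table_downgrade() no longer reload the user ASCE themselves, so each caller now pairs the resize with an explicit update_mm(). A minimal sketch of the new calling convention, using the names from this series:

    /* Sketch: resize the region table, then reload the ASCE so the new
     * top-level table actually takes effect for the current task. */
    rc = crst_table_upgrade(mm, 1UL << 53);  /* may allocate and can fail */
    if (rc)
            return rc;
    update_mm(mm, current);                  /* make the new pgd visible */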
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1cab221077cc..18df31d1f2c9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -85,7 +85,6 @@ repeat:
 		crst_table_free(mm, table);
 	if (mm->context.asce_limit < limit)
 		goto repeat;
-	update_mm(mm, current);
 	return 0;
 }
 
@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
 	pgd_t *pgd;
 
-	if (mm->context.asce_limit <= limit)
-		return;
-	__tlb_flush_mm(mm);
 	while (mm->context.asce_limit > limit) {
 		pgd = mm->pgd;
 		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		mm->task_size = mm->context.asce_limit;
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
-	update_mm(mm, current);
 }
 #endif
 
@@ -801,7 +796,7 @@ int s390_enable_sie(void)
 	struct mm_struct *mm, *old_mm;
 
 	/* Do we have switched amode? If no, we cannot do sie */
-	if (user_mode == HOME_SPACE_MODE)
+	if (addressing_mode == HOME_SPACE_MODE)
 		return -EINVAL;
 
 	/* Do we have pgstes? if yes, we are done */
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
index c82f62fb9c28..8a6811b2cdb9 100644
--- a/arch/s390/oprofile/backtrace.c
+++ b/arch/s390/oprofile/backtrace.c
@@ -58,7 +58,7 @@ void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
 	unsigned long head;
 	struct stack_frame* head_sf;
 
-	if (user_mode (regs))
+	if (user_mode(regs))
 		return;
 
 	head = regs->gprs[15];