Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                  |  4
-rw-r--r--  arch/s390/appldata/appldata_base.c |  8
-rw-r--r--  arch/s390/defconfig                | 11
-rw-r--r--  arch/s390/kernel/dis.c             |  2
-rw-r--r--  arch/s390/kernel/smp.c             |  4
-rw-r--r--  arch/s390/kvm/diag.c               |  2
-rw-r--r--  arch/s390/kvm/interrupt.c          |  7
-rw-r--r--  arch/s390/kvm/kvm-s390.c           | 13
-rw-r--r--  arch/s390/mm/init.c                | 49
-rw-r--r--  arch/s390/mm/pgtable.c             | 44
-rw-r--r--  arch/s390/mm/vmem.c                | 20
11 files changed, 104 insertions(+), 60 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1d035082e78e..107e492cb47e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -304,10 +304,14 @@ config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	select SPARSEMEM_VMEMMAP_ENABLE
 	select SPARSEMEM_VMEMMAP
+	select SPARSEMEM_STATIC if !64BIT
 
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
 
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 655d52543e2d..ad40729bec3d 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -130,6 +130,7 @@ static void appldata_work_fn(struct work_struct *work)
 
 	P_DEBUG(" -= Work Queue =-\n");
 	i = 0;
+	get_online_cpus();
 	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
@@ -140,6 +141,7 @@ static void appldata_work_fn(struct work_struct *work)
 		}
 	}
 	spin_unlock(&appldata_ops_lock);
+	put_online_cpus();
 }
 
 /*
@@ -266,12 +268,14 @@ appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
 	len = *lenp;
 	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
 		return -EFAULT;
+	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (buf[0] == '1')
 		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
 	else if (buf[0] == '0')
 		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
 	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
 out:
 	*lenp = len;
 	*ppos += len;
@@ -314,10 +318,12 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
 		return -EINVAL;
 	}
 
+	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	appldata_interval = interval;
 	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
 	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
 
 	P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
 		interval);
@@ -556,8 +562,10 @@ static int __init appldata_init(void)
 		return -ENOMEM;
 	}
 
+	get_online_cpus();
 	for_each_online_cpu(i)
 		appldata_online_cpu(i);
+	put_online_cpus();
 
 	/* Register cpu hotplug notifier */
 	register_hotcpu_notifier(&appldata_nb);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index aa341d0ea1e6..c5cdb975d590 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Wed Apr 30 11:07:45 2008
+# Linux kernel version: 2.6.26-rc4
+# Fri May 30 09:49:33 2008
 #
 CONFIG_SCHED_MC=y
 CONFIG_MMU=y
@@ -103,6 +103,7 @@ CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 CONFIG_MODVERSIONS=y
@@ -173,6 +174,7 @@ CONFIG_PREEMPT=y
 # CONFIG_PREEMPT_RCU is not set
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_FLATMEM_MANUAL is not set
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -210,6 +212,7 @@ CONFIG_FORCE_MAX_ZONEORDER=9
 CONFIG_PFAULT=y
 # CONFIG_SHARED_KERNEL is not set
 # CONFIG_CMM is not set
+# CONFIG_PAGE_STATES is not set
 CONFIG_VIRT_TIMER=y
 CONFIG_VIRT_CPU_ACCOUNTING=y
 # CONFIG_APPLDATA_BASE is not set
@@ -620,6 +623,7 @@ CONFIG_S390_VMUR=m
 #
 # CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+CONFIG_ACCESSIBILITY=y
 
 #
 # File systems
@@ -754,11 +758,12 @@ CONFIG_FRAME_WARN=2048
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_CHECK=y
+# CONFIG_HEADERS_CHECK is not set
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
 # CONFIG_DEBUG_RT_MUTEXES is not set
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c14a336f6300..d2f270c995d9 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -208,7 +208,7 @@ static const unsigned char formats[][7] = {
 	[INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 },	/* e.g. madbr */
 	[INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */
 	[INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */
-	[INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 },	/* e.g. idte */
+	[INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },	/* e.g. idte */
 	[INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },	/* e.g. fixr */
 	[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },	/* e.g. cfebr */
 	[INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },	/* e.g. sske */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1f4228948dc4..5d4fa4b1c74c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -711,7 +711,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	memset(sf, 0, sizeof(struct stack_frame));
 	sf->gprs[9] = (unsigned long) sf;
 	cpu_lowcore->save_area[15] = (unsigned long) sf;
-	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
+	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
 	asm volatile(
 		" stam 0,15,0(%0)"
 		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
@@ -1089,7 +1089,7 @@ out:
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-int smp_rescan_cpus(void)
+int __ref smp_rescan_cpus(void)
 {
 	cpumask_t newcpus;
 	int cpu;
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index f639a152869f..a0775e1f08df 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -20,7 +20,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
 	vcpu->stat.diagnose_44++;
 	vcpu_put(vcpu);
-	schedule();
+	yield();
 	vcpu_load(vcpu);
 	return 0;
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fcd1ed8015c1..84a7fed4cd4e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -339,6 +339,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	if (kvm_cpu_has_interrupt(vcpu))
 		return 0;
 
+	__set_cpu_idle(vcpu);
+	spin_lock_bh(&vcpu->arch.local_int.lock);
+	vcpu->arch.local_int.timer_due = 0;
+	spin_unlock_bh(&vcpu->arch.local_int.lock);
+
 	if (psw_interrupts_disabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
 		__unset_cpu_idle(vcpu);
@@ -366,8 +371,6 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
 	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
-	__set_cpu_idle(vcpu);
-	vcpu->arch.local_int.timer_due = 0;
 	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
 		list_empty(&vcpu->arch.local_int.float_int->list) &&
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0ac36a649eba..6558b09ff579 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -423,6 +423,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -EINVAL; /* not implemented yet */
 }
 
+extern void s390_handle_mcck(void);
+
 static void __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
@@ -430,13 +432,21 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	if (need_resched())
 		schedule();
 
+	if (test_thread_flag(TIF_MCCK_PENDING))
+		s390_handle_mcck();
+
+	kvm_s390_deliver_pending_interrupts(vcpu);
+
 	vcpu->arch.sie_block->icptcode = 0;
 	local_irq_disable();
 	kvm_guest_enter();
 	local_irq_enable();
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
-	sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
+		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
 	local_irq_disable();
@@ -475,7 +485,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	might_sleep();
 
 	do {
-		kvm_s390_deliver_pending_interrupts(vcpu);
 		__vcpu_run(vcpu);
 		rc = kvm_handle_sie_intercept(vcpu);
 	} while (!signal_pending(current) && !rc);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 29f3a63806b9..05598649b326 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,37 +44,34 @@ char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
 void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	unsigned long i, total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	unsigned long flags;
 	struct page *page;
+	pg_data_t *pgdat;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
-			continue;
-		page = pfn_to_page(i);
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (PageSwapCache(page))
-			cached++;
-		else if (page_count(page))
-			shared += page_count(page) - 1;
+	for_each_online_pgdat(pgdat) {
+		pgdat_resize_lock(pgdat, &flags);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			if (!pfn_valid(pgdat->node_start_pfn + i))
+				continue;
+			page = pfn_to_page(pgdat->node_start_pfn + i);
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
-	printk("%d pages of RAM\n", total);
-	printk("%d reserved pages\n", reserved);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
-
-	printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-	printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
-	printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-	printk("%lu pages slab\n",
-	       global_page_state(NR_SLAB_RECLAIMABLE) +
-	       global_page_state(NR_SLAB_UNRECLAIMABLE));
-	printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
+	printk("%ld pages of RAM\n", total);
+	printk("%ld reserved pages\n", reserved);
+	printk("%ld pages shared\n", shared);
+	printk("%ld pages swap cached\n", cached);
 }
 
 /*
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5c1aea97cd12..3d98ba82ea67 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 int s390_enable_sie(void)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm;
-	int rc;
+	struct mm_struct *mm, *old_mm;
 
-	task_lock(tsk);
-
-	rc = 0;
+	/* Do we have pgstes? if yes, we are done */
 	if (tsk->mm->context.pgstes)
-		goto unlock;
+		return 0;
 
-	rc = -EINVAL;
+	/* lets check if we are allowed to replace the mm */
+	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
-		goto unlock;
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+	task_unlock(tsk);
 
-	tsk->mm->context.pgstes = 1;	/* dirty little tricks .. */
+	/* we copy the mm with pgstes enabled */
+	tsk->mm->context.pgstes = 1;
 	mm = dup_mm(tsk);
 	tsk->mm->context.pgstes = 0;
-
-	rc = -ENOMEM;
 	if (!mm)
-		goto unlock;
-	mmput(tsk->mm);
+		return -ENOMEM;
+
+	/* Now lets check again if somebody attached ptrace etc */
+	task_lock(tsk);
+	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		mmput(mm);
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+
+	/* ok, we are alone. No ptrace, no threads, etc. */
+	old_mm = tsk->mm;
 	tsk->mm = tsk->active_mm = mm;
 	preempt_disable();
 	update_mm(mm, tsk);
 	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 	preempt_enable();
-	rc = 0;
-unlock:
 	task_unlock(tsk);
-	return rc;
+	mmput(old_mm);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ea2804808f39..e4868bfc672f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,12 +27,19 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-static pud_t *vmem_pud_alloc(void)
+static void __ref *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
 	return pud;
 }
 
-static pmd_t *vmem_pmd_alloc(void)
+static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		if (pte_none(*pt_dir)) {
 			unsigned long new_page;
 
-			new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
+			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
 			*pt_dir = pte;
 		}
 	}
+	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
 out:
 	flush_tlb_kernel_range(start_addr, end_addr);
@@ -228,7 +236,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (seg->start + seg->size >= VMEM_MAX_PHYS ||
+	if (seg->start + seg->size > VMEM_MAX_PHYS ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;
 