Diffstat (limited to 'arch/s390/mm')
 arch/s390/mm/extmem.c      |   6
 arch/s390/mm/fault.c       | 187
 arch/s390/mm/hugetlbpage.c |  10
 arch/s390/mm/init.c        |   3
 arch/s390/mm/pageattr.c    |   2
 arch/s390/mm/pgtable.c     |  68
 arch/s390/mm/vmem.c        |  14
 7 files changed, 119 insertions(+), 171 deletions(-)
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 3cc95dd0a3a6..075ddada4911 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -412,6 +412,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
         struct dcss_segment *seg;
         int rc, diag_cc;

+        start_addr = end_addr = 0;
         seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
         if (seg == NULL) {
                 rc = -ENOMEM;
@@ -573,6 +574,7 @@ segment_modify_shared (char *name, int do_nonshared)
         unsigned long start_addr, end_addr, dummy;
         int rc, diag_cc;

+        start_addr = end_addr = 0;
         mutex_lock(&dcss_lock);
         seg = segment_by_name (name);
         if (seg == NULL) {
@@ -681,8 +683,6 @@ void
 segment_save(char *name)
 {
         struct dcss_segment *seg;
-        int startpfn = 0;
-        int endpfn = 0;
         char cmd1[160];
         char cmd2[80];
         int i, response;
@@ -698,8 +698,6 @@ segment_save(char *name)
                 goto out;
         }

-        startpfn = seg->start_addr >> PAGE_SHIFT;
-        endpfn = (seg->end) >> PAGE_SHIFT;
         sprintf(cmd1, "DEFSEG %s", name);
         for (i=0; i<seg->segcnt; i++) {
                 sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
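Note: the two hunks above zero start_addr/end_addr before they are handed down, presumably so the error paths never see junk values (and to quiet uninitialized-use warnings); the unused startpfn/endpfn locals are dropped. A minimal userspace sketch of the pattern, with query_range() as a hypothetical stand-in for the diagnose helper:

    #include <stdio.h>

    /* Hypothetical stand-in for a helper that may fail without
     * writing to its output parameters. */
    static int query_range(unsigned long *start, unsigned long *end)
    {
            return -1;              /* simulate the failure path */
    }

    int main(void)
    {
            unsigned long start, end;

            start = end = 0;        /* defined values on every path */
            if (query_range(&start, &end))
                    printf("lookup failed, range still sane: %lx-%lx\n",
                           start, end);
            return 0;
    }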
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ab988135e5c6..a0f9e730f26a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -225,33 +225,6 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code,
         force_sig_info(SIGBUS, &si, tsk);
 }

-#ifdef CONFIG_S390_EXEC_PROTECT
-static noinline int signal_return(struct pt_regs *regs, long int_code,
-                                  unsigned long trans_exc_code)
-{
-        u16 instruction;
-        int rc;
-
-        rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
-
-        if (!rc && instruction == 0x0a77) {
-                clear_tsk_thread_flag(current, TIF_PER_TRAP);
-                if (is_compat_task())
-                        sys32_sigreturn();
-                else
-                        sys_sigreturn();
-        } else if (!rc && instruction == 0x0aad) {
-                clear_tsk_thread_flag(current, TIF_PER_TRAP);
-                if (is_compat_task())
-                        sys32_rt_sigreturn();
-                else
-                        sys_rt_sigreturn();
-        } else
-                do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
-        return 0;
-}
-#endif /* CONFIG_S390_EXEC_PROTECT */
-
 static noinline void do_fault_error(struct pt_regs *regs, long int_code,
                                     unsigned long trans_exc_code, int fault)
 {
@@ -259,13 +232,6 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,

         switch (fault) {
         case VM_FAULT_BADACCESS:
-#ifdef CONFIG_S390_EXEC_PROTECT
-                if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
-                    (trans_exc_code & 3) == 0) {
-                        signal_return(regs, int_code, trans_exc_code);
-                        break;
-                }
-#endif /* CONFIG_S390_EXEC_PROTECT */
         case VM_FAULT_BADMAP:
                 /* Bad memory access. Check if it is kernel or user space. */
                 if (regs->psw.mask & PSW_MASK_PSTATE) {
@@ -414,11 +380,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
         int access, fault;

         access = VM_READ | VM_EXEC | VM_WRITE;
-#ifdef CONFIG_S390_EXEC_PROTECT
-        if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
-            (trans_exc_code & 3) == 0)
-                access = VM_EXEC;
-#endif
         fault = do_exception(regs, access, trans_exc_code);
         if (unlikely(fault))
                 do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
@@ -491,22 +452,28 @@ static int __init nopfault(char *str)

 __setup("nopfault", nopfault);

-typedef struct {
-        __u16 refdiagc;
-        __u16 reffcode;
-        __u16 refdwlen;
-        __u16 refversn;
-        __u64 refgaddr;
-        __u64 refselmk;
-        __u64 refcmpmk;
-        __u64 reserved;
-} __attribute__ ((packed, aligned(8))) pfault_refbk_t;
+struct pfault_refbk {
+        u16 refdiagc;
+        u16 reffcode;
+        u16 refdwlen;
+        u16 refversn;
+        u64 refgaddr;
+        u64 refselmk;
+        u64 refcmpmk;
+        u64 reserved;
+} __attribute__ ((packed, aligned(8)));

 int pfault_init(void)
 {
-        pfault_refbk_t refbk =
-                { 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
-                  __PF_RES_FIELD };
+        struct pfault_refbk refbk = {
+                .refdiagc = 0x258,
+                .reffcode = 0,
+                .refdwlen = 5,
+                .refversn = 2,
+                .refgaddr = __LC_CURRENT_PID,
+                .refselmk = 1ULL << 48,
+                .refcmpmk = 1ULL << 48,
+                .reserved = __PF_RES_FIELD };
         int rc;

         if (!MACHINE_IS_VM || pfault_disable)
@@ -524,8 +491,12 @@ int pfault_init(void)

 void pfault_fini(void)
 {
-        pfault_refbk_t refbk =
-        { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
+        struct pfault_refbk refbk = {
+                .refdiagc = 0x258,
+                .reffcode = 1,
+                .refdwlen = 5,
+                .refversn = 2,
+        };

         if (!MACHINE_IS_VM || pfault_disable)
                 return;
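Note: both refbk initializers above move from positional to designated form, and the pfault_fini() hunk leans on a C99 guarantee: members omitted from a designated initializer are zero-initialized, so the trailing 0ULL fields of the old positional form can simply be left out. A small standalone check (struct refbk is a stand-in for struct pfault_refbk):

    #include <assert.h>
    #include <stdint.h>

    struct refbk {
            uint16_t diagc, fcode, dwlen, versn;
            uint64_t gaddr, selmk, cmpmk, reserved;
    };

    int main(void)
    {
            /* Only four members named; the rest start out as zero. */
            struct refbk r = {
                    .diagc = 0x258,
                    .fcode = 1,
                    .dwlen = 5,
                    .versn = 2,
            };

            assert(r.gaddr == 0 && r.selmk == 0 &&
                   r.cmpmk == 0 && r.reserved == 0);
            return 0;
    }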
@@ -537,11 +508,15 @@ void pfault_fini(void)
                 : : "a" (&refbk), "m" (refbk) : "cc");
 }

+static DEFINE_SPINLOCK(pfault_lock);
+static LIST_HEAD(pfault_list);
+
 static void pfault_interrupt(unsigned int ext_int_code,
                              unsigned int param32, unsigned long param64)
 {
         struct task_struct *tsk;
         __u16 subcode;
+        pid_t pid;

         /*
          * Get the external interruption subcode & pfault
@@ -553,44 +528,79 @@ static void pfault_interrupt(unsigned int ext_int_code,
         if ((subcode & 0xff00) != __SUBCODE_MASK)
                 return;
         kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
-
-        /*
-         * Get the token (= address of the task structure of the affected task).
-         */
-#ifdef CONFIG_64BIT
-        tsk = (struct task_struct *) param64;
-#else
-        tsk = (struct task_struct *) param32;
-#endif
-
+        if (subcode & 0x0080) {
+                /* Get the token (= pid of the affected task). */
+                pid = sizeof(void *) == 4 ? param32 : param64;
+                rcu_read_lock();
+                tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+                if (tsk)
+                        get_task_struct(tsk);
+                rcu_read_unlock();
+                if (!tsk)
+                        return;
+        } else {
+                tsk = current;
+        }
+        spin_lock(&pfault_lock);
         if (subcode & 0x0080) {
                 /* signal bit is set -> a page has been swapped in by VM */
-                if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
+                if (tsk->thread.pfault_wait == 1) {
                         /* Initial interrupt was faster than the completion
                          * interrupt. pfault_wait is valid. Set pfault_wait
                          * back to zero and wake up the process. This can
                          * safely be done because the task is still sleeping
                          * and can't produce new pfaults. */
                         tsk->thread.pfault_wait = 0;
+                        list_del(&tsk->thread.list);
                         wake_up_process(tsk);
-                        put_task_struct(tsk);
+                } else {
+                        /* Completion interrupt was faster than initial
+                         * interrupt. Set pfault_wait to -1 so the initial
+                         * interrupt doesn't put the task to sleep. */
+                        tsk->thread.pfault_wait = -1;
                 }
+                put_task_struct(tsk);
         } else {
                 /* signal bit not set -> a real page is missing. */
-                get_task_struct(tsk);
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
+                if (tsk->thread.pfault_wait == -1) {
                         /* Completion interrupt was faster than the initial
-                         * interrupt (swapped in a -1 for pfault_wait). Set
-                         * pfault_wait back to zero and exit. This can be
-                         * done safely because tsk is running in kernel
-                         * mode and can't produce new pfaults. */
+                         * interrupt (pfault_wait == -1). Set pfault_wait
+                         * back to zero and exit. */
                         tsk->thread.pfault_wait = 0;
-                        set_task_state(tsk, TASK_RUNNING);
-                        put_task_struct(tsk);
-                } else
+                } else {
+                        /* Initial interrupt arrived before completion
+                         * interrupt. Let the task sleep. */
+                        tsk->thread.pfault_wait = 1;
+                        list_add(&tsk->thread.list, &pfault_list);
+                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                         set_tsk_need_resched(tsk);
+                }
+        }
+        spin_unlock(&pfault_lock);
+}
+
+static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
+                                       unsigned long action, void *hcpu)
+{
+        struct thread_struct *thread, *next;
+        struct task_struct *tsk;
+
+        switch (action) {
+        case CPU_DEAD:
+        case CPU_DEAD_FROZEN:
+                spin_lock_irq(&pfault_lock);
+                list_for_each_entry_safe(thread, next, &pfault_list, list) {
+                        thread->pfault_wait = 0;
+                        list_del(&thread->list);
+                        tsk = container_of(thread, struct task_struct, thread);
+                        wake_up_process(tsk);
+                }
+                spin_unlock_irq(&pfault_lock);
+                break;
+        default:
+                break;
         }
+        return NOTIFY_OK;
 }

 static int __init pfault_irq_init(void)
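Note: the rework above replaces the xchg()-based handshake with a plain pfault_wait field protected by pfault_lock, plus a list of sleeping tasks so the new CPU-hotplug notifier can wake them when a CPU dies before their completion interrupts arrive. A compact standalone sketch of the three-valued protocol, assuming (as the code does) that both handlers run under the same lock:

    #include <assert.h>

    /* pfault_wait protocol: 0 = idle, 1 = initial interrupt seen
     * (task sleeps), -1 = completion arrived before the initial
     * interrupt (task must not be put to sleep). */
    enum { PF_IDLE = 0, PF_WAIT = 1, PF_EARLY_DONE = -1 };

    static void initial_irq(int *state)     /* "a real page is missing" */
    {
            if (*state == PF_EARLY_DONE)
                    *state = PF_IDLE;       /* completion won the race */
            else
                    *state = PF_WAIT;       /* let the task sleep */
    }

    static void completion_irq(int *state)  /* "page was swapped in" */
    {
            if (*state == PF_WAIT)
                    *state = PF_IDLE;       /* wake the sleeper */
            else
                    *state = PF_EARLY_DONE; /* beat the initial irq */
    }

    int main(void)
    {
            int s = PF_IDLE;

            initial_irq(&s); completion_irq(&s);    /* normal order */
            assert(s == PF_IDLE);
            completion_irq(&s); initial_irq(&s);    /* reversed order */
            assert(s == PF_IDLE);
            return 0;
    }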
@@ -599,22 +609,21 @@ static int __init pfault_irq_init(void)

         if (!MACHINE_IS_VM)
                 return 0;
-        /*
-         * Try to get pfault pseudo page faults going.
-         */
         rc = register_external_interrupt(0x2603, pfault_interrupt);
-        if (rc) {
-                pfault_disable = 1;
-                return rc;
-        }
-        if (pfault_init() == 0)
-                return 0;
+        if (rc)
+                goto out_extint;
+        rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
+        if (rc)
+                goto out_pfault;
+        hotcpu_notifier(pfault_cpu_notify, 0);
+        return 0;

-        /* Tough luck, no pfault. */
-        pfault_disable = 1;
+out_pfault:
         unregister_external_interrupt(0x2603, pfault_interrupt);
+out_extint:
+        pfault_disable = 1;
+        return rc;
 }
 early_initcall(pfault_irq_init);

-#endif
+#endif /* CONFIG_PFAULT */
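Note: pfault_irq_init() now follows the usual kernel unwind idiom: each failure jumps to a label that undoes exactly the steps already completed, and pfault_disable is set on every error path. A generic standalone sketch (step1/step2/undo_step1 are hypothetical):

    #include <stdio.h>

    static int step1(void)       { return 0; }   /* e.g. register irq */
    static int step2(void)       { return -1; }  /* e.g. pfault_init */
    static void undo_step1(void) { puts("unregistering step1"); }

    static int setup(void)
    {
            int rc;

            rc = step1();
            if (rc)
                    goto out;
            rc = step2();
            if (rc)
                    goto out_undo1;
            return 0;           /* fully initialized */

    out_undo1:
            undo_step1();       /* roll back only what succeeded */
    out:
            return rc;
    }

    int main(void)
    {
            printf("setup: %d\n", setup());
            return 0;
    }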
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 639cd21f2218..a4d856db9154 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -13,7 +13,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *pteptr, pte_t pteval)
 {
         pmd_t *pmdp = (pmd_t *) pteptr;
-        pte_t shadow_pteval = pteval;
         unsigned long mask;

         if (!MACHINE_HAS_HPAGE) {
@@ -21,18 +20,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                 mask = pte_val(pteval) &
                         (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
                 pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
-                if (mm->context.noexec) {
-                        pteptr += PTRS_PER_PTE;
-                        pte_val(shadow_pteval) =
-                                (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
-                }
         }

         pmd_val(*pmdp) = pte_val(pteval);
-        if (mm->context.noexec) {
-                pmdp = get_shadow_table(pmdp);
-                pmd_val(*pmdp) = pte_val(shadow_pteval);
-        }
 }

 int arch_prepare_hugepage(struct page *page)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index bb409332a484..dfefc2171691 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -175,7 +175,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
                 pmd = pmd_offset(pud, address);
                 pte = pte_offset_kernel(pmd, address);
                 if (!enable) {
-                        ptep_invalidate(&init_mm, address, pte);
+                        __ptep_ipte(address, pte);
+                        pte_val(*pte) = _PAGE_TYPE_EMPTY;
                         continue;
                 }
                 *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index f05edcc3beff..d013ed39743b 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -28,7 +28,7 @@ static void change_page_attr(unsigned long addr, int numpages,

                 pte = *ptep;
                 pte = set(pte);
-                ptep_invalidate(&init_mm, addr, ptep);
+                __ptep_ipte(addr, ptep);
                 *ptep = pte;
                 addr += PAGE_SIZE;
         }
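Note: this hunk and the init.c one both replace ptep_invalidate() with a bare __ptep_ipte(). The old helper also handled the shadow page table used by the now-removed noexec support; for kernel mappings, flushing the translation and then rewriting the PTE directly is enough. The shape of the pattern, with hypothetical stand-in types:

    typedef unsigned long pte_t;

    #define PTE_INVALID     1UL     /* pretend bit 0 is the invalid bit */

    /* Models the IPTE instruction: invalidate the PTE and purge the
     * matching TLB entry before the entry is rewritten. */
    static void ipte(unsigned long addr, pte_t *ptep)
    {
            (void)addr;             /* the real IPTE takes the address too */
            *ptep |= PTE_INVALID;
    }

    static void update_kernel_pte(unsigned long addr, pte_t *ptep, pte_t new)
    {
            ipte(addr, ptep);       /* 1. flush the old translation */
            *ptep = new;            /* 2. install the replacement */
    }

    int main(void)
    {
            pte_t pte = 0x1000;

            update_kernel_pte(0, &pte, 0x2000);
            return pte == 0x2000 ? 0 : 1;
    }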
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index e1850c28cd68..8d4330642512 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -40,7 +40,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

 static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table);

 static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
 {
@@ -67,7 +66,7 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
         while (batch->pgt_index > 0)
                 __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
         while (batch->crst_index < RCU_FREELIST_SIZE)
-                __crst_table_free(batch->mm, batch->table[batch->crst_index++]);
+                crst_table_free(batch->mm, batch->table[batch->crst_index++]);
         free_page((unsigned long) batch);
 }

@@ -125,63 +124,33 @@ static int __init parse_vmalloc(char *arg)
 }
 early_param("vmalloc", parse_vmalloc);

-unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
+unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
         struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

         if (!page)
                 return NULL;
-        page->index = 0;
-        if (noexec) {
-                struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
-                if (!shadow) {
-                        __free_pages(page, ALLOC_ORDER);
-                        return NULL;
-                }
-                page->index = page_to_phys(shadow);
-        }
-        spin_lock_bh(&mm->context.list_lock);
-        list_add(&page->lru, &mm->context.crst_list);
-        spin_unlock_bh(&mm->context.list_lock);
         return (unsigned long *) page_to_phys(page);
 }

-static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
-{
-        unsigned long *shadow = get_shadow_table(table);
-
-        if (shadow)
-                free_pages((unsigned long) shadow, ALLOC_ORDER);
-        free_pages((unsigned long) table, ALLOC_ORDER);
-}
-
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
-        struct page *page = virt_to_page(table);
-
-        spin_lock_bh(&mm->context.list_lock);
-        list_del(&page->lru);
-        spin_unlock_bh(&mm->context.list_lock);
-        __crst_table_free(mm, table);
+        free_pages((unsigned long) table, ALLOC_ORDER);
 }

 void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 {
         struct rcu_table_freelist *batch;
-        struct page *page = virt_to_page(table);

-        spin_lock_bh(&mm->context.list_lock);
-        list_del(&page->lru);
-        spin_unlock_bh(&mm->context.list_lock);
         if (atomic_read(&mm->mm_users) < 2 &&
             cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-                __crst_table_free(mm, table);
+                crst_table_free(mm, table);
                 return;
         }
         batch = rcu_table_freelist_get(mm);
         if (!batch) {
                 smp_call_function(smp_sync, NULL, 1);
-                __crst_table_free(mm, table);
+                crst_table_free(mm, table);
                 return;
         }
         batch->table[--batch->crst_index] = table;
@@ -197,7 +166,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)

         BUG_ON(limit > (1UL << 53));
 repeat:
-        table = crst_table_alloc(mm, mm->context.noexec);
+        table = crst_table_alloc(mm);
         if (!table)
                 return -ENOMEM;
         spin_lock_bh(&mm->page_table_lock);
@@ -273,7 +242,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
         unsigned long *table;
         unsigned long bits;

-        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+        bits = (mm->context.has_pgste) ? 3UL : 1UL;
         spin_lock_bh(&mm->context.list_lock);
         page = NULL;
         if (!list_empty(&mm->context.pgtable_list)) {
@@ -329,7 +298,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
         struct page *page;
         unsigned long bits;

-        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+        bits = (mm->context.has_pgste) ? 3UL : 1UL;
         bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
         page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
         spin_lock_bh(&mm->context.list_lock);
@@ -366,7 +335,7 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
                 page_table_free(mm, table);
                 return;
         }
-        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+        bits = (mm->context.has_pgste) ? 3UL : 1UL;
         bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
         page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
         spin_lock_bh(&mm->context.list_lock);
@@ -379,25 +348,6 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
         rcu_table_freelist_finish();
 }

-void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
-{
-        struct page *page;
-
-        spin_lock_bh(&mm->context.list_lock);
-        /* Free shadow region and segment tables. */
-        list_for_each_entry(page, &mm->context.crst_list, lru)
-                if (page->index) {
-                        free_pages((unsigned long) page->index, ALLOC_ORDER);
-                        page->index = 0;
-                }
-        /* "Free" second halves of page tables. */
-        list_for_each_entry(page, &mm->context.pgtable_list, lru)
-                page->flags &= ~SECOND_HALVES;
-        spin_unlock_bh(&mm->context.list_lock);
-        mm->context.noexec = 0;
-        update_mm(mm, tsk);
-}
-
 /*
  * switch on pgstes for its userspace process (for kvm)
  */
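Note: throughout this file the noexec half of the test disappears; bits now only tracks whether a 4K page hosts one page-table fragment or (for pgste/KVM mms) both halves, and the shift selects the fragment from the table's offset within the page. A standalone check of the arithmetic, assuming the 256-entry, 2K page tables this tree uses:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            unsigned long offsets[] = { 0, 2048 };  /* two 2K fragments */

            for (int i = 0; i < 2; i++) {
                    unsigned long bits = 1UL;       /* 3UL if mm has pgste */

                    bits <<= (offsets[i] & (PAGE_SIZE - 1)) / 256
                             / sizeof(unsigned long);
                    printf("table at offset %4lu -> bits %lu\n",
                           offsets[i], bits);       /* prints 1, then 2 */
            }
            return 0;
    }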
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 34c43f23b28c..8c1970d1dd91 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -95,7 +95,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                         pu_dir = vmem_pud_alloc();
                         if (!pu_dir)
                                 goto out;
-                        pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+                        pgd_populate(&init_mm, pg_dir, pu_dir);
                 }

                 pu_dir = pud_offset(pg_dir, address);
@@ -103,7 +103,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                         pm_dir = vmem_pmd_alloc();
                         if (!pm_dir)
                                 goto out;
-                        pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+                        pud_populate(&init_mm, pu_dir, pm_dir);
                 }

                 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
@@ -123,7 +123,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                         pt_dir = vmem_pte_alloc();
                         if (!pt_dir)
                                 goto out;
-                        pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+                        pmd_populate(&init_mm, pm_dir, pt_dir);
                 }

                 pt_dir = pte_offset_kernel(pm_dir, address);
@@ -159,7 +159,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
                         continue;

                 if (pmd_huge(*pm_dir)) {
-                        pmd_clear_kernel(pm_dir);
+                        pmd_clear(pm_dir);
                         address += HPAGE_SIZE - PAGE_SIZE;
                         continue;
                 }
@@ -192,7 +192,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
                         pu_dir = vmem_pud_alloc();
                         if (!pu_dir)
                                 goto out;
-                        pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+                        pgd_populate(&init_mm, pg_dir, pu_dir);
                 }

                 pu_dir = pud_offset(pg_dir, address);
@@ -200,7 +200,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
                         pm_dir = vmem_pmd_alloc();
                         if (!pm_dir)
                                 goto out;
-                        pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+                        pud_populate(&init_mm, pu_dir, pm_dir);
                 }

                 pm_dir = pmd_offset(pu_dir, address);
@@ -208,7 +208,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
                         pt_dir = vmem_pte_alloc();
                         if (!pt_dir)
                                 goto out;
-                        pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+                        pmd_populate(&init_mm, pm_dir, pt_dir);
                 }

                 pt_dir = pte_offset_kernel(pm_dir, address);
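Note: all of the vmem.c hunks are the same mechanical rename. With the shadow tables gone there is no longer a distinction between the *_populate_kernel() and *_populate() helpers, so the plain names are used everywhere. The walk itself keeps the shape sketched below (hypothetical stand-ins): descend pgd -> pud -> pmd, allocating and populating any level that is still empty, then fill the leaf entry.

    #include <stdlib.h>

    #define ENTRIES 4       /* tiny stand-in for a real translation table */

    struct table { struct table *entry[ENTRIES]; };

    /* Allocate-and-populate a missing lower-level table, as the
     * vmem_*_alloc() + *_populate() pairs above do. */
    static struct table *walk(struct table *dir, unsigned long idx)
    {
            if (!dir->entry[idx])
                    dir->entry[idx] = calloc(1, sizeof(struct table));
            return dir->entry[idx];
    }

    int main(void)
    {
            struct table pgd = { { 0 } };
            struct table *pud, *pmd;

            pud = walk(&pgd, 0);    /* pgd_populate() on first touch */
            pmd = walk(pud, 1);     /* pud_populate() on first touch */
            return pmd ? 0 : 1;
    }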