aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-04-08 15:02:28 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-04-08 15:02:28 -0400
commitd586c86d50cefa0897a51a2dbc714060ccedae76 (patch)
tree76a7f454637badb74390047aebca5c071c0988fe /arch/s390/mm
parente9f37d3a8d126e73f5737ef548cdf6f618e295e4 (diff)
parent457f2180951cdcbfb4657ddcc83b486e93497f56 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull second set of s390 patches from Martin Schwidefsky: "The second part of Heiko's uaccess rework; the page table walker for uaccess is now a thing of the past (yay!). The code change to fix the theoretical TLB flush problem allows us to add a TLB flush optimization for zEC12; this machine has new instructions that allow us to do CPU-local TLB flushes for single pages and for all pages of a specific address space. Plus the usual bug fixing and some more cleanup." * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: s390/uaccess: rework uaccess code - fix locking issues s390/mm,tlb: optimize TLB flushing for zEC12 s390/mm,tlb: safeguard against speculative TLB creation s390/irq: Use defines for external interruption codes s390/irq: Add defines for external interruption codes s390/sclp: add timeout for queued requests kvm/s390: also set guest pages back to stable on kexec/kdump lcs: Add missing destroy_timer_on_stack() s390/tape: Add missing destroy_timer_on_stack() s390/tape: Use del_timer_sync() s390/3270: fix crash with multiple reset device requests s390/bitops,atomic: add missing memory barriers s390/zcrypt: add length check for aligned data to avoid overflow in msg-type 6
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--arch/s390/mm/fault.c53
-rw-r--r--arch/s390/mm/hugetlbpage.c5
-rw-r--r--arch/s390/mm/init.c7
-rw-r--r--arch/s390/mm/pgtable.c12
-rw-r--r--arch/s390/mm/vmem.c2
5 files changed, 29 insertions, 50 deletions
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 88cef505453b..19f623f1f21c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -106,21 +106,24 @@ void bust_spinlocks(int yes)
106 * Returns the address space associated with the fault. 106 * Returns the address space associated with the fault.
107 * Returns 0 for kernel space and 1 for user space. 107 * Returns 0 for kernel space and 1 for user space.
108 */ 108 */
109static inline int user_space_fault(unsigned long trans_exc_code) 109static inline int user_space_fault(struct pt_regs *regs)
110{ 110{
111 unsigned long trans_exc_code;
112
111 /* 113 /*
112 * The lowest two bits of the translation exception 114 * The lowest two bits of the translation exception
113 * identification indicate which paging table was used. 115 * identification indicate which paging table was used.
114 */ 116 */
115 trans_exc_code &= 3; 117 trans_exc_code = regs->int_parm_long & 3;
116 if (trans_exc_code == 2) 118 if (trans_exc_code == 3) /* home space -> kernel */
117 /* Access via secondary space, set_fs setting decides */ 119 return 0;
120 if (user_mode(regs))
121 return 1;
122 if (trans_exc_code == 2) /* secondary space -> set_fs */
118 return current->thread.mm_segment.ar4; 123 return current->thread.mm_segment.ar4;
119 /* 124 if (current->flags & PF_VCPU)
120 * Access via primary space or access register is from user space 125 return 1;
121 * and access via home space is from the kernel. 126 return 0;
122 */
123 return trans_exc_code != 3;
124} 127}
125 128
126static inline void report_user_fault(struct pt_regs *regs, long signr) 129static inline void report_user_fault(struct pt_regs *regs, long signr)
@@ -172,7 +175,7 @@ static noinline void do_no_context(struct pt_regs *regs)
172 * terminate things with extreme prejudice. 175 * terminate things with extreme prejudice.
173 */ 176 */
174 address = regs->int_parm_long & __FAIL_ADDR_MASK; 177 address = regs->int_parm_long & __FAIL_ADDR_MASK;
175 if (!user_space_fault(regs->int_parm_long)) 178 if (!user_space_fault(regs))
176 printk(KERN_ALERT "Unable to handle kernel pointer dereference" 179 printk(KERN_ALERT "Unable to handle kernel pointer dereference"
177 " at virtual kernel address %p\n", (void *)address); 180 " at virtual kernel address %p\n", (void *)address);
178 else 181 else
@@ -296,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
296 * user context. 299 * user context.
297 */ 300 */
298 fault = VM_FAULT_BADCONTEXT; 301 fault = VM_FAULT_BADCONTEXT;
299 if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) 302 if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
300 goto out; 303 goto out;
301 304
302 address = trans_exc_code & __FAIL_ADDR_MASK; 305 address = trans_exc_code & __FAIL_ADDR_MASK;
@@ -441,30 +444,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
441 do_fault_error(regs, fault); 444 do_fault_error(regs, fault);
442} 445}
443 446
444int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
445{
446 struct pt_regs regs;
447 int access, fault;
448
449 /* Emulate a uaccess fault from kernel mode. */
450 regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
451 if (!irqs_disabled())
452 regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
453 regs.psw.addr = (unsigned long) __builtin_return_address(0);
454 regs.psw.addr |= PSW_ADDR_AMODE;
455 regs.int_code = pgm_int_code;
456 regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
457 access = write ? VM_WRITE : VM_READ;
458 fault = do_exception(&regs, access);
459 /*
460 * Since the fault happened in kernel mode while performing a uaccess
461 * all we need to do now is emulating a fixup in case "fault" is not
462 * zero.
463 * For the calling uaccess functions this results always in -EFAULT.
464 */
465 return fault ? -EFAULT : 0;
466}
467
468#ifdef CONFIG_PFAULT 447#ifdef CONFIG_PFAULT
469/* 448/*
470 * 'pfault' pseudo page faults routines. 449 * 'pfault' pseudo page faults routines.
@@ -645,7 +624,7 @@ static int __init pfault_irq_init(void)
645{ 624{
646 int rc; 625 int rc;
647 626
648 rc = register_external_interrupt(0x2603, pfault_interrupt); 627 rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
649 if (rc) 628 if (rc)
650 goto out_extint; 629 goto out_extint;
651 rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; 630 rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
@@ -656,7 +635,7 @@ static int __init pfault_irq_init(void)
656 return 0; 635 return 0;
657 636
658out_pfault: 637out_pfault:
659 unregister_external_interrupt(0x2603, pfault_interrupt); 638 unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
660out_extint: 639out_extint:
661 pfault_disable = 1; 640 pfault_disable = 1;
662 return rc; 641 return rc;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index d261c62e40a6..0727a55d87d9 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -123,10 +123,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
123 pmd_t *pmdp = (pmd_t *) ptep; 123 pmd_t *pmdp = (pmd_t *) ptep;
124 pte_t pte = huge_ptep_get(ptep); 124 pte_t pte = huge_ptep_get(ptep);
125 125
126 if (MACHINE_HAS_IDTE) 126 pmdp_flush_direct(mm, addr, pmdp);
127 __pmd_idte(addr, pmdp);
128 else
129 __pmd_csp(pmdp);
130 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; 127 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
131 return pte; 128 return pte;
132} 129}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ad446b0c55b6..0c1073ed1e84 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -124,8 +124,6 @@ void __init paging_init(void)
124 __ctl_load(S390_lowcore.kernel_asce, 13, 13); 124 __ctl_load(S390_lowcore.kernel_asce, 13, 13);
125 arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); 125 arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
126 126
127 atomic_set(&init_mm.context.attach_count, 1);
128
129 sparse_memory_present_with_active_regions(MAX_NUMNODES); 127 sparse_memory_present_with_active_regions(MAX_NUMNODES);
130 sparse_init(); 128 sparse_init();
131 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 129 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
@@ -136,6 +134,11 @@ void __init paging_init(void)
136 134
137void __init mem_init(void) 135void __init mem_init(void)
138{ 136{
137 if (MACHINE_HAS_TLB_LC)
138 cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
139 cpumask_set_cpu(0, mm_cpumask(&init_mm));
140 atomic_set(&init_mm.context.attach_count, 1);
141
139 max_mapnr = max_low_pfn; 142 max_mapnr = max_low_pfn;
140 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 143 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
141 144
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5d8324cd866b..d7cfd57815fb 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -54,7 +54,7 @@ static void __crst_table_upgrade(void *arg)
54 struct mm_struct *mm = arg; 54 struct mm_struct *mm = arg;
55 55
56 if (current->active_mm == mm) 56 if (current->active_mm == mm)
57 update_mm(mm, current); 57 update_user_asce(mm, 1);
58 __tlb_flush_local(); 58 __tlb_flush_local();
59} 59}
60 60
@@ -107,8 +107,10 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
107{ 107{
108 pgd_t *pgd; 108 pgd_t *pgd;
109 109
110 if (current->active_mm == mm) 110 if (current->active_mm == mm) {
111 clear_user_asce(mm, 1);
111 __tlb_flush_mm(mm); 112 __tlb_flush_mm(mm);
113 }
112 while (mm->context.asce_limit > limit) { 114 while (mm->context.asce_limit > limit) {
113 pgd = mm->pgd; 115 pgd = mm->pgd;
114 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { 116 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -132,7 +134,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
132 crst_table_free(mm, (unsigned long *) pgd); 134 crst_table_free(mm, (unsigned long *) pgd);
133 } 135 }
134 if (current->active_mm == mm) 136 if (current->active_mm == mm)
135 update_mm(mm, current); 137 update_user_asce(mm, 1);
136} 138}
137#endif 139#endif
138 140
@@ -198,7 +200,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
198static void gmap_flush_tlb(struct gmap *gmap) 200static void gmap_flush_tlb(struct gmap *gmap)
199{ 201{
200 if (MACHINE_HAS_IDTE) 202 if (MACHINE_HAS_IDTE)
201 __tlb_flush_idte((unsigned long) gmap->table | 203 __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
202 _ASCE_TYPE_REGION1); 204 _ASCE_TYPE_REGION1);
203 else 205 else
204 __tlb_flush_global(); 206 __tlb_flush_global();
@@ -217,7 +219,7 @@ void gmap_free(struct gmap *gmap)
217 219
218 /* Flush tlb. */ 220 /* Flush tlb. */
219 if (MACHINE_HAS_IDTE) 221 if (MACHINE_HAS_IDTE)
220 __tlb_flush_idte((unsigned long) gmap->table | 222 __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
221 _ASCE_TYPE_REGION1); 223 _ASCE_TYPE_REGION1);
222 else 224 else
223 __tlb_flush_global(); 225 __tlb_flush_global();
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index bcfb70b60be6..72b04de18283 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -138,7 +138,6 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
138 } 138 }
139 ret = 0; 139 ret = 0;
140out: 140out:
141 flush_tlb_kernel_range(start, end);
142 return ret; 141 return ret;
143} 142}
144 143
@@ -265,7 +264,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
265 memset((void *)start, 0, end - start); 264 memset((void *)start, 0, end - start);
266 ret = 0; 265 ret = 0;
267out: 266out:
268 flush_tlb_kernel_range(start, end);
269 return ret; 267 return ret;
270} 268}
271 269