author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 20:30:55 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 20:30:55 -0500
commit    27afc5dbda52ee3dbcd0bda7375c917c6936b470 (patch)
tree      47591400f85590d48fa71bbfa50e0707e20e4bd0 /arch/s390/mm
parent    70e71ca0af244f48a5dcf56dc435243792e3a495 (diff)
parent    351997810131565fe62aec2c366deccbf6bda3f4 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "The most notable change in this pull request is the ftrace rework from
  Heiko. It brings a small performance improvement and the groundwork to
  support a new gcc option that replaces the mcount blocks with a single
  nop.

  Two new s390-specific system calls are added to emulate user-space MMIO
  for PCI, an artifact of how PCI memory is accessed.

  Two patches touch memory management, with changes to common code. For
  KVM, mm_forbids_zeropage is added, which disables the empty zero page
  for an mm that is used by a KVM process. And as an optimization,
  pmdp_get_and_clear_full is added, analogous to ptep_get_and_clear_full.

  Some micro-optimizations for the cmpxchg and spinlock code.

  And, as usual, bug fixes and cleanups"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (46 commits)
  s390/cputime: fix 31-bit compile
  s390/scm_block: make the number of reqs per HW req configurable
  s390/scm_block: handle multiple requests in one HW request
  s390/scm_block: allocate aidaw pages only when necessary
  s390/scm_block: use mempool to manage aidaw requests
  s390/eadm: change timeout value
  s390/mm: fix memory leak of ptlock in pmd_free_tlb
  s390: use local symbol names in entry[64].S
  s390/ptrace: always include vector registers in core files
  s390/simd: clear vector register pointer on fork/clone
  s390: translate cputime magic constants to macros
  s390/idle: convert open coded idle time seqcount
  s390/idle: add missing irq off lockdep annotation
  s390/debug: avoid function call for debug_sprintf_*
  s390/kprobes: fix instruction copy for out of line execution
  s390: remove diag 44 calls from cpu_relax()
  s390/dasd: retry partition detection
  s390/dasd: fix list corruption for sleep_on requests
  s390/dasd: fix infinite term I/O loop
  s390/dasd: remove unused code
  ...
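The mm_forbids_zeropage hook mentioned above is an arch override that common
memory-management code consults before mapping the shared empty zero page. A
minimal sketch of the idea, assuming the generic default is a constant 0 and
that the anonymous read-fault path checks the hook; the helper
can_use_zero_page() is hypothetical, not the exact upstream code:

#include <linux/mm.h>

#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(mm) (0)     /* default: arch may override, e.g. s390 for KVM */
#endif

/* hypothetical helper modeled on the anonymous read-fault check */
static bool can_use_zero_page(struct vm_area_struct *vma,
                              unsigned int fault_flags)
{
        /* read fault, and the architecture does not veto the zero page */
        return !(fault_flags & FAULT_FLAG_WRITE) &&
               !mm_forbids_zeropage(vma->vm_mm);
}

When the hook returns true, subsequent faults allocate fresh anonymous pages
instead of sharing the zero page, which is what a KVM mm on s390 needs.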
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/fault.c    |  10
-rw-r--r--  arch/s390/mm/pgtable.c  | 185
2 files changed, 88 insertions(+), 107 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a2b81d6ce8a5..811937bb90be 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -261,8 +261,8 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
 		return;
 	if (!printk_ratelimit())
 		return;
-	printk(KERN_ALERT "User process fault: interruption code 0x%X ",
-	       regs->int_code);
+	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d",
+	       regs->int_code & 0xffff, regs->int_code >> 17);
 	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
 	printk(KERN_CONT "\n");
 	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
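The reworked message splits regs->int_code into its two fields: the low 16
bits carry the program interruption code, and the shift by 17 extracts the
instruction length code (ilc). A small user-space sketch of the same decoding;
struct fake_regs and the sample value are made up for illustration:

#include <stdio.h>

/* stand-in for the relevant field of struct pt_regs */
struct fake_regs { unsigned int int_code; };

int main(void)
{
        /* made-up value: ilc 3 in the upper bits, code 0x0004 (protection exception) */
        struct fake_regs regs = { .int_code = (3u << 17) | 0x0004 };

        printf("User process fault: interruption code %04x ilc:%d\n",
               regs.int_code & 0xffff,  /* low halfword: interruption code */
               regs.int_code >> 17);    /* remaining high bits: ilc */
        return 0;
}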
@@ -548,7 +548,7 @@ out:
 	return fault;
 }
 
-void __kprobes do_protection_exception(struct pt_regs *regs)
+void do_protection_exception(struct pt_regs *regs)
 {
 	unsigned long trans_exc_code;
 	int fault;
@@ -574,8 +574,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
 	if (unlikely(fault))
 		do_fault_error(regs, fault);
 }
+NOKPROBE_SYMBOL(do_protection_exception);
 
-void __kprobes do_dat_exception(struct pt_regs *regs)
+void do_dat_exception(struct pt_regs *regs)
 {
 	int access, fault;
 
@@ -584,6 +585,7 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
 	if (unlikely(fault))
 		do_fault_error(regs, fault);
 }
+NOKPROBE_SYMBOL(do_dat_exception);
 
 #ifdef CONFIG_PFAULT
 /*
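Both fault.c conversions above follow the same pattern: the old __kprobes
attribute placed the whole function in the .kprobes.text section, while
NOKPROBE_SYMBOL() records the symbol's address in the kprobe blacklist and
leaves code placement alone. A minimal sketch of the new style; the helper
name is hypothetical:

#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* hypothetical helper on the fault path; must never be hit by a kprobe */
static void fault_path_helper(struct pt_regs *regs)
{
        /* ... work that runs before the fault is fully handled ... */
}
NOKPROBE_SYMBOL(fault_path_helper);     /* blacklist the address, keep placement */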
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1b79ca67392f..71c7eff2c89f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,8 @@
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/swapops.h>
+#include <linux/ksm.h>
+#include <linux/mman.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -750,8 +752,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
 			break;
 		/* Walk the process page table, lock and get pte pointer */
 		ptep = get_locked_pte(gmap->mm, addr, &ptl);
-		if (unlikely(!ptep))
-			continue;
+		VM_BUG_ON(!ptep);
 		/* Set notification bit in the pgste of the pte */
 		entry = *ptep;
 		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
@@ -761,7 +762,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
 			gaddr += PAGE_SIZE;
 			len -= PAGE_SIZE;
 		}
-		spin_unlock(ptl);
+		pte_unmap_unlock(ptep, ptl);
 	}
 	up_read(&gmap->mm->mmap_sem);
 	return rc;
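The second fix matters because get_locked_pte() both maps the page-table page
(a real mapping operation with CONFIG_HIGHPTE) and takes the page-table lock,
so the release must be the paired pte_unmap_unlock(), not a bare
spin_unlock(). A minimal sketch of the pairing; touch_one_pte() is a
hypothetical helper:

#include <linux/mm.h>

/* hypothetical: look up one pte and update it under its lock */
static int touch_one_pte(struct mm_struct *mm, unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *ptep = get_locked_pte(mm, addr, &ptl);

        if (!ptep)
                return -EFAULT;
        /* ... read or modify *ptep while ptl is held ... */
        pte_unmap_unlock(ptep, ptl);    /* unmap and unlock together */
        return 0;
}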
@@ -834,99 +835,6 @@ static inline void page_table_free_pgste(unsigned long *table)
 	__free_page(page);
 }
 
-static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
-			unsigned long addr, unsigned long end, bool init_skey)
-{
-	pte_t *start_pte, *pte;
-	spinlock_t *ptl;
-	pgste_t pgste;
-
-	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-	pte = start_pte;
-	do {
-		pgste = pgste_get_lock(pte);
-		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
-		if (init_skey) {
-			unsigned long address;
-
-			pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
-					      PGSTE_GR_BIT | PGSTE_GC_BIT);
-
-			/* skip invalid and not writable pages */
-			if (pte_val(*pte) & _PAGE_INVALID ||
-			    !(pte_val(*pte) & _PAGE_WRITE)) {
-				pgste_set_unlock(pte, pgste);
-				continue;
-			}
-
-			address = pte_val(*pte) & PAGE_MASK;
-			page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
-		}
-		pgste_set_unlock(pte, pgste);
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap_unlock(start_pte, ptl);
-
-	return addr;
-}
-
-static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
-			unsigned long addr, unsigned long end, bool init_skey)
-{
-	unsigned long next;
-	pmd_t *pmd;
-
-	pmd = pmd_offset(pud, addr);
-	do {
-		next = pmd_addr_end(addr, end);
-		if (pmd_none_or_clear_bad(pmd))
-			continue;
-		next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
-	} while (pmd++, addr = next, addr != end);
-
-	return addr;
-}
-
-static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
-			unsigned long addr, unsigned long end, bool init_skey)
-{
-	unsigned long next;
-	pud_t *pud;
-
-	pud = pud_offset(pgd, addr);
-	do {
-		next = pud_addr_end(addr, end);
-		if (pud_none_or_clear_bad(pud))
-			continue;
-		next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
-	} while (pud++, addr = next, addr != end);
-
-	return addr;
-}
-
-void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
-			    unsigned long end, bool init_skey)
-{
-	unsigned long addr, next;
-	pgd_t *pgd;
-
-	down_write(&mm->mmap_sem);
-	if (init_skey && mm_use_skey(mm))
-		goto out_up;
-	addr = start;
-	pgd = pgd_offset(mm, addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			continue;
-		next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
-	} while (pgd++, addr = next, addr != end);
-	if (init_skey)
-		current->mm->context.use_skey = 1;
-out_up:
-	up_write(&mm->mmap_sem);
-}
-EXPORT_SYMBOL(page_table_reset_pgste);
-
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq)
 {
@@ -992,11 +900,6 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
 	return NULL;
 }
 
-void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
-			    unsigned long end, bool init_skey)
-{
-}
-
 static inline void page_table_free_pgste(unsigned long *table)
 {
 }
@@ -1347,13 +1250,89 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
  * Enable storage key handling from now on and initialize the storage
  * keys with the default key.
  */
-void s390_enable_skey(void)
+static int __s390_enable_skey(pte_t *pte, unsigned long addr,
+			      unsigned long next, struct mm_walk *walk)
 {
-	page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
+	unsigned long ptev;
+	pgste_t pgste;
+
+	pgste = pgste_get_lock(pte);
+	/*
+	 * Remove all zero page mappings,
+	 * after establishing a policy to forbid zero page mappings
+	 * following faults for that page will get fresh anonymous pages
+	 */
+	if (is_zero_pfn(pte_pfn(*pte))) {
+		ptep_flush_direct(walk->mm, addr, pte);
+		pte_val(*pte) = _PAGE_INVALID;
+	}
+	/* Clear storage key */
+	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
+			      PGSTE_GR_BIT | PGSTE_GC_BIT);
+	ptev = pte_val(*pte);
+	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
+		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+	pgste_set_unlock(pte, pgste);
+	return 0;
+}
+
+int s390_enable_skey(void)
+{
+	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int rc = 0;
+
+	down_write(&mm->mmap_sem);
+	if (mm_use_skey(mm))
+		goto out_up;
+
+	mm->context.use_skey = 1;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
+				MADV_UNMERGEABLE, &vma->vm_flags)) {
+			mm->context.use_skey = 0;
+			rc = -ENOMEM;
+			goto out_up;
+		}
+	}
+	mm->def_flags &= ~VM_MERGEABLE;
+
+	walk.mm = mm;
+	walk_page_range(0, TASK_SIZE, &walk);
+
+out_up:
+	up_write(&mm->mmap_sem);
+	return rc;
 }
 EXPORT_SYMBOL_GPL(s390_enable_skey);
 
 /*
+ * Reset CMMA state, make all pages stable again.
+ */
+static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
+			     unsigned long next, struct mm_walk *walk)
+{
+	pgste_t pgste;
+
+	pgste = pgste_get_lock(pte);
+	pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+	pgste_set_unlock(pte, pgste);
+	return 0;
+}
+
+void s390_reset_cmma(struct mm_struct *mm)
+{
+	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
+
+	down_write(&mm->mmap_sem);
+	walk.mm = mm;
+	walk_page_range(0, TASK_SIZE, &walk);
+	up_write(&mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(s390_reset_cmma);
+
+/*
  * Test and reset if a guest page is dirty
  */
 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
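Both new callbacks plug into the generic page-table walker, which replaces
the open-coded pgd/pud/pmd/pte loops removed above. A minimal, self-contained
sketch of the struct mm_walk / walk_page_range() pattern as it existed in
this kernel generation; the counting callback is hypothetical:

#include <linux/mm.h>
#include <linux/sched.h>

/* hypothetical callback: count present ptes in the walked range */
static int count_pte(pte_t *pte, unsigned long addr,
                     unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;       /* a non-zero return would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry = count_pte,
                .mm        = mm,
                .private   = &count,
        };

        down_read(&mm->mmap_sem);       /* the walker requires mmap_sem held */
        walk_page_range(0, TASK_SIZE, &walk);
        up_read(&mm->mmap_sem);
        return count;
}

s390_enable_skey() takes mmap_sem for writing instead, since it also flips
mm->context.use_skey and adjusts VMA flags via ksm_madvise() during the walk.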