author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-07 11:41:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-07 11:41:00 -0400
commit		ebb067d2f4e2db59b076f9c9cba0375a8ad1e07c (patch)
tree		8d4fc065ab0fd45fca9483acfff93d4a6c74e981 /arch/s390
parent		33caee39925b887a99a2400dc5c980097c3573f9 (diff)
parent		36e7fdaa1a04fcf65b864232e1af56a51c7814d6 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
"Mostly cleanups and bug-fixes, with two exceptions.
The first is lazy flushing of I/O-TLBs for PCI to improve performance,
the second is software dirty bits in the pmd for the madvise-free
implementation"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (24 commits)
s390/locking: Reenable optimistic spinning
s390/mm: implement dirty bits for large segment table entries
KVM: s390/mm: Fix page table locking vs. split pmd lock
s390/dasd: fix camel case
s390/3215: fix hanging console issue
s390/irq: improve displayed interrupt order in /proc/interrupts
s390/seccomp: fix error return for filtered system calls
s390/pci: introduce lazy IOTLB flushing for DMA unmap
dasd: fix error recovery for alias devices during format
dasd: fix list_del corruption during format
dasd: fix unresponsive device during format
dasd: use aliases for formatted devices during format
s390/pci: fix kmsg component
s390/kdump: Return NOTIFY_OK for all actions other than MEM_GOING_OFFLINE
s390/watchdog: Fix module name in Kconfig help text
s390/dasd: replace seq_printf by seq_puts
s390/dasd: replace pr_warning by pr_warn
s390/dasd: Move EXPORT_SYMBOL after function/variable
s390/dasd: remove unnecessary null test before debugfs_remove
s390/zfcp: use qdio buffer helpers
...
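The lazy I/O-TLB flushing highlighted in the pull request lets the PCI DMA allocator run forward through its iommu bitmap without flushing on every unmap; a single global TLB refresh is paid only when the allocation cursor wraps around (see the arch/s390/pci/pci_dma.c hunks below). As an illustration only — a toy, self-contained user-space model of that allocation policy, with made-up names and sizes and none of the real zPCI calls:

    #include <stdbool.h>
    #include <stdio.h>

    #define IOMMU_PAGES 16                 /* toy bitmap size */

    static bool bitmap[IOMMU_PAGES];       /* one flag per DMA page */
    static int next_bit;                   /* allocation cursor */
    static int global_flushes;             /* stands in for zpci_refresh_global() */

    static int alloc_range(int start, int size)
    {
        for (int i = start; i + size <= IOMMU_PAGES; i++) {
            bool avail = true;
            for (int j = 0; j < size; j++)
                avail = avail && !bitmap[i + j];
            if (!avail)
                continue;
            for (int j = 0; j < size; j++)
                bitmap[i + j] = true;
            return i;
        }
        return -1;
    }

    /* Like dma_alloc_iommu() in the patch: search from the cursor, wrap once
     * on failure, and issue the expensive global flush only after a wrap. */
    static int lazy_alloc(int size)
    {
        bool wrap = false;
        int offset = alloc_range(next_bit, size);

        if (offset == -1) {
            offset = alloc_range(0, size);
            wrap = true;
        }
        if (offset != -1) {
            next_bit = offset + size;
            if (wrap)
                global_flushes++;          /* zpci_refresh_global() in the patch */
        }
        return offset;
    }

    /* Like dma_free_iommu() with lazy unmap: clear the range but keep the
     * cursor ahead of it, so the address is not reused before the next wrap. */
    static void lazy_free(int offset, int size)
    {
        for (int j = 0; j < size; j++)
            bitmap[offset + j] = false;
        if (offset >= next_bit)
            next_bit = offset + size;
    }

    int main(void)
    {
        for (int i = 0; i < 40; i++) {
            int off = lazy_alloc(2);
            if (off >= 0)
                lazy_free(off, 2);
        }
        printf("global flushes after 40 map/unmap cycles: %d\n", global_flushes);
        return 0;
    }

In the real driver the behaviour additionally depends on zdev->tlb_refresh and on the new s390_iommu= command-line switch introduced at the end of the pci_dma.c diff.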
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                   1
-rw-r--r--  arch/s390/include/asm/pgtable.h   197
-rw-r--r--  arch/s390/include/asm/qdio.h        4
-rw-r--r--  arch/s390/include/asm/syscall.h     2
-rw-r--r--  arch/s390/kernel/irq.c             95
-rw-r--r--  arch/s390/kernel/setup.c            2
-rw-r--r--  arch/s390/mm/hugetlbpage.c        103
-rw-r--r--  arch/s390/mm/pgtable.c              8
-rw-r--r--  arch/s390/pci/pci.c                 4
-rw-r--r--  arch/s390/pci/pci_clp.c             4
-rw-r--r--  arch/s390/pci/pci_debug.c           4
-rw-r--r--  arch/s390/pci/pci_dma.c            50
-rw-r--r--  arch/s390/pci/pci_event.c           4
-rw-r--r--  arch/s390/pci/pci_sysfs.c           4
14 files changed, 266 insertions, 216 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 720a11d339eb..8ca60f8d5683 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -92,6 +92,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fcba5e03839f..b76317c1f3eb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -287,7 +287,14 @@ extern unsigned long MODULES_END;
 #define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
 #define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit */
 #define _SEGMENT_ENTRY_PTL	0x0f	/* page table length */
-#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_PROTECT
+
+#define _SEGMENT_ENTRY_DIRTY	0	/* No sw dirty bit for 31-bit */
+#define _SEGMENT_ENTRY_YOUNG	0	/* No sw young bit for 31-bit */
+#define _SEGMENT_ENTRY_READ	0	/* No sw read bit for 31-bit */
+#define _SEGMENT_ENTRY_WRITE	0	/* No sw write bit for 31-bit */
+#define _SEGMENT_ENTRY_LARGE	0	/* No large pages for 31-bit */
+#define _SEGMENT_ENTRY_BITS_LARGE 0
+#define _SEGMENT_ENTRY_ORIGIN_LARGE 0
 
 #define _SEGMENT_ENTRY	(_SEGMENT_ENTRY_PTL)
 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
@@ -350,7 +357,7 @@ extern unsigned long MODULES_END;
 
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL
+#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin */
 #define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit */
@@ -359,30 +366,34 @@ extern unsigned long MODULES_END;
 #define _SEGMENT_ENTRY	(0)
 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
 
-#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
-#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */
-#define _SEGMENT_ENTRY_SPLIT	0x001	/* THP splitting bit */
-#define _SEGMENT_ENTRY_YOUNG	0x002	/* SW segment young bit */
-#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_YOUNG
+#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
+#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
+#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
+#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
+#define _SEGMENT_ENTRY_CO	0x0100	/* change-recording override */
+#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
+#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */
 
 /*
  * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
- *				..R...I...y.
- * prot-none, old		..0...1...1.
- * prot-none, young		..1...1...1.
- * read-only, old		..1...1...0.
- * read-only, young		..1...0...1.
- * read-write, old		..0...1...0.
- * read-write, young		..0...0...1.
+ *				dy..R...I...wr
+ * prot-none, clean, old	00..1...1...00
+ * prot-none, clean, young	01..1...1...00
+ * prot-none, dirty, old	10..1...1...00
+ * prot-none, dirty, young	11..1...1...00
+ * read-only, clean, old	00..1...1...01
+ * read-only, clean, young	01..1...0...01
+ * read-only, dirty, old	10..1...1...01
+ * read-only, dirty, young	11..1...0...01
+ * read-write, clean, old	00..1...1...11
+ * read-write, clean, young	01..1...0...11
+ * read-write, dirty, old	10..0...1...11
+ * read-write, dirty, young	11..0...0...11
  * The segment table origin is used to distinguish empty (origin==0) from
  * read-write, old segment table entries (origin!=0)
  */
 
-#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
-
-/* Set of bits not changed in pmd_modify */
-#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
-				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)
+#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */
 
 /* Page status table bits for virtualization */
 #define PGSTE_ACC_BITS	0xf000000000000000UL
@@ -455,10 +466,11 @@ extern unsigned long MODULES_END;
  * Segment entry (large page) protection definitions.
  */
 #define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
-				 _SEGMENT_ENTRY_NONE)
-#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_INVALID | \
 				 _SEGMENT_ENTRY_PROTECT)
-#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_INVALID)
+#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
+				 _SEGMENT_ENTRY_READ)
+#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
+				 _SEGMENT_ENTRY_WRITE)
 
 static inline int mm_has_pgste(struct mm_struct *mm)
 {
@@ -569,25 +581,23 @@ static inline int pmd_none(pmd_t pmd)
 
 static inline int pmd_large(pmd_t pmd)
 {
-#ifdef CONFIG_64BIT
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
-#else
-	return 0;
-#endif
 }
 
-static inline int pmd_prot_none(pmd_t pmd)
+static inline int pmd_pfn(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) &&
-	       (pmd_val(pmd) & _SEGMENT_ENTRY_NONE);
+	unsigned long origin_mask;
+
+	origin_mask = _SEGMENT_ENTRY_ORIGIN;
+	if (pmd_large(pmd))
+		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
+	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
 }
 
 static inline int pmd_bad(pmd_t pmd)
 {
-#ifdef CONFIG_64BIT
 	if (pmd_large(pmd))
 		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
-#endif
 	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
 }
 
@@ -607,20 +617,22 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
 #define __HAVE_ARCH_PMD_WRITE
 static inline int pmd_write(pmd_t pmd)
 {
-	if (pmd_prot_none(pmd))
-		return 0;
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+	int dirty = 1;
+	if (pmd_large(pmd))
+		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
+	return dirty;
 }
 
 static inline int pmd_young(pmd_t pmd)
 {
-	int young = 0;
-#ifdef CONFIG_64BIT
-	if (pmd_prot_none(pmd))
-		young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0;
-	else
+	int young = 1;
+	if (pmd_large(pmd))
 		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
-#endif
 	return young;
 }
 
@@ -1391,7 +1403,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
 #define pte_page(x) pfn_to_page(pte_pfn(x))
 
-#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
 
 /* Find an entry in the lowest level page table.. */
 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
@@ -1413,41 +1425,75 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 	return pgprot_val(SEGMENT_WRITE);
 }
 
-static inline pmd_t pmd_mkyoung(pmd_t pmd)
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
-#ifdef CONFIG_64BIT
-	if (pmd_prot_none(pmd)) {
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
+	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
+		return pmd;
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+	if (pmd_large(pmd)) {
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
 		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-	} else {
+	}
+	return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	if (pmd_large(pmd)) {
+		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
+		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
+			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+	}
+	return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	if (pmd_large(pmd)) {
 		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
+		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
+			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
 	}
-#endif
 	return pmd;
 }
 
 static inline pmd_t pmd_mkold(pmd_t pmd)
 {
-#ifdef CONFIG_64BIT
-	if (pmd_prot_none(pmd)) {
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
-	} else {
+	if (pmd_large(pmd)) {
 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
 		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
 	}
-#endif
 	return pmd;
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-	int young;
-
-	young = pmd_young(pmd);
-	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
+	if (pmd_large(pmd)) {
+		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
+			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
+			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
+		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
+			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
+			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+		return pmd;
+	}
+	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
 	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
-	if (young)
-		pmd = pmd_mkyoung(pmd);
 	return pmd;
 }
 
@@ -1455,16 +1501,9 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 {
 	pmd_t __pmd;
 	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
-	return pmd_mkyoung(__pmd);
+	return __pmd;
 }
 
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
-{
-	/* Do not clobber PROT_NONE segments! */
-	if (!pmd_prot_none(pmd))
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
-	return pmd;
-}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
 
 static inline void __pmdp_csp(pmd_t *pmdp)
@@ -1555,34 +1594,21 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 
 static inline int pmd_trans_splitting(pmd_t pmd)
 {
-	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
+	       (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
 }
 
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t entry)
 {
-	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
-		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
 	*pmdp = entry;
 }
 
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
 	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
-	return pmd;
-}
-
-static inline pmd_t pmd_wrprotect(pmd_t pmd)
-{
-	/* Do not clobber PROT_NONE segments! */
-	if (!pmd_prot_none(pmd))
-		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-	return pmd;
-}
-
-static inline pmd_t pmd_mkdirty(pmd_t pmd)
-{
-	/* No dirty bit in the segment table entry. */
+	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
 
@@ -1647,11 +1673,6 @@ static inline int has_transparent_hugepage(void)
 {
 	return MACHINE_HAS_HPAGE ? 1 : 0;
 }
-
-static inline unsigned long pmd_pfn(pmd_t pmd)
-{
-	return pmd_val(pmd) >> PAGE_SHIFT;
-}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
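EDAT-1 large segment table entries carry no hardware dirty or referenced bit, so the hunks above track both in software: the SW dirty/young bits record the state, while the HW protect/invalid bits make the next write or reference fault so the SW bits can be updated. As a rough, self-contained illustration (not code from this series; the pmd_large() checks are dropped for brevity), the pmd_mkdirty()/pmd_mkclean() transitions can be modelled with the constants defined above:

    #include <stdio.h>

    /* Values copied from the new 64-bit definitions above. */
    #define SEG_INVALID 0x20UL     /* HW invalid bit */
    #define SEG_PROTECT 0x200UL    /* HW page protection bit */
    #define SEG_YOUNG   0x1000UL   /* SW young bit */
    #define SEG_DIRTY   0x2000UL   /* SW dirty bit */
    #define SEG_READ    0x0002UL   /* SW read bit */
    #define SEG_WRITE   0x0001UL   /* SW write bit */

    /* Same transition as the new pmd_mkdirty(): record the write in the SW
     * dirty bit and, for a writable entry, drop HW protection so further
     * writes no longer fault. */
    static unsigned long mkdirty(unsigned long entry)
    {
        entry |= SEG_DIRTY;
        if (entry & SEG_WRITE)
            entry &= ~SEG_PROTECT;
        return entry;
    }

    /* Same transition as the new pmd_mkclean(): clear the SW dirty bit and
     * re-protect the entry, so the next write faults and is recorded again. */
    static unsigned long mkclean(unsigned long entry)
    {
        entry &= ~SEG_DIRTY;
        entry |= SEG_PROTECT;
        return entry;
    }

    int main(void)
    {
        /* "read-write, clean, old" from the encoding table above */
        unsigned long e = SEG_PROTECT | SEG_INVALID | SEG_READ | SEG_WRITE;

        e = mkdirty(e);
        printf("after write:   dirty=%d protected=%d\n",
               !!(e & SEG_DIRTY), !!(e & SEG_PROTECT));
        e = mkclean(e);
        printf("after mkclean: dirty=%d protected=%d\n",
               !!(e & SEG_DIRTY), !!(e & SEG_PROTECT));
        return 0;
    }

Keeping a clean entry write-protected until the next store is what the madvise-free work mentioned in the pull request relies on to detect re-dirtied huge pages.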
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index d786c634e052..06f3034605a1 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -415,6 +415,10 @@ struct qdio_brinfo_entry_l2 {
 #define QDIO_FLAG_SYNC_OUTPUT	0x02
 #define QDIO_FLAG_PCI_OUT	0x10
 
+int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
+void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
+void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count);
+
 extern int qdio_allocate(struct qdio_initialize *);
 extern int qdio_establish(struct qdio_initialize *);
 extern int qdio_activate(struct ccw_device *);
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index abad78d5b10c..5bc12598ae9e 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -54,7 +54,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
 					    struct pt_regs *regs,
 					    int error, long val)
 {
-	regs->gprs[2] = error ? -error : val;
+	regs->gprs[2] = error ? error : val;
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
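For the change above: error is passed as a negative errno (or zero), so negating it turned a failure such as -EPERM into the positive value 1 and a seccomp-filtered system call appeared to succeed. A small stand-alone illustration of the sign handling (not kernel code):

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
        int error = -EPERM;   /* e.g. a system call rejected by a seccomp filter */
        long val = 0;

        long old_gpr2 = error ? -error : val;  /* old code: +1, looks like success */
        long new_gpr2 = error ? error : val;   /* fixed code: -1, i.e. -EPERM */

        printf("old return value: %ld, fixed return value: %ld\n",
               old_gpr2, new_gpr2);
        return 0;
    }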
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 99b0b09646ca..8eb82443cfbd 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -30,6 +30,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
 EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);
 
 struct irq_class {
+	int irq;
 	char *name;
 	char *desc;
 };
@@ -45,9 +46,9 @@ struct irq_class {
  * up with having a sum which accounts each interrupt twice.
  */
 static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
-	[EXT_INTERRUPT]  = {.name = "EXT"},
-	[IO_INTERRUPT]   = {.name = "I/O"},
-	[THIN_INTERRUPT] = {.name = "AIO"},
+	{.irq = EXT_INTERRUPT,  .name = "EXT"},
+	{.irq = IO_INTERRUPT,   .name = "I/O"},
+	{.irq = THIN_INTERRUPT, .name = "AIO"},
 };
 
 /*
@@ -56,38 +57,38 @@ static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
  * In addition this list contains non external / I/O events like NMIs.
  */
 static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
-	[IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
-	[IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
-	[IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
-	[IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
-	[IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
-	[IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
-	[IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
-	[IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
-	[IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
-	[IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
-	[IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
-	[IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
-	[IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
-	[IRQIO_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
-	[IRQIO_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
-	[IRQIO_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
-	[IRQIO_C15] = {.name = "C15", .desc = "[I/O] 3215"},
-	[IRQIO_C70] = {.name = "C70", .desc = "[I/O] 3270"},
-	[IRQIO_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
-	[IRQIO_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
-	[IRQIO_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
-	[IRQIO_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
-	[IRQIO_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
-	[IRQIO_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
-	[IRQIO_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
-	[IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
-	[IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
-	[IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
-	[IRQIO_VIR] = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
-	[IRQIO_VAI] = {.name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
-	[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
-	[CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
+	{.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"},
+	{.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"},
+	{.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"},
+	{.irq = IRQEXT_TMR, .name = "TMR", .desc = "[EXT] CPU Timer"},
+	{.irq = IRQEXT_TLA, .name = "TAL", .desc = "[EXT] Timing Alert"},
+	{.irq = IRQEXT_PFL, .name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
+	{.irq = IRQEXT_DSD, .name = "DSD", .desc = "[EXT] DASD Diag"},
+	{.irq = IRQEXT_VRT, .name = "VRT", .desc = "[EXT] Virtio"},
+	{.irq = IRQEXT_SCP, .name = "SCP", .desc = "[EXT] Service Call"},
+	{.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"},
+	{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
+	{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
+	{.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
+	{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
+	{.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
+	{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
+	{.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
+	{.irq = IRQIO_C70, .name = "C70", .desc = "[I/O] 3270"},
+	{.irq = IRQIO_TAP, .name = "TAP", .desc = "[I/O] Tape"},
+	{.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
+	{.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
+	{.irq = IRQIO_CLW, .name = "CLW", .desc = "[I/O] CLAW"},
+	{.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
+	{.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"},
+	{.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},
+	{.irq = IRQIO_CSC, .name = "CSC", .desc = "[I/O] CHSC Subchannel"},
+	{.irq = IRQIO_PCI, .name = "PCI", .desc = "[I/O] PCI Interrupt" },
+	{.irq = IRQIO_MSI, .name = "MSI", .desc = "[I/O] MSI Interrupt" },
+	{.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
+	{.irq = IRQIO_VAI, .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
+	{.irq = NMI_NMI, .name = "NMI", .desc = "[NMI] Machine Check"},
+	{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
 };
 
 void __init init_IRQ(void)
@@ -116,33 +117,37 @@ void do_IRQ(struct pt_regs *regs, int irq)
  */
 int show_interrupts(struct seq_file *p, void *v)
 {
-	int irq = *(loff_t *) v;
-	int cpu;
+	int index = *(loff_t *) v;
+	int cpu, irq;
 
 	get_online_cpus();
-	if (irq == 0) {
+	if (index == 0) {
 		seq_puts(p, " ");
 		for_each_online_cpu(cpu)
 			seq_printf(p, "CPU%d ", cpu);
 		seq_putc(p, '\n');
 		goto out;
 	}
-	if (irq < NR_IRQS) {
-		if (irq >= NR_IRQS_BASE)
+	if (index < NR_IRQS) {
+		if (index >= NR_IRQS_BASE)
 			goto out;
-		seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
+		/* Adjust index to process irqclass_main_desc array entries */
+		index--;
+		seq_printf(p, "%s: ", irqclass_main_desc[index].name);
+		irq = irqclass_main_desc[index].irq;
 		for_each_online_cpu(cpu)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 		seq_putc(p, '\n');
 		goto out;
 	}
-	for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
-		seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
+	for (index = 0; index < NR_ARCH_IRQS; index++) {
+		seq_printf(p, "%s: ", irqclass_sub_desc[index].name);
+		irq = irqclass_sub_desc[index].irq;
 		for_each_online_cpu(cpu)
 			seq_printf(p, "%10u ",
 				   per_cpu(irq_stat, cpu).irqs[irq]);
-		if (irqclass_sub_desc[irq].desc)
-			seq_printf(p, " %s", irqclass_sub_desc[irq].desc);
+		if (irqclass_sub_desc[index].desc)
+			seq_printf(p, " %s", irqclass_sub_desc[index].desc);
 		seq_putc(p, '\n');
 	}
 out:
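The new .irq member above decouples the display order of /proc/interrupts from the interrupt numbers: rows are printed in the order of the descriptor array while the per-CPU counters are still looked up by interrupt number. A reduced, runnable sketch of that pattern with made-up names and counts:

    #include <stdio.h>

    enum { IRQ_FOO, IRQ_BAR, IRQ_BAZ, NR_IRQS_DEMO };

    struct irq_class {
        int irq;                /* index into the stats array */
        const char *name;       /* label used for display */
    };

    /* Display order is the array order, independent of the enum values. */
    static const struct irq_class desc[] = {
        { .irq = IRQ_BAZ, .name = "BAZ" },
        { .irq = IRQ_FOO, .name = "FOO" },
        { .irq = IRQ_BAR, .name = "BAR" },
    };

    static unsigned int stats[NR_IRQS_DEMO] = { 7, 42, 3 };   /* made-up counts */

    int main(void)
    {
        for (unsigned int i = 0; i < sizeof(desc) / sizeof(desc[0]); i++)
            printf("%s: %10u\n", desc[i].name, stats[desc[i].irq]);
        return 0;
    }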
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1e2264b46e4c..ae1d5be7dd88 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -501,6 +501,8 @@ static int kdump_mem_notifier(struct notifier_block *nb,
 {
 	struct memory_notify *arg = data;
 
+	if (action != MEM_GOING_OFFLINE)
+		return NOTIFY_OK;
 	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
 		return NOTIFY_BAD;
 	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 0ff66a7e29bb..389bc17934b7 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -10,42 +10,33 @@
 
 static inline pmd_t __pte_to_pmd(pte_t pte)
 {
-	int none, young, prot;
 	pmd_t pmd;
 
 	/*
 	 * Convert encoding		pte bits	pmd bits
-	 *				.IR...wrdytp	..R...I...y.
-	 * empty			.10...000000 -> ..0...1...0.
-	 * prot-none, clean, old	.11...000001 -> ..0...1...1.
-	 * prot-none, clean, young	.11...000101 -> ..1...1...1.
-	 * prot-none, dirty, old	.10...001001 -> ..0...1...1.
-	 * prot-none, dirty, young	.10...001101 -> ..1...1...1.
-	 * read-only, clean, old	.11...010001 -> ..1...1...0.
-	 * read-only, clean, young	.01...010101 -> ..1...0...1.
-	 * read-only, dirty, old	.11...011001 -> ..1...1...0.
-	 * read-only, dirty, young	.01...011101 -> ..1...0...1.
-	 * read-write, clean, old	.11...110001 -> ..0...1...0.
-	 * read-write, clean, young	.01...110101 -> ..0...0...1.
-	 * read-write, dirty, old	.10...111001 -> ..0...1...0.
-	 * read-write, dirty, young	.00...111101 -> ..0...0...1.
-	 * Huge ptes are dirty by definition, a clean pte is made dirty
-	 * by the conversion.
+	 *				.IR...wrdytp	dy..R...I...wr
+	 * empty			.10...000000 -> 00..0...1...00
+	 * prot-none, clean, old	.11...000001 -> 00..1...1...00
+	 * prot-none, clean, young	.11...000101 -> 01..1...1...00
+	 * prot-none, dirty, old	.10...001001 -> 10..1...1...00
+	 * prot-none, dirty, young	.10...001101 -> 11..1...1...00
+	 * read-only, clean, old	.11...010001 -> 00..1...1...01
+	 * read-only, clean, young	.01...010101 -> 01..1...0...01
+	 * read-only, dirty, old	.11...011001 -> 10..1...1...01
+	 * read-only, dirty, young	.01...011101 -> 11..1...0...01
+	 * read-write, clean, old	.11...110001 -> 00..0...1...11
+	 * read-write, clean, young	.01...110101 -> 01..0...0...11
+	 * read-write, dirty, old	.10...111001 -> 10..0...1...11
+	 * read-write, dirty, young	.00...111101 -> 11..0...0...11
 	 */
 	if (pte_present(pte)) {
 		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
-		if (pte_val(pte) & _PAGE_INVALID)
-			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
-		none = (pte_val(pte) & _PAGE_PRESENT) &&
-		       !(pte_val(pte) & _PAGE_READ) &&
-		       !(pte_val(pte) & _PAGE_WRITE);
-		prot = (pte_val(pte) & _PAGE_PROTECT) &&
-		       !(pte_val(pte) & _PAGE_WRITE);
-		young = pte_val(pte) & _PAGE_YOUNG;
-		if (none || young)
-			pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
-		if (prot || (none && young))
-			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
 	} else
 		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
 	return pmd;
@@ -56,34 +47,31 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
 	pte_t pte;
 
 	/*
 	 * Convert encoding		pmd bits	pte bits
-	 *				..R...I...y.	.IR...wrdytp
-	 * empty			..0...1...0. -> .10...000000
-	 * prot-none, old		..0...1...1. -> .10...001001
-	 * prot-none, young		..1...1...1. -> .10...001101
-	 * read-only, old		..1...1...0. -> .11...011001
-	 * read-only, young		..1...0...1. -> .01...011101
-	 * read-write, old		..0...1...0. -> .10...111001
-	 * read-write, young		..0...0...1. -> .00...111101
-	 * Huge ptes are dirty by definition
+	 *				dy..R...I...wr	.IR...wrdytp
+	 * empty			00..0...1...00 -> .10...001100
+	 * prot-none, clean, old	00..0...1...00 -> .10...000001
+	 * prot-none, clean, young	01..0...1...00 -> .10...000101
+	 * prot-none, dirty, old	10..0...1...00 -> .10...001001
+	 * prot-none, dirty, young	11..0...1...00 -> .10...001101
+	 * read-only, clean, old	00..1...1...01 -> .11...010001
+	 * read-only, clean, young	01..1...1...01 -> .11...010101
+	 * read-only, dirty, old	10..1...1...01 -> .11...011001
+	 * read-only, dirty, young	11..1...1...01 -> .11...011101
+	 * read-write, clean, old	00..0...1...11 -> .10...110001
+	 * read-write, clean, young	01..0...1...11 -> .10...110101
+	 * read-write, dirty, old	10..0...1...11 -> .10...111001
+	 * read-write, dirty, young	11..0...1...11 -> .10...111101
 	 */
 	if (pmd_present(pmd)) {
-		pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
-			       (pmd_val(pmd) & PAGE_MASK);
-		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
-			pte_val(pte) |= _PAGE_INVALID;
-		if (pmd_prot_none(pmd)) {
-			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
-				pte_val(pte) |= _PAGE_YOUNG;
-		} else {
-			pte_val(pte) |= _PAGE_READ;
-			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
-				pte_val(pte) |= _PAGE_PROTECT;
-			else
-				pte_val(pte) |= _PAGE_WRITE;
-			if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
-				pte_val(pte) |= _PAGE_YOUNG;
-		}
+		pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
+		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
+		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
+		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
+		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
+		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
 	} else
 		pte_val(pte) = _PAGE_INVALID;
 	return pte;
@@ -96,6 +84,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	pmd = __pte_to_pmd(pte);
 	if (!MACHINE_HAS_HPAGE) {
+		/* Emulated huge ptes loose the dirty and young bit */
 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
 		pmd_val(pmd) |= pte_page(pte)[1].index;
 	} else
@@ -113,6 +102,8 @@ pte_t huge_ptep_get(pte_t *ptep)
 		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
 		pmd_val(pmd) |= *(unsigned long *) origin;
+		/* Emulated huge ptes are young and dirty by definition */
+		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
 	}
 	return __pmd_to_pte(pmd);
 }
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 37b8241ec784..19daa53a3da4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1279,6 +1279,7 @@ static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
 {
 	unsigned long next, *table, *new;
 	struct page *page;
+	spinlock_t *ptl;
 	pmd_t *pmd;
 
 	pmd = pmd_offset(pud, addr);
@@ -1296,7 +1297,7 @@ again:
 		if (!new)
 			return -ENOMEM;
 
-		spin_lock(&mm->page_table_lock);
+		ptl = pmd_lock(mm, pmd);
 		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
 			/* Nuke pmd entry pointing to the "short" page table */
 			pmdp_flush_lazy(mm, addr, pmd);
@@ -1310,7 +1311,7 @@ again:
 			page_table_free_rcu(tlb, table);
 			new = NULL;
 		}
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 		if (new) {
 			page_table_free_pgste(new);
 			goto again;
@@ -1432,6 +1433,9 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
 {
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
+	entry = pmd_mkyoung(entry);
+	if (dirty)
+		entry = pmd_mkdirty(entry);
 	if (pmd_same(*pmdp, entry))
 		return 0;
 	pmdp_invalidate(vma, address, pmdp);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 30de42730b2f..2fa7b14b9c08 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -15,8 +15,8 @@
  *   Thomas Klein
  */
 
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 96545d7659fd..6e22a247de9b 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -5,8 +5,8 @@
  *   Jan Glauber <jang@linux.vnet.ibm.com>
  */
 
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index c5c66840ac00..eec598c5939f 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -5,8 +5,8 @@
  *   Jan Glauber <jang@linux.vnet.ibm.com>
  */
 
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/seq_file.h>
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index f91c03119804..4cbb29a4d615 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -16,6 +16,13 @@
 
 static struct kmem_cache *dma_region_table_cache;
 static struct kmem_cache *dma_page_table_cache;
+static int s390_iommu_strict;
+
+static int zpci_refresh_global(struct zpci_dev *zdev)
+{
+	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
+				  zdev->iommu_pages * PAGE_SIZE);
+}
 
 static unsigned long *dma_alloc_cpu_table(void)
 {
@@ -155,18 +162,15 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 	}
 
 	/*
-	 * rpcit is not required to establish new translations when previously
-	 * invalid translation-table entries are validated, however it is
-	 * required when altering previously valid entries.
+	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
+	 * translations when previously invalid translation-table entries are
+	 * validated. With lazy unmap, it also is skipped for previously valid
+	 * entries, but a global rpcit is then required before any address can
+	 * be re-used, i.e. after each iommu bitmap wrap-around.
 	 */
 	if (!zdev->tlb_refresh &&
-	    ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
-		/*
-		 * TODO: also need to check that the old entry is indeed INVALID
-		 * and not only for one page but for the whole range...
-		 * -> now we WARN_ON in that case but with lazy unmap that
-		 * needs to be redone!
-		 */
+	    (!s390_iommu_strict ||
+	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
 		goto no_refresh;
 
 	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
@@ -220,16 +224,21 @@ static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
 static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
 {
 	unsigned long offset, flags;
+	int wrap = 0;
 
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
 	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
-	if (offset == -1)
+	if (offset == -1) {
+		/* wrap-around */
 		offset = __dma_alloc_iommu(zdev, 0, size);
+		wrap = 1;
+	}
 
 	if (offset != -1) {
 		zdev->next_bit = offset + size;
-		if (zdev->next_bit >= zdev->iommu_pages)
-			zdev->next_bit = 0;
+		if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
+			/* global flush after wrap-around with lazy unmap */
+			zpci_refresh_global(zdev);
 	}
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
 	return offset;
@@ -243,7 +252,11 @@ static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size
 	if (!zdev->iommu_bitmap)
 		goto out;
 	bitmap_clear(zdev->iommu_bitmap, offset, size);
-	if (offset >= zdev->next_bit)
+	/*
+	 * Lazy flush for unmap: need to move next_bit to avoid address re-use
+	 * until wrap-around.
+	 */
+	if (!s390_iommu_strict && offset >= zdev->next_bit)
 		zdev->next_bit = offset + size;
 out:
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
@@ -504,3 +517,12 @@ struct dma_map_ops s390_dma_ops = {
 	/* dma_supported is unconditionally true without a callback */
 };
 EXPORT_SYMBOL_GPL(s390_dma_ops);
+
+static int __init s390_iommu_setup(char *str)
+{
+	if (!strncmp(str, "strict", 6))
+		s390_iommu_strict = 1;
+	return 0;
+}
+
+__setup("s390_iommu=", s390_iommu_setup);
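With the setup hook above, lazy unmapping is the default; it can be disabled from the kernel command line, which restores an I/O-TLB flush on every DMA unmap. For example:

    s390_iommu=strict

Only the prefix "strict" is matched, as per the strncmp() above.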
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 6d7f5a3016ca..460fdb21cf61 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -5,8 +5,8 @@
  *   Jan Glauber <jang@linux.vnet.ibm.com>
  */
 
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/pci.h>
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 9190214b8702..fa3ce891e597 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -5,8 +5,8 @@
  *   Jan Glauber <jang@linux.vnet.ibm.com>
  */
 
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/stat.h>