author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-18 15:17:16 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-18 15:17:16 -0400
commit     f61a657fdf221403d99e6f0d4c6d24762920d4a9
tree       54c5413977be2acdffcb31ee058af333ba3679a3 /arch/s390/mm
parent     0e034f5c4bc408c943f9c4a06244415d75d7108c
parent     c53db5222b92d1df384a89ceba7808f8e4c535dd
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
"The s390 patches for the 4.7 merge window have the usual bug fixes and
cleanups, and the following new features:
- An interface for the dasd driver to query if a volume is online to
another operating system
- A new ioctl for the dasd driver to verify the format for a range of
tracks
- Following the example of x86 the struct fpu is now allocated with
the task_struct
- The 'report_error' interface for the PCI bus to send an
adapter-error notification from user space to the service element
of the machine"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (29 commits)
s390/vmem: remove unused function parameter
s390/vmem: fix identity mapping
s390: add missing include statements
s390: add missing declarations
s390: make couple of variables and functions static
s390/cache: remove superfluous locking
s390/cpuinfo: simplify locking and skip offline cpus early
s390/3270: hangup the 3270 tty after a disconnect
s390/3270: handle reconnect of a tty with a different size
s390/3270: avoid endless I/O loop with disconnected 3270 terminals
s390/3270: fix garbled output on 3270 tty view
s390/3270: fix view reference counting
s390/3270: add missing tty_kref_put
s390/dumpstack: implement and use return_address()
s390/cpum_sf: Remove superfluous SMP function call
s390/cpum_cf: Remove superfluous SMP function call
s390/Kconfig: make z196 the default processor type
s390/sclp: avoid compile warning in sclp_pci_report
s390/fpu: allocate 'struct fpu' with the task_struct
s390/crypto: cleanup and move the header with the cpacf definitions
...
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/fault.c  41
-rw-r--r--  arch/s390/mm/mmap.c    1
-rw-r--r--  arch/s390/mm/vmem.c    8
3 files changed, 38 insertions, 12 deletions
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index cce577feab1e..7a3144017301 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -631,6 +631,29 @@ void pfault_fini(void)
 static DEFINE_SPINLOCK(pfault_lock);
 static LIST_HEAD(pfault_list);
 
+#define PF_COMPLETE	0x0080
+
+/*
+ * The mechanism of our pfault code: if Linux is running as guest, runs a user
+ * space process and the user space process accesses a page that the host has
+ * paged out we get a pfault interrupt.
+ *
+ * This allows us, within the guest, to schedule a different process. Without
+ * this mechanism the host would have to suspend the whole virtual cpu until
+ * the page has been paged in.
+ *
+ * So when we get such an interrupt then we set the state of the current task
+ * to uninterruptible and also set the need_resched flag. Both happens within
+ * interrupt context(!). If we later on want to return to user space we
+ * recognize the need_resched flag and then call schedule(). It's not very
+ * obvious how this works...
+ *
+ * Of course we have a lot of additional fun with the completion interrupt (->
+ * host signals that a page of a process has been paged in and the process can
+ * continue to run). This interrupt can arrive on any cpu and, since we have
+ * virtual cpus, actually appear before the interrupt that signals that a page
+ * is missing.
+ */
 static void pfault_interrupt(struct ext_code ext_code,
 			     unsigned int param32, unsigned long param64)
 {
@@ -639,10 +662,9 @@ static void pfault_interrupt(struct ext_code ext_code,
 	pid_t pid;
 
 	/*
-	 * Get the external interruption subcode & pfault
-	 * initial/completion signal bit. VM stores this
-	 * in the 'cpu address' field associated with the
-	 * external interrupt.
+	 * Get the external interruption subcode & pfault initial/completion
+	 * signal bit. VM stores this in the 'cpu address' field associated
+	 * with the external interrupt.
 	 */
 	subcode = ext_code.subcode;
 	if ((subcode & 0xff00) != __SUBCODE_MASK)
@@ -658,7 +680,7 @@ static void pfault_interrupt(struct ext_code ext_code,
 	if (!tsk)
 		return;
 	spin_lock(&pfault_lock);
-	if (subcode & 0x0080) {
+	if (subcode & PF_COMPLETE) {
 		/* signal bit is set -> a page has been swapped in by VM */
 		if (tsk->thread.pfault_wait == 1) {
 			/* Initial interrupt was faster than the completion
@@ -687,8 +709,7 @@ static void pfault_interrupt(struct ext_code ext_code,
 			goto out;
 		if (tsk->thread.pfault_wait == 1) {
 			/* Already on the list with a reference: put to sleep */
-			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-			set_tsk_need_resched(tsk);
+			goto block;
 		} else if (tsk->thread.pfault_wait == -1) {
 			/* Completion interrupt was faster than the initial
 			 * interrupt (pfault_wait == -1). Set pfault_wait
@@ -703,7 +724,11 @@ static void pfault_interrupt(struct ext_code ext_code,
 			get_task_struct(tsk);
 			tsk->thread.pfault_wait = 1;
 			list_add(&tsk->thread.list, &pfault_list);
-			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+block:
+			/* Since this must be a userspace fault, there
+			 * is no kernel task state to trample. Rely on the
+			 * return to userspace schedule() to block. */
+			__set_current_state(TASK_UNINTERRUPTIBLE);
 			set_tsk_need_resched(tsk);
 		}
 	}
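The comment block added to fault.c above describes a handshake that is easy to get lost in: the initial pfault interrupt and the completion interrupt can arrive in either order (and on different cpus), and the three-valued pfault_wait field (0, 1 or -1) is what keeps the two orderings apart. The following standalone sketch only models that handshake as ordinary user-space C; the function names, the single global state variable and the printf tracing are invented for this illustration and do not exist in the kernel.

/* pfault_race_sketch.c - simplified model of the pfault_wait handshake.
 * NOT kernel code; it only mirrors the three states the patch above
 * relies on: 0 (idle), 1 (initial interrupt seen, task blocked),
 * -1 (completion arrived before the initial interrupt).
 */
#include <stdio.h>

static int pfault_wait;	/* stands in for tsk->thread.pfault_wait */

static void initial_interrupt(void)
{
	if (pfault_wait == -1) {
		/* Completion already arrived: do not block at all. */
		pfault_wait = 0;
		printf("initial:    completion won the race, keep running\n");
	} else {
		/* Normal case: remember we are waiting and block the task. */
		pfault_wait = 1;
		printf("initial:    task blocks until completion\n");
	}
}

static void completion_interrupt(void)
{
	if (pfault_wait == 1) {
		/* Normal case: the task was blocked, wake it up. */
		pfault_wait = 0;
		printf("completion: wake the task\n");
	} else {
		/* Completion overtook the initial interrupt on another cpu. */
		pfault_wait = -1;
		printf("completion: arrived first, flag it\n");
	}
}

int main(void)
{
	/* Expected order: initial interrupt, then completion. */
	initial_interrupt();
	completion_interrupt();

	/* Reversed order, the case the comment block above warns about. */
	completion_interrupt();
	initial_interrupt();
	return 0;
}

Running it prints both orderings; in the reversed one the task never blocks, which is exactly the pfault_wait == -1 branch in pfault_interrupt().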
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 89cf09e5f168..eb9df2822da1 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -22,6 +22,7 @@
  * Started by Ingo Molnar <mingo@elte.hu>
  */
 
+#include <linux/elf-randomize.h>
 #include <linux/personality.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index d27fccbad7c1..d48cf25cfe99 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -56,7 +56,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	return pmd;
 }
 
-static pte_t __ref *vmem_pte_alloc(unsigned long address)
+static pte_t __ref *vmem_pte_alloc(void)
 {
 	pte_t *pte;
 
@@ -121,7 +121,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			continue;
 		}
 		if (pmd_none(*pm_dir)) {
-			pt_dir = vmem_pte_alloc(address);
+			pt_dir = vmem_pte_alloc();
 			if (!pt_dir)
 				goto out;
 			pmd_populate(&init_mm, pm_dir, pt_dir);
@@ -233,7 +233,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
-		pt_dir = vmem_pte_alloc(address);
+		pt_dir = vmem_pte_alloc();
 		if (!pt_dir)
 			goto out;
 		pmd_populate(&init_mm, pm_dir, pt_dir);
@@ -370,7 +370,7 @@ void __init vmem_map_init(void)
 	ro_end = (unsigned long)&_eshared & PAGE_MASK;
 	for_each_memblock(memory, reg) {
 		start = reg->base;
-		end = reg->base + reg->size - 1;
+		end = reg->base + reg->size;
 		if (start >= ro_end || end <= ro_start)
 			vmem_add_mem(start, end - start, 0);
 		else if (start >= ro_start && end <= ro_end)
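The last vmem.c hunk drops the "- 1" so that end is exclusive: the context lines above recompute the region size as end - start before calling vmem_add_mem(), and with an inclusive end that size came out one byte short; an exclusive end also keeps the comparisons against ro_start/ro_end on the same convention. The snippet below is only a user-space illustration of that off-by-one; the base and size values are made up, not real memblock data.

/* range_sketch.c - why the last vmem.c hunk switches to an exclusive end. */
#include <stdio.h>

int main(void)
{
	unsigned long base = 0x100000UL;	/* hypothetical region start */
	unsigned long size = 0x200000UL;	/* hypothetical region size  */

	/* Old, inclusive end: the size recomputed from it is short by one. */
	unsigned long end_incl = base + size - 1;
	printf("inclusive end: size = %#lx (expected %#lx)\n",
	       end_incl - base, size);

	/* New, exclusive end: end - start gives the size back exactly. */
	unsigned long end_excl = base + size;
	printf("exclusive end: size = %#lx (expected %#lx)\n",
	       end_excl - base, size);
	return 0;
}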