author     Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 15:22:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 15:22:51 -0400
commit     015cd867e566e3a27b5e8062eb24eeaa4d77297f (patch)
tree       d96c90119b3c454b5c5cfb07f2409a87bbd29754 /arch/s390/mm
parent     85802a49a85c49d3e9174b686d471cb86c90a1cb (diff)
parent     64a40c84001e55001a4d80496b6b56e4d04e4360 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
"There are a couple of new things for s390 with this merge request:
- a new scheduling domain "drawer" is added to reflect the unusual
topology found on z13 machines. Performance tests showed up to 8
percent gain with the additional domain.
- the new crc-32 checksum crypto module uses the vector-galois-field
multiply and sum SIMD instruction to speed up crc-32 and crc-32c.
- proper __ro_after_init support, this requires RO_AFTER_INIT_DATA in
the generic vmlinux.lds linker script definitions.
- kcov instrumentation support. A prerequisite for that is the
inline assembly basic block cleanup, which is the reason for the
net/iucv/iucv.c change.
- support for 2GB pages is added to the hugetlbfs backend.
Then there are two removals:
- the oprofile hardware sampling support is dead code and is removed.
The oprofile user space uses the perf interface nowadays.
- the ETR clock synchronization is removed, this has been superseded
by the STP clock synchronization. And it always has been
"interesting" code...
And the usual bug fixes and cleanups"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (82 commits)
s390/pci: Delete an unnecessary check before the function call "pci_dev_put"
s390/smp: clean up a condition
s390/cio/chp : Remove deprecated create_singlethread_workqueue
s390/chsc: improve channel path descriptor determination
s390/chsc: sanitize fmt check for chp_desc determination
s390/cio: make fmt1 channel path descriptor optional
s390/chsc: fix ioctl CHSC_INFO_CU command
s390/cio/device_ops: fix kernel doc
s390/cio: allow to reset channel measurement block
s390/console: Make preferred console handling more consistent
s390/mm: fix gmap tlb flush issues
s390/mm: add support for 2GB hugepages
s390: have unique symbol for __switch_to address
s390/cpuinfo: show maximum thread id
s390/ptrace: clarify bits in the per_struct
s390: stack address vs thread_info
s390: remove pointless load within __switch_to
s390: enable kcov support
s390/cpumf: use basic block for ecctr inline assembly
s390/hypfs: use basic block for diag inline assembly
...
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/dump_pagetables.c |   2
-rw-r--r--  arch/s390/mm/fault.c           |   2
-rw-r--r--  arch/s390/mm/gmap.c            |   7
-rw-r--r--  arch/s390/mm/gup.c             |  45
-rw-r--r--  arch/s390/mm/hugetlbpage.c     | 129
-rw-r--r--  arch/s390/mm/init.c            |  13
-rw-r--r--  arch/s390/mm/page-states.c     |  13
-rw-r--r--  arch/s390/mm/pageattr.c        | 267
-rw-r--r--  arch/s390/mm/pgtable.c         |  93
-rw-r--r--  arch/s390/mm/vmem.c            |  73
10 files changed, 493 insertions, 151 deletions
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 8556d6be9b54..861880df12c7 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -157,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
         pud = pud_offset(pgd, addr);
         if (!pud_none(*pud))
                 if (pud_large(*pud)) {
-                        prot = pud_val(*pud) & _REGION3_ENTRY_RO;
+                        prot = pud_val(*pud) & _REGION_ENTRY_PROTECT;
                         note_page(m, st, prot, 2);
                 } else
                         walk_pmd_level(m, st, pud, addr);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 19288c1b36d3..6ad7eff84c82 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -624,7 +624,7 @@ void pfault_fini(void)
         diag_stat_inc(DIAG_STAT_X258);
         asm volatile(
                 "       diag    %0,0,0x258\n"
-                "0:\n"
+                "0:     nopr    %%r7\n"
                 EX_TABLE(0b,0b)
                 : : "a" (&refbk), "m" (refbk) : "cc");
 }
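
A hedged aside on the one-line change above, inferred from the shape of the code rather than stated in this hunk: EX_TABLE(0b,0b) makes label 0 both the potentially faulting address and its own fixup target. An empty "0:" label sits at the very end of the inline assembly, so a directly following inline asm could begin at the same address and contribute a second, conflicting exception-table entry; padding the label with a no-op (nopr) gives it an instruction of its own. The resulting idiom:

        asm volatile(
                "       diag    %0,0,0x258\n"   /* may fault while pfault is shut down */
                "0:     nopr    %%r7\n"         /* no-op: fixup label gets its own insn */
                EX_TABLE(0b,0b)
                : : "a" (&refbk), "m" (refbk) : "cc");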
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index cace818d86eb..063c721ec0dc 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -85,7 +85,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
 static void gmap_flush_tlb(struct gmap *gmap)
 {
         if (MACHINE_HAS_IDTE)
-                __tlb_flush_asce(gmap->mm, gmap->asce);
+                __tlb_flush_idte(gmap->asce);
         else
                 __tlb_flush_global();
 }
@@ -124,7 +124,7 @@ void gmap_free(struct gmap *gmap)
 
         /* Flush tlb. */
         if (MACHINE_HAS_IDTE)
-                __tlb_flush_asce(gmap->mm, gmap->asce);
+                __tlb_flush_idte(gmap->asce);
         else
                 __tlb_flush_global();
 
@@ -430,6 +430,9 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
         VM_BUG_ON(pgd_none(*pgd));
         pud = pud_offset(pgd, vmaddr);
         VM_BUG_ON(pud_none(*pud));
+        /* large puds cannot yet be handled */
+        if (pud_large(*pud))
+                return -EFAULT;
         pmd = pmd_offset(pud, vmaddr);
         VM_BUG_ON(pmd_none(*pmd));
         /* large pmds cannot yet be handled */
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index a8a6765f1a51..adb0c34bf431 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -128,6 +128,44 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
         return 1;
 }
 
+static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
+                unsigned long end, int write, struct page **pages, int *nr)
+{
+        struct page *head, *page;
+        unsigned long mask;
+        int refs;
+
+        mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
+        if ((pud_val(pud) & mask) != 0)
+                return 0;
+        VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
+
+        refs = 0;
+        head = pud_page(pud);
+        page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+        do {
+                VM_BUG_ON_PAGE(compound_head(page) != head, page);
+                pages[*nr] = page;
+                (*nr)++;
+                page++;
+                refs++;
+        } while (addr += PAGE_SIZE, addr != end);
+
+        if (!page_cache_add_speculative(head, refs)) {
+                *nr -= refs;
+                return 0;
+        }
+
+        if (unlikely(pud_val(pud) != pud_val(*pudp))) {
+                *nr -= refs;
+                while (refs--)
+                        put_page(head);
+                return 0;
+        }
+
+        return 1;
+}
+
 static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
                 unsigned long end, int write, struct page **pages, int *nr)
 {
@@ -144,7 +182,12 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
                 next = pud_addr_end(addr, end);
                 if (pud_none(pud))
                         return 0;
-                if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
+                if (unlikely(pud_large(pud))) {
+                        if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
+                                          nr))
+                                return 0;
+                } else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
+                                          nr))
                         return 0;
         } while (pudp++, addr = next, addr != end);
 
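
For orientation, a hedged worked example of the tail-page arithmetic in gup_huge_pud(); the constants assume s390's layout (PAGE_SHIFT = 12 and 2 GB pud entries, i.e. PUD_SHIFT = 31):

        /* addr = 0x80045000 inside a 2G pud:
         *   addr & ~PUD_MASK  = 0x00045000    offset into the pud
         *   0x45000 >> 12     = 0x45 = 69     69th 4K page of the pud
         * so page = pud_page(pud) + 69; the loop then takes one speculative
         * reference per covered page and backs out if *pudp changed meanwhile.
         */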
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 1b5e8983f4f3..e19d853883be 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -1,19 +1,22 @@
 /*
  * IBM System z Huge TLB Page Support for Kernel.
  *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007,2016
  * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "hugetlb"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 
-static inline pmd_t __pte_to_pmd(pte_t pte)
+static inline unsigned long __pte_to_rste(pte_t pte)
 {
-        pmd_t pmd;
+        unsigned long rste;
 
         /*
-         * Convert encoding              pte bits       pmd bits
+         * Convert encoding              pte bits       pmd / pud bits
          *                               lIR.uswrdy.p   dy..R...I...wr
          * empty                         010.000000.0 -> 00..0...1...00
          * prot-none, clean, old         111.000000.1 -> 00..1...1...00
@@ -33,25 +36,31 @@ static inline pmd_t __pte_to_pmd(pte_t pte)
          *          u unused, l large
          */
         if (pte_present(pte)) {
-                pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13;
+                rste = pte_val(pte) & PAGE_MASK;
+                rste |= (pte_val(pte) & _PAGE_READ) >> 4;
+                rste |= (pte_val(pte) & _PAGE_WRITE) >> 4;
+                rste |= (pte_val(pte) & _PAGE_INVALID) >> 5;
+                rste |= (pte_val(pte) & _PAGE_PROTECT);
+                rste |= (pte_val(pte) & _PAGE_DIRTY) << 10;
+                rste |= (pte_val(pte) & _PAGE_YOUNG) << 10;
+                rste |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13;
         } else
-                pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
-        return pmd;
+                rste = _SEGMENT_ENTRY_INVALID;
+        return rste;
 }
 
-static inline pte_t __pmd_to_pte(pmd_t pmd)
+static inline pte_t __rste_to_pte(unsigned long rste)
 {
+        int present;
         pte_t pte;
 
+        if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+                present = pud_present(__pud(rste));
+        else
+                present = pmd_present(__pmd(rste));
+
         /*
-         * Convert encoding              pmd bits       pte bits
+         * Convert encoding              pmd / pud bits pte bits
          *                               dy..R...I...wr lIR.uswrdy.p
          * empty                         00..0...1...00 -> 010.000000.0
          * prot-none, clean, old         00..1...1...00 -> 111.000000.1
@@ -70,16 +79,16 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
          * SW-bits: p present, y young, d dirty, r read, w write, s special,
          *          u unused, l large
          */
-        if (pmd_present(pmd)) {
-                pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
+        if (present) {
+                pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
                 pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_READ) << 4;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_WRITE) << 4;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_INVALID) << 5;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_PROTECT);
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_DIRTY) >> 10;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_YOUNG) >> 10;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13;
         } else
                 pte_val(pte) = _PAGE_INVALID;
         return pte;
@@ -88,27 +97,33 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep, pte_t pte)
 {
-        pmd_t pmd = __pte_to_pmd(pte);
+        unsigned long rste = __pte_to_rste(pte);
 
-        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
-        *(pmd_t *) ptep = pmd;
+        /* Set correct table type for 2G hugepages */
+        if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+                rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
+        else
+                rste |= _SEGMENT_ENTRY_LARGE;
+        pte_val(*ptep) = rste;
 }
 
 pte_t huge_ptep_get(pte_t *ptep)
 {
-        pmd_t pmd = *(pmd_t *) ptep;
-
-        return __pmd_to_pte(pmd);
+        return __rste_to_pte(pte_val(*ptep));
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                               unsigned long addr, pte_t *ptep)
 {
+        pte_t pte = huge_ptep_get(ptep);
         pmd_t *pmdp = (pmd_t *) ptep;
-        pmd_t old;
+        pud_t *pudp = (pud_t *) ptep;
 
-        old = pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
-        return __pmd_to_pte(old);
+        if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+                pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
+        else
+                pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+        return pte;
 }
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
@@ -120,8 +135,12 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 
         pgdp = pgd_offset(mm, addr);
         pudp = pud_alloc(mm, pgdp, addr);
-        if (pudp)
-                pmdp = pmd_alloc(mm, pudp, addr);
+        if (pudp) {
+                if (sz == PUD_SIZE)
+                        return (pte_t *) pudp;
+                else if (sz == PMD_SIZE)
+                        pmdp = pmd_alloc(mm, pudp, addr);
+        }
         return (pte_t *) pmdp;
 }
 
@@ -134,8 +153,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
         pgdp = pgd_offset(mm, addr);
         if (pgd_present(*pgdp)) {
                 pudp = pud_offset(pgdp, addr);
-                if (pud_present(*pudp))
+                if (pud_present(*pudp)) {
+                        if (pud_large(*pudp))
+                                return (pte_t *) pudp;
                         pmdp = pmd_offset(pudp, addr);
+                }
         }
         return (pte_t *) pmdp;
 }
@@ -147,5 +169,34 @@ int pmd_huge(pmd_t pmd)
 
 int pud_huge(pud_t pud)
 {
-        return 0;
+        return pud_large(pud);
+}
+
+struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+                pud_t *pud, int flags)
+{
+        if (flags & FOLL_GET)
+                return NULL;
+
+        return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
+}
+
+static __init int setup_hugepagesz(char *opt)
+{
+        unsigned long size;
+        char *string = opt;
+
+        size = memparse(opt, &opt);
+        if (MACHINE_HAS_EDAT1 && size == PMD_SIZE) {
+                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
+        } else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
+                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+        } else {
+                pr_err("hugepagesz= specifies an unsupported page size %s\n",
+                       string);
+                return 0;
+        }
+        return 1;
 }
+__setup("hugepagesz=", setup_hugepagesz);
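
With setup_hugepagesz() wired up, a 2 GB hstate can be requested at boot like any other size. A hedged usage sketch (it needs a machine announcing EDAT2; the pool sizes and mount point are examples): memparse() accepts the usual K/M/G suffixes, so "2G" matches PUD_SIZE and "1M" matches PMD_SIZE on s390.

        # kernel command line: a 2G pool next to the classic 1M pool
        hugepagesz=2G hugepages=4 hugepagesz=1M hugepages=64

        # mount a hugetlbfs instance that hands out the 2G pages
        mount -t hugetlbfs -o pagesize=2G none /mnt/huge2g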
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 2489b2e917c8..f56a39bd8ba6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -40,7 +40,7 @@
 #include <asm/ctl_reg.h>
 #include <asm/sclp.h>
 
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
@@ -111,17 +111,16 @@
 
 void mark_rodata_ro(void)
 {
-        /* Text and rodata are already protected. Nothing to do here. */
-        pr_info("Write protecting the kernel read-only data: %luk\n",
-                ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
+        unsigned long size = __end_ro_after_init - __start_ro_after_init;
+
+        set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
+        pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
 }
 
 void __init mem_init(void)
 {
-        if (MACHINE_HAS_TLB_LC)
-                cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
+        cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
         cpumask_set_cpu(0, mm_cpumask(&init_mm));
-        atomic_set(&init_mm.context.attach_count, 1);
 
         set_max_mapnr(max_low_pfn);
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
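
mark_rodata_ro() now does real work because the generic vmlinux.lds changes give s390 a dedicated __start_ro_after_init/__end_ro_after_init range. A hedged illustration of what that buys a driver (the identifiers in the example are made up):

        /* writable while __init code runs, read-only once mark_rodata_ro() has run */
        static int example_limit __ro_after_init = 16;

        static int __init example_limit_setup(char *s)
        {
                example_limit = simple_strtoul(s, NULL, 0);     /* still legal here */
                return 1;
        }
        __setup("example_limit=", example_limit_setup);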
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e9dfb0..3330ea124eec 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -34,20 +34,25 @@ static int __init cmma(char *str)
 }
 __setup("cmma=", cmma);
 
-void __init cmma_init(void)
+static inline int cmma_test_essa(void)
 {
         register unsigned long tmp asm("0") = 0;
         register int rc asm("1") = -EOPNOTSUPP;
 
-        if (!cmma_flag)
-                return;
         asm volatile(
                 "       .insn rrf,0xb9ab0000,%1,%1,0,0\n"
                 "0:     la      %0,0\n"
                 "1:\n"
                 EX_TABLE(0b,1b)
                 : "+&d" (rc), "+&d" (tmp));
-        if (rc)
+        return rc;
+}
+
+void __init cmma_init(void)
+{
+        if (!cmma_flag)
+                return;
+        if (cmma_test_essa())
                 cmma_flag = 0;
 }
 
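
This split is one instance of the series-wide inline-assembly basic block cleanup named in the merge description: the asm moves into a tiny helper of its own, so coverage instrumentation (kcov) wraps the call instead of being interleaved with the asm and its EX_TABLE fixup. Schematically, with hypothetical names:

        static int feature_flag = 1;

        static inline int probe_feature(void)
        {
                int rc = -EOPNOTSUPP;

                /* the lone inline asm with its EX_TABLE fixup lives here */
                return rc;
        }

        void __init feature_init(void)
        {
                if (probe_feature())    /* plain, instrumentable C */
                        feature_flag = 0;
        }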
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index f2a5c29a97e9..7104ffb5a67f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -10,7 +10,6 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 
-#if PAGE_DEFAULT_KEY
 static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
 {
         asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
@@ -22,6 +21,8 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
 {
         unsigned long boundary, size;
 
+        if (!PAGE_DEFAULT_KEY)
+                return;
         while (start < end) {
                 if (MACHINE_HAS_EDAT1) {
                         /* set storage keys for a 1MB frame */
@@ -38,56 +39,254 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
                 start += PAGE_SIZE;
         }
 }
-#endif
 
-static pte_t *walk_page_table(unsigned long addr)
+#ifdef CONFIG_PROC_FS
+atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+
+void arch_report_meminfo(struct seq_file *m)
 {
-        pgd_t *pgdp;
-        pud_t *pudp;
+        seq_printf(m, "DirectMap4k:    %8lu kB\n",
+                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
+        seq_printf(m, "DirectMap1M:    %8lu kB\n",
+                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
+        seq_printf(m, "DirectMap2G:    %8lu kB\n",
+                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
+}
+#endif /* CONFIG_PROC_FS */
+
+static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
+                    unsigned long dtt)
+{
+        unsigned long table, mask;
+
+        mask = 0;
+        if (MACHINE_HAS_EDAT2) {
+                switch (dtt) {
+                case CRDTE_DTT_REGION3:
+                        mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
+                        break;
+                case CRDTE_DTT_SEGMENT:
+                        mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+                        break;
+                case CRDTE_DTT_PAGE:
+                        mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
+                        break;
+                }
+                table = (unsigned long)old & mask;
+                crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
+        } else if (MACHINE_HAS_IDTE) {
+                cspg(old, *old, new);
+        } else {
+                csp((unsigned int *)old + 1, *old, new);
+        }
+}
+
+struct cpa {
+        unsigned int set_ro : 1;
+        unsigned int clear_ro : 1;
+};
+
+static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
+                          struct cpa cpa)
+{
+        pte_t *ptep, new;
+
+        ptep = pte_offset(pmdp, addr);
+        do {
+                if (pte_none(*ptep))
+                        return -EINVAL;
+                if (cpa.set_ro)
+                        new = pte_wrprotect(*ptep);
+                else if (cpa.clear_ro)
+                        new = pte_mkwrite(pte_mkdirty(*ptep));
+                pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
+                ptep++;
+                addr += PAGE_SIZE;
+                cond_resched();
+        } while (addr < end);
+        return 0;
+}
+
+static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
+{
+        unsigned long pte_addr, prot;
+        pte_t *pt_dir, *ptep;
+        pmd_t new;
+        int i, ro;
+
+        pt_dir = vmem_pte_alloc();
+        if (!pt_dir)
+                return -ENOMEM;
+        pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
+        ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
+        prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+        ptep = pt_dir;
+        for (i = 0; i < PTRS_PER_PTE; i++) {
+                pte_val(*ptep) = pte_addr | prot;
+                pte_addr += PAGE_SIZE;
+                ptep++;
+        }
+        pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
+        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
+        update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
+        update_page_count(PG_DIRECT_MAP_1M, -1);
+        return 0;
+}
+
+static void modify_pmd_page(pmd_t *pmdp, unsigned long addr, struct cpa cpa)
+{
+        pmd_t new;
+
+        if (cpa.set_ro)
+                new = pmd_wrprotect(*pmdp);
+        else if (cpa.clear_ro)
+                new = pmd_mkwrite(pmd_mkdirty(*pmdp));
+        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
+}
+
+static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
+                          struct cpa cpa)
+{
+        unsigned long next;
         pmd_t *pmdp;
-        pte_t *ptep;
+        int rc = 0;
 
-        pgdp = pgd_offset_k(addr);
-        if (pgd_none(*pgdp))
-                return NULL;
-        pudp = pud_offset(pgdp, addr);
-        if (pud_none(*pudp) || pud_large(*pudp))
-                return NULL;
         pmdp = pmd_offset(pudp, addr);
-        if (pmd_none(*pmdp) || pmd_large(*pmdp))
-                return NULL;
-        ptep = pte_offset_kernel(pmdp, addr);
-        if (pte_none(*ptep))
-                return NULL;
-        return ptep;
+        do {
+                if (pmd_none(*pmdp))
+                        return -EINVAL;
+                next = pmd_addr_end(addr, end);
+                if (pmd_large(*pmdp)) {
+                        if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
+                                rc = split_pmd_page(pmdp, addr);
+                                if (rc)
+                                        return rc;
+                                continue;
+                        }
+                        modify_pmd_page(pmdp, addr, cpa);
+                } else {
+                        rc = walk_pte_level(pmdp, addr, next, cpa);
+                        if (rc)
+                                return rc;
+                }
+                pmdp++;
+                addr = next;
+                cond_resched();
+        } while (addr < end);
+        return rc;
 }
 
-static void change_page_attr(unsigned long addr, int numpages,
-                             pte_t (*set) (pte_t))
+static int split_pud_page(pud_t *pudp, unsigned long addr)
 {
-        pte_t *ptep;
-        int i;
+        unsigned long pmd_addr, prot;
+        pmd_t *pm_dir, *pmdp;
+        pud_t new;
+        int i, ro;
 
-        for (i = 0; i < numpages; i++) {
-                ptep = walk_page_table(addr);
-                if (WARN_ON_ONCE(!ptep))
-                        break;
-                *ptep = set(*ptep);
-                addr += PAGE_SIZE;
+        pm_dir = vmem_pmd_alloc();
+        if (!pm_dir)
+                return -ENOMEM;
+        pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
+        ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
+        prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
+        pmdp = pm_dir;
+        for (i = 0; i < PTRS_PER_PMD; i++) {
+                pmd_val(*pmdp) = pmd_addr | prot;
+                pmd_addr += PMD_SIZE;
+                pmdp++;
         }
-        __tlb_flush_kernel();
+        pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
+        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
+        update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
+        update_page_count(PG_DIRECT_MAP_2G, -1);
+        return 0;
+}
+
+static void modify_pud_page(pud_t *pudp, unsigned long addr, struct cpa cpa)
+{
+        pud_t new;
+
+        if (cpa.set_ro)
+                new = pud_wrprotect(*pudp);
+        else if (cpa.clear_ro)
+                new = pud_mkwrite(pud_mkdirty(*pudp));
+        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
+}
+
+static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
+                          struct cpa cpa)
+{
+        unsigned long next;
+        pud_t *pudp;
+        int rc = 0;
+
+        pudp = pud_offset(pgd, addr);
+        do {
+                if (pud_none(*pudp))
+                        return -EINVAL;
+                next = pud_addr_end(addr, end);
+                if (pud_large(*pudp)) {
+                        if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
+                                rc = split_pud_page(pudp, addr);
+                                if (rc)
+                                        break;
+                                continue;
+                        }
+                        modify_pud_page(pudp, addr, cpa);
+                } else {
+                        rc = walk_pmd_level(pudp, addr, next, cpa);
+                }
+                pudp++;
+                addr = next;
+                cond_resched();
+        } while (addr < end && !rc);
+        return rc;
+}
+
+static DEFINE_MUTEX(cpa_mutex);
+
+static int change_page_attr(unsigned long addr, unsigned long end,
+                            struct cpa cpa)
+{
+        unsigned long next;
+        int rc = -EINVAL;
+        pgd_t *pgdp;
+
+        if (end >= MODULES_END)
+                return -EINVAL;
+        mutex_lock(&cpa_mutex);
+        pgdp = pgd_offset_k(addr);
+        do {
+                if (pgd_none(*pgdp))
+                        break;
+                next = pgd_addr_end(addr, end);
+                rc = walk_pud_level(pgdp, addr, next, cpa);
+                if (rc)
+                        break;
+                cond_resched();
+        } while (pgdp++, addr = next, addr < end && !rc);
+        mutex_unlock(&cpa_mutex);
+        return rc;
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
 {
-        change_page_attr(addr, numpages, pte_wrprotect);
-        return 0;
+        struct cpa cpa = {
+                .set_ro = 1,
+        };
+
+        addr &= PAGE_MASK;
+        return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
 }
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
-        change_page_attr(addr, numpages, pte_mkwrite);
-        return 0;
+        struct cpa cpa = {
+                .clear_ro = 1,
+        };
+
+        addr &= PAGE_MASK;
+        return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
 }
 
 /* not possible */
@@ -138,7 +337,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
         nr = min(numpages - i, nr);
         if (enable) {
                 for (j = 0; j < nr; j++) {
-                        pte_val(*pte) = __pa(address);
+                        pte_val(*pte) = address | pgprot_val(PAGE_KERNEL);
                         address += PAGE_SIZE;
                         pte++;
                 }
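
The net effect of this rewrite: set_memory_ro()/set_memory_rw() now walk the kernel mapping at all three levels and split 2G region and 1M segment entries on demand via crdte/cspg/csp, instead of silently doing nothing for anything but 4K ptes. A hedged caller-side sketch, mirroring what the new mark_rodata_ro() in arch/s390/mm/init.c does:

        unsigned long start = (unsigned long) __start_ro_after_init;
        unsigned long size = __end_ro_after_init - __start_ro_after_init;

        /* start is rounded down to a page; large mappings are split as needed */
        set_memory_ro(start, size >> PAGE_SHIFT);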
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 9f0ce0e6eeb4..b98d1a152d46 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -27,40 +27,37 @@
 static inline pte_t ptep_flush_direct(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
 {
-        int active, count;
         pte_t old;
 
         old = *ptep;
         if (unlikely(pte_val(old) & _PAGE_INVALID))
                 return old;
-        active = (mm == current->active_mm) ? 1 : 0;
-        count = atomic_add_return(0x10000, &mm->context.attach_count);
-        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+        atomic_inc(&mm->context.flush_count);
+        if (MACHINE_HAS_TLB_LC &&
             cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                 __ptep_ipte_local(addr, ptep);
         else
                 __ptep_ipte(addr, ptep);
-        atomic_sub(0x10000, &mm->context.attach_count);
+        atomic_dec(&mm->context.flush_count);
         return old;
 }
 
 static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
                                     unsigned long addr, pte_t *ptep)
 {
-        int active, count;
         pte_t old;
 
         old = *ptep;
         if (unlikely(pte_val(old) & _PAGE_INVALID))
                 return old;
-        active = (mm == current->active_mm) ? 1 : 0;
-        count = atomic_add_return(0x10000, &mm->context.attach_count);
-        if ((count & 0xffff) <= active) {
+        atomic_inc(&mm->context.flush_count);
+        if (cpumask_equal(&mm->context.cpu_attach_mask,
+                          cpumask_of(smp_processor_id()))) {
                 pte_val(*ptep) |= _PAGE_INVALID;
                 mm->context.flush_mm = 1;
         } else
                 __ptep_ipte(addr, ptep);
-        atomic_sub(0x10000, &mm->context.attach_count);
+        atomic_dec(&mm->context.flush_count);
         return old;
 }
 
@@ -70,7 +67,6 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
 #ifdef CONFIG_PGSTE
         unsigned long old;
 
-        preempt_disable();
         asm(
                 "       lg      %0,%2\n"
                 "0:     lgr     %1,%0\n"
@@ -93,7 +89,6 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
                 : "=Q" (ptep[PTRS_PER_PTE])
                 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
                 : "cc", "memory");
-        preempt_enable();
 #endif
 }
 
@@ -230,9 +225,11 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
         pgste_t pgste;
         pte_t old;
 
+        preempt_disable();
         pgste = ptep_xchg_start(mm, addr, ptep);
         old = ptep_flush_direct(mm, addr, ptep);
         ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+        preempt_enable();
         return old;
 }
 EXPORT_SYMBOL(ptep_xchg_direct);
@@ -243,9 +240,11 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
         pgste_t pgste;
         pte_t old;
 
+        preempt_disable();
         pgste = ptep_xchg_start(mm, addr, ptep);
         old = ptep_flush_lazy(mm, addr, ptep);
         ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+        preempt_enable();
         return old;
 }
 EXPORT_SYMBOL(ptep_xchg_lazy);
@@ -256,6 +255,7 @@ pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
         pgste_t pgste;
         pte_t old;
 
+        preempt_disable();
         pgste = ptep_xchg_start(mm, addr, ptep);
         old = ptep_flush_lazy(mm, addr, ptep);
         if (mm_has_pgste(mm)) {
@@ -279,13 +279,13 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
         } else {
                 *ptep = pte;
         }
+        preempt_enable();
 }
 EXPORT_SYMBOL(ptep_modify_prot_commit);
 
 static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
                                       unsigned long addr, pmd_t *pmdp)
 {
-        int active, count;
         pmd_t old;
 
         old = *pmdp;
@@ -295,36 +295,34 @@ static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
                 __pmdp_csp(pmdp);
                 return old;
         }
-        active = (mm == current->active_mm) ? 1 : 0;
-        count = atomic_add_return(0x10000, &mm->context.attach_count);
-        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+        atomic_inc(&mm->context.flush_count);
+        if (MACHINE_HAS_TLB_LC &&
             cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                 __pmdp_idte_local(addr, pmdp);
         else
                 __pmdp_idte(addr, pmdp);
-        atomic_sub(0x10000, &mm->context.attach_count);
+        atomic_dec(&mm->context.flush_count);
         return old;
 }
 
 static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
                                     unsigned long addr, pmd_t *pmdp)
 {
-        int active, count;
         pmd_t old;
 
         old = *pmdp;
         if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
                 return old;
-        active = (mm == current->active_mm) ? 1 : 0;
-        count = atomic_add_return(0x10000, &mm->context.attach_count);
-        if ((count & 0xffff) <= active) {
+        atomic_inc(&mm->context.flush_count);
+        if (cpumask_equal(&mm->context.cpu_attach_mask,
+                          cpumask_of(smp_processor_id()))) {
                 pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
                 mm->context.flush_mm = 1;
         } else if (MACHINE_HAS_IDTE)
                 __pmdp_idte(addr, pmdp);
         else
                 __pmdp_csp(pmdp);
-        atomic_sub(0x10000, &mm->context.attach_count);
+        atomic_dec(&mm->context.flush_count);
         return old;
 }
 
@@ -333,8 +331,10 @@ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 {
         pmd_t old;
 
+        preempt_disable();
         old = pmdp_flush_direct(mm, addr, pmdp);
         *pmdp = new;
+        preempt_enable();
         return old;
 }
 EXPORT_SYMBOL(pmdp_xchg_direct);
@@ -344,12 +344,53 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 {
         pmd_t old;
 
+        preempt_disable();
         old = pmdp_flush_lazy(mm, addr, pmdp);
         *pmdp = new;
+        preempt_enable();
         return old;
 }
 EXPORT_SYMBOL(pmdp_xchg_lazy);
 
+static inline pud_t pudp_flush_direct(struct mm_struct *mm,
+                                      unsigned long addr, pud_t *pudp)
+{
+        pud_t old;
+
+        old = *pudp;
+        if (pud_val(old) & _REGION_ENTRY_INVALID)
+                return old;
+        if (!MACHINE_HAS_IDTE) {
+                /*
+                 * Invalid bit position is the same for pmd and pud, so we can
+                 * re-use _pmd_csp() here
+                 */
+                __pmdp_csp((pmd_t *) pudp);
+                return old;
+        }
+        atomic_inc(&mm->context.flush_count);
+        if (MACHINE_HAS_TLB_LC &&
+            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+                __pudp_idte_local(addr, pudp);
+        else
+                __pudp_idte(addr, pudp);
+        atomic_dec(&mm->context.flush_count);
+        return old;
+}
+
+pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
+                       pud_t *pudp, pud_t new)
+{
+        pud_t old;
+
+        preempt_disable();
+        old = pudp_flush_direct(mm, addr, pudp);
+        *pudp = new;
+        preempt_enable();
+        return old;
+}
+EXPORT_SYMBOL(pudp_xchg_direct);
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                 pgtable_t pgtable)
@@ -398,20 +439,24 @@ void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
         pgste_t pgste;
 
         /* the mm_has_pgste() check is done in set_pte_at() */
+        preempt_disable();
         pgste = pgste_get_lock(ptep);
         pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
         pgste_set_key(ptep, pgste, entry, mm);
         pgste = pgste_set_pte(ptep, pgste, entry);
         pgste_set_unlock(ptep, pgste);
+        preempt_enable();
 }
 
 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         pgste_t pgste;
 
+        preempt_disable();
         pgste = pgste_get_lock(ptep);
         pgste_val(pgste) |= PGSTE_IN_BIT;
         pgste_set_unlock(ptep, pgste);
+        preempt_enable();
 }
 
 static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
@@ -434,6 +479,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
         pte_t pte;
 
         /* Zap unused and logically-zero pages */
+        preempt_disable();
         pgste = pgste_get_lock(ptep);
         pgstev = pgste_val(pgste);
         pte = *ptep;
@@ -446,6 +492,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
         if (reset)
                 pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
         pgste_set_unlock(ptep, pgste);
+        preempt_enable();
 }
 
 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -454,6 +501,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
         pgste_t pgste;
 
         /* Clear storage key */
+        preempt_disable();
         pgste = pgste_get_lock(ptep);
         pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
                               PGSTE_GR_BIT | PGSTE_GC_BIT);
@@ -461,6 +509,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
         if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
                 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
         pgste_set_unlock(ptep, pgste);
+        preempt_enable();
 }
 
 /*
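
Common to all the hunks above: the old attach_count trick (adding 0x10000 per flusher and checking the low halfword against the number of attached users) is replaced by a plain flush_count plus a cpu_attach_mask comparison, and the local-versus-global flush decision only stays valid while the caller cannot migrate. Hence the recurring bracket, sketched here for the new pud helper:

        preempt_disable();                       /* pin the CPU the decision is made on */
        old = pudp_flush_direct(mm, addr, pudp); /* idte, local idte, or csp fallback */
        *pudp = new;
        preempt_enable();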
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index d48cf25cfe99..1848292766ef 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -11,6 +11,7 @@
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
+#include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
@@ -29,9 +30,11 @@ static LIST_HEAD(mem_segs);
 
 static void __ref *vmem_alloc_pages(unsigned int order)
 {
+        unsigned long size = PAGE_SIZE << order;
+
         if (slab_is_available())
                 return (void *)__get_free_pages(GFP_KERNEL, order);
-        return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+        return alloc_bootmem_align(size, size);
 }
 
 static inline pud_t *vmem_pud_alloc(void)
@@ -45,7 +48,7 @@ static inline pud_t *vmem_pud_alloc(void)
         return pud;
 }
 
-static inline pmd_t *vmem_pmd_alloc(void)
+pmd_t *vmem_pmd_alloc(void)
 {
         pmd_t *pmd = NULL;
 
@@ -56,7 +59,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
         return pmd;
 }
 
-static pte_t __ref *vmem_pte_alloc(void)
+pte_t __ref *vmem_pte_alloc(void)
 {
         pte_t *pte;
 
@@ -75,8 +78,9 @@ static pte_t __ref *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
+static int vmem_add_mem(unsigned long start, unsigned long size)
 {
+        unsigned long pages4k, pages1m, pages2g;
         unsigned long end = start + size;
         unsigned long address = start;
         pgd_t *pg_dir;
@@ -85,6 +89,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
         pte_t *pt_dir;
         int ret = -ENOMEM;
 
+        pages4k = pages1m = pages2g = 0;
         while (address < end) {
                 pg_dir = pgd_offset_k(address);
                 if (pgd_none(*pg_dir)) {
@@ -97,10 +102,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
                     !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
                     !debug_pagealloc_enabled()) {
-                        pud_val(*pu_dir) = __pa(address) |
-                                _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
-                                (ro ? _REGION_ENTRY_PROTECT : 0);
+                        pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
                         address += PUD_SIZE;
+                        pages2g++;
                         continue;
                 }
                 if (pud_none(*pu_dir)) {
@@ -113,11 +117,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                 if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
                     !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
                     !debug_pagealloc_enabled()) {
-                        pmd_val(*pm_dir) = __pa(address) |
-                                _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
-                                _SEGMENT_ENTRY_YOUNG |
-                                (ro ? _SEGMENT_ENTRY_PROTECT : 0);
+                        pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
                         address += PMD_SIZE;
+                        pages1m++;
                         continue;
                 }
                 if (pmd_none(*pm_dir)) {
@@ -128,12 +130,15 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                 }
 
                 pt_dir = pte_offset_kernel(pm_dir, address);
-                pte_val(*pt_dir) = __pa(address) |
-                        pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+                pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
                 address += PAGE_SIZE;
+                pages4k++;
         }
         ret = 0;
 out:
+        update_page_count(PG_DIRECT_MAP_4K, pages4k);
+        update_page_count(PG_DIRECT_MAP_1M, pages1m);
+        update_page_count(PG_DIRECT_MAP_2G, pages2g);
         return ret;
 }
 
@@ -143,15 +148,15 @@ out:
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
+        unsigned long pages4k, pages1m, pages2g;
         unsigned long end = start + size;
         unsigned long address = start;
         pgd_t *pg_dir;
         pud_t *pu_dir;
         pmd_t *pm_dir;
         pte_t *pt_dir;
-        pte_t pte;
 
-        pte_val(pte) = _PAGE_INVALID;
+        pages4k = pages1m = pages2g = 0;
         while (address < end) {
                 pg_dir = pgd_offset_k(address);
                 if (pgd_none(*pg_dir)) {
@@ -166,6 +171,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
                 if (pud_large(*pu_dir)) {
                         pud_clear(pu_dir);
                         address += PUD_SIZE;
+                        pages2g++;
                         continue;
                 }
                 pm_dir = pmd_offset(pu_dir, address);
@@ -176,13 +182,18 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
                 if (pmd_large(*pm_dir)) {
                         pmd_clear(pm_dir);
                         address += PMD_SIZE;
+                        pages1m++;
                         continue;
                 }
                 pt_dir = pte_offset_kernel(pm_dir, address);
-                *pt_dir = pte;
+                pte_clear(&init_mm, address, pt_dir);
                 address += PAGE_SIZE;
+                pages4k++;
         }
         flush_tlb_kernel_range(start, end);
+        update_page_count(PG_DIRECT_MAP_4K, -pages4k);
+        update_page_count(PG_DIRECT_MAP_1M, -pages1m);
+        update_page_count(PG_DIRECT_MAP_2G, -pages2g);
 }
 
 /*
@@ -341,7 +352,7 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
         if (ret)
                 goto out_free;
 
-        ret = vmem_add_mem(start, size, 0);
+        ret = vmem_add_mem(start, size);
         if (ret)
                 goto out_remove;
         goto out;
@@ -362,31 +373,13 @@ out:
  */
 void __init vmem_map_init(void)
 {
-        unsigned long ro_start, ro_end;
+        unsigned long size = _eshared - _stext;
         struct memblock_region *reg;
-        phys_addr_t start, end;
 
-        ro_start = PFN_ALIGN((unsigned long)&_stext);
-        ro_end = (unsigned long)&_eshared & PAGE_MASK;
-        for_each_memblock(memory, reg) {
-                start = reg->base;
-                end = reg->base + reg->size;
-                if (start >= ro_end || end <= ro_start)
-                        vmem_add_mem(start, end - start, 0);
-                else if (start >= ro_start && end <= ro_end)
-                        vmem_add_mem(start, end - start, 1);
-                else if (start >= ro_start) {
-                        vmem_add_mem(start, ro_end - start, 1);
-                        vmem_add_mem(ro_end, end - ro_end, 0);
-                } else if (end < ro_end) {
-                        vmem_add_mem(start, ro_start - start, 0);
-                        vmem_add_mem(ro_start, end - ro_start, 1);
-                } else {
-                        vmem_add_mem(start, ro_start - start, 0);
-                        vmem_add_mem(ro_start, ro_end - ro_start, 1);
-                        vmem_add_mem(ro_end, end - ro_end, 0);
-                }
-        }
+        for_each_memblock(memory, reg)
+                vmem_add_mem(reg->base, reg->size);
+        set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
+        pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
 }
 
 /*
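
Taken together, the identity mapping is now created with the plain REGION3_KERNEL/SEGMENT_KERNEL/PAGE_KERNEL protections and write-protected afterwards through set_memory_ro(), while every path that adds or removes mappings keeps the DirectMap counters in sync, which arch_report_meminfo() then exposes. A hedged view of the result (the numbers are illustrative, not from a real system):

        $ grep DirectMap /proc/meminfo
        DirectMap4k:        5120 kB
        DirectMap1M:      517120 kB
        DirectMap2G:     3145728 kB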