author     Ingo Molnar <mingo@elte.hu>  2009-06-22 04:24:43 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-06-22 04:24:43 -0400
commit     b7f797cb600fa88de04903be4df3c8a6cb1cb35c (patch)
tree       fb34c6081f4184cf336d3df2bd0d1c5de98916c1 /arch
parent     99bd0c0fc4b04da54cb311953ef9489931c19c63 (diff)
parent     0017c869ddcb73069905d09f9e98e68627466237 (diff)
Merge branch 'for-tip' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu into x86/urgent
Diffstat (limited to 'arch')
29 files changed, 245 insertions, 110 deletions
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4829f96585b1..00a31deaa96e 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -146,7 +146,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault. */
-	fault = handle_mm_fault(mm, vma, address, cause > 0);
+	fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
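All of the mm/fault.c hunks in this merge make the same mechanical change: the last argument of handle_mm_fault() is now a flags word instead of a plain write boolean, so every caller passes FAULT_FLAG_WRITE (or 0) rather than a truth value. A minimal sketch of the new calling convention, with the architecture-specific fault-handler boilerplate elided and the variable and label names purely illustrative:

	unsigned int flags = is_write ? FAULT_FLAG_WRITE : 0;
	int fault;

	/* mmap_sem is already held for reading at this point */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
	}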
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0455557a2899..6fdcbb709827 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -208,7 +208,7 @@ good_area:
 	 * than endlessly redo the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 62d4abbaa654..b61d86d3debf 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -133,7 +133,7 @@ good_area:
 	 * fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index c4c76db90f9c..f925115e3250 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -163,7 +163,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */

-	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 05093d41d98e..30f5d100a81c 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, write);
+	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 23088bed111e..19261a99e623 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -154,7 +154,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 4a71df4c1b30..7274b47f4c22 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -196,7 +196,7 @@ survive:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, write);
+	fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index f493f03231d5..d0e35cf99fc6 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -155,7 +155,7 @@ good_area:
 	 */

 survive:
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 #ifdef DEBUG
 	printk("handle_mm_fault returns %d\n",fault);
 #endif
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 5e67cd1fab40..956607a63f4c 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -232,7 +232,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 55767ad9f00e..6751ce9ede9e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -102,7 +102,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 33cf25025dac..a62e1e138bc1 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -258,7 +258,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 92c7fa4ecc3f..bfb6dd6ab380 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -202,7 +202,7 @@ good_area:
 	 * fault.
 	 */

-	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We hit a shared mapping outside of the file, or some
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5beffc8f481e..830bef0a1131 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -302,7 +302,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	ret = handle_mm_fault(mm, vma, address, is_write);
+	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(ret & VM_FAULT_ERROR)) {
 		if (ret & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
index 95d8dadf2d87..d06ba87f1a19 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -70,7 +70,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	}

 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write);
+	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index b0b84c35b0ad..cb5d59eab0ee 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -66,7 +66,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
 	}

 survive:
-	fault = handle_mm_fault(mm, vma, address, write_access);
+	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 220a152c836c..74eb26bf1970 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -352,7 +352,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM) {
 			up_read(&mm->mmap_sem);
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 2c50f80fc332..cc8ddbdf3d7a 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -133,7 +133,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 7876997ba19a..fcbb6e135cef 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -187,7 +187,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 12e447fc8542..a5e30c642ee3 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -484,7 +484,7 @@ good_area:
 		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, write)) {
+	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 4ab8993b0863..e5620b27c8bf 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -398,7 +398,7 @@ good_area:
 			goto bad_area;
 	}

-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 7384d8accfe7..637c6505dc00 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -65,7 +65,7 @@ good_area:
 	do {
 		int fault;

-		fault = handle_mm_fault(mm, vma, address, is_write);
+		fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
 				goto out_of_memory;
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index caba99601703..eb0566e83319 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -845,7 +845,7 @@ ENTRY(aesni_cbc_enc)
  */
 ENTRY(aesni_cbc_dec)
 	cmp $16, LEN
-	jb .Lcbc_dec_ret
+	jb .Lcbc_dec_just_ret
 	mov 480(KEYP), KLEN
 	add $240, KEYP
 	movups (IVP), IV
@@ -891,6 +891,7 @@ ENTRY(aesni_cbc_dec)
 	add $16, OUTP
 	cmp $16, LEN
 	jge .Lcbc_dec_loop1
-	movups IV, (IVP)
 .Lcbc_dec_ret:
+	movups IV, (IVP)
+.Lcbc_dec_just_ret:
 	ret
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 4e663398f77f..c580c5ec1cad 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -198,6 +198,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
@@ -221,6 +222,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
@@ -266,6 +268,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
@@ -289,6 +292,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index 5f9781a3815f..daef6cd2b45d 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -48,7 +48,7 @@ static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
 	struct blkcipher_desc desc = {
 		.tfm = child,
 		.info = desc_in->info,
-		.flags = desc_in->flags,
+		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
 	};

 	kernel_fpu_begin();
@@ -67,7 +67,7 @@ static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
 	struct blkcipher_desc desc = {
 		.tfm = child,
 		.info = desc_in->info,
-		.flags = desc_in->flags,
+		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
 	};

 	kernel_fpu_begin();
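Both crypto files above are driven by the same constraint: kernel_fpu_begin() disables preemption, so nothing between it and kernel_fpu_end() may sleep, and a blkcipher walk with CRYPTO_TFM_REQ_MAY_SLEEP set could otherwise try to. Clearing the flag before entering the FPU section keeps the walk atomic. A rough sketch of the resulting pattern, with the actual AES-NI block processing elided:

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;	/* no sleeping under kernel_fpu_begin() */

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		/* encrypt/decrypt walk.src.virt.addr into walk.dst.virt.addr here */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();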
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 02ecb30982a3..103f1ddb0d85 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -42,6 +42,7 @@

 #else /* ...!ASSEMBLY */

+#include <linux/kernel.h>
 #include <linux/stringify.h>

 #ifdef CONFIG_SMP
@@ -155,6 +156,15 @@ do { \
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);

+#ifdef CONFIG_NEED_MULTIPLE_NODES
+void *pcpu_lpage_remapped(void *kaddr);
+#else
+static inline void *pcpu_lpage_remapped(void *kaddr)
+{
+	return NULL;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */

 #ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 9c3f0823e6aa..29a3eef7cf4a 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -124,7 +124,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 }

 /*
- * Remap allocator
+ * Large page remap allocator
  *
  * This allocator uses PMD page as unit.  A PMD page is allocated for
  * each cpu and each is remapped into vmalloc area using PMD mapping.
@@ -137,105 +137,185 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
  * better than only using 4k mappings while still being NUMA friendly.
  */
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
+struct pcpul_ent {
+	unsigned int	cpu;
+	void		*ptr;
+};
+
+static size_t pcpul_size;
+static struct pcpul_ent *pcpul_map;
+static struct vm_struct pcpul_vm;

-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
 {
 	size_t off = (size_t)pageno << PAGE_SHIFT;

-	if (off >= pcpur_size)
+	if (off >= pcpul_size)
 		return NULL;

-	return virt_to_page(pcpur_ptrs[cpu] + off);
+	return virt_to_page(pcpul_map[cpu].ptr + off);
 }

-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 {
-	static struct vm_struct vm;
-	size_t ptrs_size, dyn_size;
+	size_t map_size, dyn_size;
 	unsigned int cpu;
+	int i, j;
 	ssize_t ret;

-	/*
-	 * If large page isn't supported, there's no benefit in doing
-	 * this.  Also, on non-NUMA, embedding is better.
-	 *
-	 * NOTE: disabled for now.
-	 */
-	if (true || !cpu_has_pse || !pcpu_need_numa())
+	if (!chosen) {
+		size_t vm_size = VMALLOC_END - VMALLOC_START;
+		size_t tot_size = num_possible_cpus() * PMD_SIZE;
+
+		/* on non-NUMA, embedding is better */
+		if (!pcpu_need_numa())
+			return -EINVAL;
+
+		/* don't consume more than 20% of vmalloc area */
+		if (tot_size > vm_size / 5) {
+			pr_info("PERCPU: too large chunk size %zuMB for "
+				"large page remap\n", tot_size >> 20);
+			return -EINVAL;
+		}
+	}
+
+	/* need PSE */
+	if (!cpu_has_pse) {
+		pr_warning("PERCPU: lpage allocator requires PSE\n");
 		return -EINVAL;
+	}

 	/*
 	 * Currently supports only single page.  Supporting multiple
 	 * pages won't be too difficult if it ever becomes necessary.
 	 */
-	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+	pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
 			       PERCPU_DYNAMIC_RESERVE);
-	if (pcpur_size > PMD_SIZE) {
+	if (pcpul_size > PMD_SIZE) {
 		pr_warning("PERCPU: static data is larger than large page, "
 			   "can't use large page\n");
 		return -EINVAL;
 	}
-	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
+	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

 	/* allocate pointer array and alloc large pages */
-	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
-	pcpur_ptrs = alloc_bootmem(ptrs_size);
+	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
+	pcpul_map = alloc_bootmem(map_size);

 	for_each_possible_cpu(cpu) {
-		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
-		if (!pcpur_ptrs[cpu])
+		pcpul_map[cpu].cpu = cpu;
+		pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
+							PMD_SIZE);
+		if (!pcpul_map[cpu].ptr) {
+			pr_warning("PERCPU: failed to allocate large page "
+				   "for cpu%u\n", cpu);
 			goto enomem;
+		}

 		/*
-		 * Only use pcpur_size bytes and give back the rest.
+		 * Only use pcpul_size bytes and give back the rest.
 		 *
 		 * Ingo: The 2MB up-rounding bootmem is needed to make
 		 * sure the partial 2MB page is still fully RAM - it's
 		 * not well-specified to have a PAT-incompatible area
 		 * (unmapped RAM, device memory, etc.) in that hole.
 		 */
-		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
-			     PMD_SIZE - pcpur_size);
+		free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
+			     PMD_SIZE - pcpul_size);

-		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+		memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
 	}

 	/* allocate address and map */
-	vm.flags = VM_ALLOC;
-	vm.size = num_possible_cpus() * PMD_SIZE;
-	vm_area_register_early(&vm, PMD_SIZE);
+	pcpul_vm.flags = VM_ALLOC;
+	pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
+	vm_area_register_early(&pcpul_vm, PMD_SIZE);

 	for_each_possible_cpu(cpu) {
-		pmd_t *pmd;
+		pmd_t *pmd, pmd_v;

-		pmd = populate_extra_pmd((unsigned long)vm.addr
-					 + cpu * PMD_SIZE);
-		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
-				     PAGE_KERNEL_LARGE));
+		pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
+					 cpu * PMD_SIZE);
+		pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
+				PAGE_KERNEL_LARGE);
+		set_pmd(pmd, pmd_v);
 	}

 	/* we're ready, commit */
 	pr_info("PERCPU: Remapped at %p with large pages, static data "
-		"%zu bytes\n", vm.addr, static_size);
+		"%zu bytes\n", pcpul_vm.addr, static_size);

-	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
 				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
-				     PMD_SIZE, vm.addr, NULL);
-	goto out_free_ar;
+				     PMD_SIZE, pcpul_vm.addr, NULL);
+
+	/* sort pcpul_map array for pcpu_lpage_remapped() */
+	for (i = 0; i < num_possible_cpus() - 1; i++)
+		for (j = i + 1; j < num_possible_cpus(); j++)
+			if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
+				struct pcpul_ent tmp = pcpul_map[i];
+				pcpul_map[i] = pcpul_map[j];
+				pcpul_map[j] = tmp;
+			}
+
+	return ret;

 enomem:
 	for_each_possible_cpu(cpu)
-		if (pcpur_ptrs[cpu])
-			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
-	ret = -ENOMEM;
-out_free_ar:
-	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
-	return ret;
+		if (pcpul_map[cpu].ptr)
+			free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
+	free_bootmem(__pa(pcpul_map), map_size);
+	return -ENOMEM;
+}
+
+/**
+ * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
+ * @kaddr: the kernel address in question
+ *
+ * Determine whether @kaddr falls in the pcpul recycled area.  This is
+ * used by pageattr to detect VM aliases and break up the pcpu PMD
+ * mapping such that the same physical page is not mapped under
+ * different attributes.
+ *
+ * The recycled area is always at the tail of a partially used PMD
+ * page.
+ *
+ * RETURNS:
+ * Address of corresponding remapped pcpu address if match is found;
+ * otherwise, NULL.
+ */
+void *pcpu_lpage_remapped(void *kaddr)
+{
+	void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
+	unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
+	int left = 0, right = num_possible_cpus() - 1;
+	int pos;
+
+	/* pcpul in use at all? */
+	if (!pcpul_map)
+		return NULL;
+
+	/* okay, perform binary search */
+	while (left <= right) {
+		pos = (left + right) / 2;
+
+		if (pcpul_map[pos].ptr < pmd_addr)
+			left = pos + 1;
+		else if (pcpul_map[pos].ptr > pmd_addr)
+			right = pos - 1;
+		else {
+			/* it shouldn't be in the area for the first chunk */
+			WARN_ON(offset < pcpul_size);
+
+			return pcpul_vm.addr +
+				pcpul_map[pos].cpu * PMD_SIZE + offset;
+		}
+	}
+
+	return NULL;
 }
 #else
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 {
 	return -EINVAL;
 }
@@ -249,7 +329,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
  * mapping so that it can use PMD mapping without additional TLB
  * pressure.
  */
-static ssize_t __init setup_pcpu_embed(size_t static_size)
+static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
 {
 	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

@@ -258,7 +338,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
 	 * this.  Also, embedding allocation doesn't play well with
 	 * NUMA.
 	 */
-	if (!cpu_has_pse || pcpu_need_numa())
+	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
 		return -EINVAL;

 	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
@@ -308,8 +388,11 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
 		void *ptr;

 		ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
-		if (!ptr)
+		if (!ptr) {
+			pr_warning("PERCPU: failed to allocate "
+				   "4k page for cpu%u\n", cpu);
 			goto enomem;
+		}

 		memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
 		pcpu4k_pages[j++] = virt_to_page(ptr);
@@ -333,6 +416,16 @@ out_free_ar:
 	return ret;
 }

+/* for explicit first chunk allocator selection */
+static char pcpu_chosen_alloc[16] __initdata;
+
+static int __init percpu_alloc_setup(char *str)
+{
+	strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
+	return 0;
+}
+early_param("percpu_alloc", percpu_alloc_setup);
+
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -346,11 +439,6 @@ static inline void setup_percpu_segment(int cpu)
 #endif
 }

-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
 void __init setup_per_cpu_areas(void)
 {
 	size_t static_size = __per_cpu_end - __per_cpu_start;
@@ -367,9 +455,26 @@ void __init setup_per_cpu_areas(void)
 	 * of large page mappings.  Please read comments on top of
 	 * each allocator for details.
 	 */
-	ret = setup_pcpu_remap(static_size);
-	if (ret < 0)
-		ret = setup_pcpu_embed(static_size);
+	ret = -EINVAL;
+	if (strlen(pcpu_chosen_alloc)) {
+		if (strcmp(pcpu_chosen_alloc, "4k")) {
+			if (!strcmp(pcpu_chosen_alloc, "lpage"))
+				ret = setup_pcpu_lpage(static_size, true);
+			else if (!strcmp(pcpu_chosen_alloc, "embed"))
+				ret = setup_pcpu_embed(static_size, true);
+			else
+				pr_warning("PERCPU: unknown allocator %s "
+					   "specified\n", pcpu_chosen_alloc);
+			if (ret < 0)
+				pr_warning("PERCPU: %s allocator failed (%zd), "
+					   "falling back to 4k\n",
+					   pcpu_chosen_alloc, ret);
+		}
+	} else {
+		ret = setup_pcpu_lpage(static_size, false);
+		if (ret < 0)
+			ret = setup_pcpu_embed(static_size, false);
+	}
 	if (ret < 0)
 		ret = setup_pcpu_4k(static_size);
 	if (ret < 0)
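The early_param() added above gives the first-chunk percpu allocator an explicit override. Going by the strcmp() checks in setup_per_cpu_areas(), the recognized values appear to be "lpage", "embed" and "4k", given on the kernel command line, for example:

	percpu_alloc=lpage	# force the large page remap allocator
	percpu_alloc=embed	# force the embedding allocator
	percpu_alloc=4k		# skip both and use plain 4k pages

Anything else is reported as an unknown allocator, and a failed lpage or embed attempt still falls back to the 4k allocator.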
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c403526d5d15..78a5fff857be 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1113,7 +1113,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault:
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);

 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, fault);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3cfe9ced8a4c..1b734d7a8966 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/pfn.h>

 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -681,8 +682,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
 static int cpa_process_alias(struct cpa_data *cpa)
 {
 	struct cpa_data alias_cpa;
-	int ret = 0;
-	unsigned long temp_cpa_vaddr, vaddr;
+	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
+	unsigned long vaddr, remapped;
+	int ret;

 	if (cpa->pfn >= max_pfn_mapped)
 		return 0;
@@ -706,42 +708,55 @@
 		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

 		alias_cpa = *cpa;
-		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
-		alias_cpa.vaddr = &temp_cpa_vaddr;
+		alias_cpa.vaddr = &laddr;
 		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

-
 		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+		if (ret)
+			return ret;
 	}

 #ifdef CONFIG_X86_64
-	if (ret)
-		return ret;
 	/*
-	 * No need to redo, when the primary call touched the high
-	 * mapping already:
-	 */
-	if (within(vaddr, (unsigned long) _text, _brk_end))
-		return 0;
-
-	/*
-	 * If the physical address is inside the kernel map, we need
+	 * If the primary call didn't touch the high mapping already
+	 * and the physical address is inside the kernel map, we need
 	 * to touch the high mapped kernel as well:
 	 */
-	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
-		return 0;
+	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
+	    within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
+		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
+					       __START_KERNEL_map - phys_base;
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = &temp_cpa_vaddr;
+		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

-	alias_cpa = *cpa;
-	temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
-	alias_cpa.vaddr = &temp_cpa_vaddr;
-	alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+		/*
+		 * The high mapping range is imprecise, so ignore the
+		 * return value.
+		 */
+		__change_page_attr_set_clr(&alias_cpa, 0);
+	}
+#endif

 	/*
-	 * The high mapping range is imprecise, so ignore the return value.
+	 * If the PMD page was partially used for per-cpu remapping,
+	 * the recycled area needs to be split and modified.  Because
+	 * the area is always proper subset of a PMD page
+	 * cpa->numpages is guaranteed to be 1 for these areas, so
+	 * there's no need to loop over and check for further remaps.
 	 */
-	__change_page_attr_set_clr(&alias_cpa, 0);
-#endif
-	return ret;
+	remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
+	if (remapped) {
+		WARN_ON(cpa->numpages > 1);
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = &remapped;
+		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }

 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index bdd860d93f72..bc0733359a88 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -106,7 +106,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;