diff options
author | David Hildenbrand <dahi@linux.vnet.ibm.com> | 2015-05-11 11:52:09 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-05-19 02:39:14 -0400 |
commit | 2cb7c9cb426660b5ed58b643d9e7dd5d50ba901f (patch) | |
tree | 39976a7d4ee0ee34da0072e71fcd49e02480f096 /arch/microblaze | |
parent | b3c395ef5556a6c60f4426cc060f5b7bdcf82d5b (diff) |
sched/preempt, mm/kmap: Explicitly disable/enable preemption in kmap_atomic_*
The existing code relies on pagefault_disable() implicitly disabling
preemption, so that no schedule will happen between kmap_atomic() and
kunmap_atomic().
Let's make this explicit, to prepare for pagefault_disable() not
touching preemption anymore.
Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: benh@kernel.crashing.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-5-git-send-email-dahi@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/microblaze')
-rw-r--r-- | arch/microblaze/mm/highmem.c | 4 |
1 file changed, 3 insertions, 1 deletion
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c index 5a92576fad92..2fcc5a52d84d 100644 --- a/arch/microblaze/mm/highmem.c +++ b/arch/microblaze/mm/highmem.c | |||
@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) | |||
37 | unsigned long vaddr; | 37 | unsigned long vaddr; |
38 | int idx, type; | 38 | int idx, type; |
39 | 39 | ||
40 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ | 40 | preempt_disable(); |
41 | pagefault_disable(); | 41 | pagefault_disable(); |
42 | if (!PageHighMem(page)) | 42 | if (!PageHighMem(page)) |
43 | return page_address(page); | 43 | return page_address(page); |
@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr) | |||
63 | 63 | ||
64 | if (vaddr < __fix_to_virt(FIX_KMAP_END)) { | 64 | if (vaddr < __fix_to_virt(FIX_KMAP_END)) { |
65 | pagefault_enable(); | 65 | pagefault_enable(); |
66 | preempt_enable(); | ||
66 | return; | 67 | return; |
67 | } | 68 | } |
68 | 69 | ||
@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr) | |||
84 | #endif | 85 | #endif |
85 | kmap_atomic_idx_pop(); | 86 | kmap_atomic_idx_pop(); |
86 | pagefault_enable(); | 87 | pagefault_enable(); |
88 | preempt_enable(); | ||
87 | } | 89 | } |
88 | EXPORT_SYMBOL(__kunmap_atomic); | 90 | EXPORT_SYMBOL(__kunmap_atomic); |