about summary refs log tree commit diff stats
path: root/arch/mn10300
diff options
context:
space:
mode:
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2015-05-11 11:52:09 -0400
committerIngo Molnar <mingo@kernel.org>2015-05-19 02:39:14 -0400
commit2cb7c9cb426660b5ed58b643d9e7dd5d50ba901f (patch)
tree39976a7d4ee0ee34da0072e71fcd49e02480f096 /arch/mn10300
parentb3c395ef5556a6c60f4426cc060f5b7bdcf82d5b (diff)
sched/preempt, mm/kmap: Explicitly disable/enable preemption in kmap_atomic_*
The existing code relies on pagefault_disable() implicitly disabling preemption, so that no schedule will happen between kmap_atomic() and kunmap_atomic(). Let's make this explicit, to prepare for pagefault_disable() not touching preemption anymore. Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: David.Laight@ACULAB.COM Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: airlied@linux.ie Cc: akpm@linux-foundation.org Cc: benh@kernel.crashing.org Cc: bigeasy@linutronix.de Cc: borntraeger@de.ibm.com Cc: daniel.vetter@intel.com Cc: heiko.carstens@de.ibm.com Cc: herbert@gondor.apana.org.au Cc: hocko@suse.cz Cc: hughd@google.com Cc: mst@redhat.com Cc: paulus@samba.org Cc: ralf@linux-mips.org Cc: schwidefsky@de.ibm.com Cc: yang.shi@windriver.com Link: http://lkml.kernel.org/r/1431359540-32227-5-git-send-email-dahi@linux.vnet.ibm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/mn10300')
-rw-r--r--arch/mn10300/include/asm/highmem.h3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index 2fbbe4d920aa..1ddea5afba09 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct page *page)
75 unsigned long vaddr; 75 unsigned long vaddr;
76 int idx, type; 76 int idx, type;
77 77
78 preempt_disable();
78 pagefault_disable(); 79 pagefault_disable();
79 if (page < highmem_start_page) 80 if (page < highmem_start_page)
80 return page_address(page); 81 return page_address(page);
@@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
98 99
99 if (vaddr < FIXADDR_START) { /* FIXME */ 100 if (vaddr < FIXADDR_START) { /* FIXME */
100 pagefault_enable(); 101 pagefault_enable();
102 preempt_enable();
101 return; 103 return;
102 } 104 }
103 105
@@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
122 124
123 kmap_atomic_idx_pop(); 125 kmap_atomic_idx_pop();
124 pagefault_enable(); 126 pagefault_enable();
127 preempt_enable();
125} 128}
126#endif /* __KERNEL__ */ 129#endif /* __KERNEL__ */
127 130