aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/sched/mm.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/sched/mm.h')
-rw-r--r-- include/linux/sched/mm.h | 59
1 file changed, 35 insertions, 24 deletions
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 3d49b91b674d..1149533aa2fa 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -7,11 +7,12 @@
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/mm_types.h> 8#include <linux/mm_types.h>
9#include <linux/gfp.h> 9#include <linux/gfp.h>
10#include <linux/sync_core.h>
10 11
11/* 12/*
12 * Routines for handling mm_structs 13 * Routines for handling mm_structs
13 */ 14 */
14extern struct mm_struct * mm_alloc(void); 15extern struct mm_struct *mm_alloc(void);
15 16
16/** 17/**
17 * mmgrab() - Pin a &struct mm_struct. 18 * mmgrab() - Pin a &struct mm_struct.
@@ -35,27 +36,7 @@ static inline void mmgrab(struct mm_struct *mm)
35 atomic_inc(&mm->mm_count); 36 atomic_inc(&mm->mm_count);
36} 37}
37 38
38/* mmdrop drops the mm and the page tables */ 39extern void mmdrop(struct mm_struct *mm);
39extern void __mmdrop(struct mm_struct *);
40static inline void mmdrop(struct mm_struct *mm)
41{
42 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
43 __mmdrop(mm);
44}
45
46static inline void mmdrop_async_fn(struct work_struct *work)
47{
48 struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
49 __mmdrop(mm);
50}
51
52static inline void mmdrop_async(struct mm_struct *mm)
53{
54 if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
55 INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
56 schedule_work(&mm->async_put_work);
57 }
58}
59 40
60/** 41/**
61 * mmget() - Pin the address space associated with a &struct mm_struct. 42 * mmget() - Pin the address space associated with a &struct mm_struct.
@@ -214,18 +195,48 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
214 195
215#ifdef CONFIG_MEMBARRIER 196#ifdef CONFIG_MEMBARRIER
216enum { 197enum {
217 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), 198 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
218 MEMBARRIER_STATE_SWITCH_MM = (1U << 1), 199 MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
200 MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
201 MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
202 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
203 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
204};
205
206enum {
207 MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
219}; 208};
220 209
210#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
211#include <asm/membarrier.h>
212#endif
213
214static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
215{
216 if (likely(!(atomic_read(&mm->membarrier_state) &
217 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
218 return;
219 sync_core_before_usermode();
220}
221
221static inline void membarrier_execve(struct task_struct *t) 222static inline void membarrier_execve(struct task_struct *t)
222{ 223{
223 atomic_set(&t->mm->membarrier_state, 0); 224 atomic_set(&t->mm->membarrier_state, 0);
224} 225}
225#else 226#else
227#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
228static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
229 struct mm_struct *next,
230 struct task_struct *tsk)
231{
232}
233#endif
226static inline void membarrier_execve(struct task_struct *t) 234static inline void membarrier_execve(struct task_struct *t)
227{ 235{
228} 236}
237static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
238{
239}
229#endif 240#endif
230 241
231#endif /* _LINUX_SCHED_MM_H */ 242#endif /* _LINUX_SCHED_MM_H */