Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/buffer_head.h   |   2
-rw-r--r--  include/linux/cpumask.h        |  63
-rw-r--r--  include/linux/fs.h             |  44
-rw-r--r--  include/linux/mm.h             |   3
-rw-r--r--  include/linux/mm_types.h       |   4
-rw-r--r--  include/linux/mmu_notifier.h   | 279
-rw-r--r--  include/linux/pagemap.h        |   1
-rw-r--r--  include/linux/rculist.h        |  28
-rw-r--r--  include/linux/rmap.h           |   8
9 files changed, 372 insertions(+), 60 deletions(-)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 82aa36c53ea7..50cfe8ceb478 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -205,6 +205,8 @@ void block_invalidatepage(struct page *page, unsigned long offset);
 int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
 int block_read_full_page(struct page*, get_block_t*);
+int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
+				unsigned long from);
 int block_write_begin(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page **, void **, get_block_t*);
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1b5c98e7fef7..96d0509fb8d8 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -62,15 +62,7 @@
  * int next_cpu_nr(cpu, mask)		Next cpu past 'cpu', or nr_cpu_ids
  *
  * cpumask_t cpumask_of_cpu(cpu)	Return cpumask with bit 'cpu' set
- *ifdef CONFIG_HAS_CPUMASK_OF_CPU
- * cpumask_of_cpu_ptr_declare(v)	Declares cpumask_t *v
- * cpumask_of_cpu_ptr_next(v, cpu)	Sets v = &cpumask_of_cpu_map[cpu]
- * cpumask_of_cpu_ptr(v, cpu)		Combines above two operations
- *else
- * cpumask_of_cpu_ptr_declare(v)	Declares cpumask_t _v and *v = &_v
- * cpumask_of_cpu_ptr_next(v, cpu)	Sets _v = cpumask_of_cpu(cpu)
- * cpumask_of_cpu_ptr(v, cpu)		Combines above two operations
- *endif
+ *					(can be used as an lvalue)
  * CPU_MASK_ALL			Initializer - all bits set
  * CPU_MASK_NONE			Initializer - no bits set
  * unsigned long *cpus_addr(mask)	Array of unsigned long's in mask
@@ -273,37 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
 	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
 }
 
+/*
+ * Special-case data structure for "single bit set only" constant CPU masks.
+ *
+ * We pre-generate all the 64 (or 32) possible bit positions, with enough
+ * padding to the left and the right, and return the constant pointer
+ * appropriately offset.
+ */
+extern const unsigned long
+	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+
+static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
+{
+	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+	p -= cpu / BITS_PER_LONG;
+	return (const cpumask_t *)p;
+}
+
+/*
+ * In cases where we take the address of the cpumask immediately,
+ * gcc optimizes it out (it's a constant) and there's no huge stack
+ * variable created:
+ */
+#define cpumask_of_cpu(cpu) ({ *get_cpu_mask(cpu); })
 
-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-extern cpumask_t *cpumask_of_cpu_map;
-#define cpumask_of_cpu(cpu)	(cpumask_of_cpu_map[cpu])
-#define cpumask_of_cpu_ptr(v, cpu)					\
-		const cpumask_t *v = &cpumask_of_cpu(cpu)
-#define cpumask_of_cpu_ptr_declare(v)					\
-		const cpumask_t *v
-#define cpumask_of_cpu_ptr_next(v, cpu)					\
-					v = &cpumask_of_cpu(cpu)
-#else
-#define cpumask_of_cpu(cpu)						\
-({									\
-	typeof(_unused_cpumask_arg_) m;					\
-	if (sizeof(m) == sizeof(unsigned long)) {			\
-		m.bits[0] = 1UL<<(cpu);					\
-	} else {							\
-		cpus_clear(m);						\
-		cpu_set((cpu), m);					\
-	}								\
-	m;								\
-})
-#define cpumask_of_cpu_ptr(v, cpu)					\
-		cpumask_t _##v = cpumask_of_cpu(cpu);			\
-		const cpumask_t *v = &_##v
-#define cpumask_of_cpu_ptr_declare(v)					\
-		cpumask_t _##v;						\
-		const cpumask_t *v = &_##v
-#define cpumask_of_cpu_ptr_next(v, cpu)					\
-					_##v = cpumask_of_cpu(cpu)
-#endif
 
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
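For readers unfamiliar with the trick above, here is a standalone, userspace-only sketch of how the cpu_bit_bitmap pointer-offset scheme yields every single-bit mask from one small shared table. The table name, NR_CPUS value and main() are illustrative assumptions, not part of the patch:

/* Userspace demo of the single-bit-mask table trick (64-bit longs assumed). */
#include <stdio.h>

#define BITS_PER_LONG	64
#define NR_CPUS		128
#define LONGS		((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Row r (1..BITS_PER_LONG) has bit (r-1) set in word 0; row 0 is zero padding. */
static unsigned long bit_bitmap[BITS_PER_LONG + 1][LONGS];

static const unsigned long *mask_of(unsigned int cpu)
{
	const unsigned long *p = bit_bitmap[1 + cpu % BITS_PER_LONG];
	/* Shift the view backwards so the set bit lands in word cpu/64. */
	return p - cpu / BITS_PER_LONG;
}

int main(void)
{
	unsigned int i;
	const unsigned long *m;

	for (i = 1; i <= BITS_PER_LONG; i++)
		bit_bitmap[i][0] = 1UL << (i - 1);

	m = mask_of(70);	/* expect bit 6 of word 1 */
	printf("word0=%#lx word1=%#lx\n", m[0], m[1]);
	return 0;
}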
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8252b045e624..580b513668fe 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -443,6 +443,27 @@ static inline size_t iov_iter_count(struct iov_iter *i)
 	return i->count;
 }
 
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+	size_t written;
+	size_t count;
+	union {
+		char __user *buf;
+		void *data;
+	} arg;
+	int error;
+} read_descriptor_t;
+
+typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
+		unsigned long, unsigned long);
 
 struct address_space_operations {
 	int (*writepage)(struct page *page, struct writeback_control *wbc);
@@ -484,6 +505,8 @@ struct address_space_operations {
 	int (*migratepage) (struct address_space *,
 			struct page *, struct page *);
 	int (*launder_page) (struct page *);
+	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+					unsigned long);
 };
 
 /*
@@ -1198,27 +1221,6 @@ struct block_device_operations {
 	struct module *owner;
 };
 
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
-	size_t written;
-	size_t count;
-	union {
-		char __user * buf;
-		void *data;
-	} arg;
-	int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long);
-
 /* These macros are for out of kernel modules to test that
  * the kernel supports the unlocked_ioctl and compat_ioctl
  * fields in struct file_operations. */
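With read_descriptor_t now declared before struct address_space_operations, a buffer_head-based filesystem can point the new ->is_partially_uptodate hook directly at the block_is_partially_uptodate() helper declared in buffer_head.h above. A minimal sketch, with a hypothetical "myfs" and a placeholder get_block:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Placeholder block mapping for the sketch; a real fs resolves blocks here. */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh, int create)
{
	return -EIO;
}

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage		= myfs_readpage,
	/* new hook from this patch: answer partial-uptodate reads from buffers */
	.is_partially_uptodate	= block_is_partially_uptodate,
};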
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6e695eaab4ce..866a3dbe5c75 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1104,6 +1104,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
 			unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
 
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
 #ifdef CONFIG_PROC_FS
 /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
 extern void added_exe_file_vma(struct mm_struct *mm);
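A hedged sketch of the calling pattern the new primitives are meant for: freeze every vma-related lock in an mm while publishing state that rmap walkers must not race with (the real user is the mmu notifier registration path; the function below and the commented-out update are assumptions for illustration):

#include <linux/mm.h>
#include <linux/rwsem.h>

static int example_publish(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);	/* can fail, e.g. on a pending signal */
	if (ret)
		goto out;

	/* ... update per-mm state visible to rmap/mmu-notifier paths ... */

	mm_drop_all_locks(mm);
out:
	up_write(&mm->mmap_sem);
	return ret;
}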
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 746f975b58ef..386edbe2cb4e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -10,6 +10,7 @@
 #include <linux/rbtree.h>
 #include <linux/rwsem.h>
 #include <linux/completion.h>
+#include <linux/cpumask.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
 
@@ -253,6 +254,9 @@ struct mm_struct {
 	struct file *exe_file;
 	unsigned long num_exe_file_vmas;
 #endif
+#ifdef CONFIG_MMU_NOTIFIER
+	struct mmu_notifier_mm *mmu_notifier_mm;
+#endif
 };
 
 #endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
new file mode 100644
index 000000000000..b77486d152cd
--- /dev/null
+++ b/include/linux/mmu_notifier.h
@@ -0,0 +1,279 @@
+#ifndef _LINUX_MMU_NOTIFIER_H
+#define _LINUX_MMU_NOTIFIER_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mm_types.h>
+
+struct mmu_notifier;
+struct mmu_notifier_ops;
+
+#ifdef CONFIG_MMU_NOTIFIER
+
+/*
+ * The mmu_notifier_mm structure is allocated and installed in
+ * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
+ * critical section and it's released only when mm_count reaches zero
+ * in mmdrop().
+ */
+struct mmu_notifier_mm {
+	/* all mmu notifiers registered in this mm are queued in this list */
+	struct hlist_head list;
+	/* to serialize the list modifications and hlist_unhashed */
+	spinlock_t lock;
+};
+
+struct mmu_notifier_ops {
+	/*
+	 * Called either by mmu_notifier_unregister or when the mm is
+	 * being destroyed by exit_mmap, always before all pages are
+	 * freed. This can run concurrently with other mmu notifier
+	 * methods (the ones invoked outside the mm context) and it
+	 * should tear down all secondary mmu mappings and freeze the
+	 * secondary mmu. If this method isn't implemented you have to
+	 * be sure that nothing could possibly write to the pages
+	 * through the secondary mmu by the time the last thread with
+	 * tsk->mm == mm exits.
+	 *
+	 * As a side note: the pages freed after ->release returns could
+	 * be immediately reallocated by the gart at an alias physical
+	 * address with a different cache model, so if ->release isn't
+	 * implemented because all _software_ driven memory accesses
+	 * through the secondary mmu are terminated by the time the
+	 * last thread of this mm quits, you also have to be sure that
+	 * speculative _hardware_ operations can't allocate dirty
+	 * cachelines in the cpu that could not be snooped and made
+	 * coherent with the other read and write operations happening
+	 * through the gart alias address, which would lead to memory
+	 * corruption.
+	 */
+	void (*release)(struct mmu_notifier *mn,
+			struct mm_struct *mm);
+
+	/*
+	 * clear_flush_young is called after the VM is
+	 * test-and-clearing the young/accessed bitflag in the
+	 * pte. This way the VM will provide proper aging to the
+	 * accesses to the page through the secondary MMUs and not
+	 * only to the ones through the Linux pte.
+	 */
+	int (*clear_flush_young)(struct mmu_notifier *mn,
+				 struct mm_struct *mm,
+				 unsigned long address);
+
+	/*
+	 * Before this is invoked any secondary MMU is still ok to
+	 * read/write to the page previously pointed to by the Linux
+	 * pte because the page hasn't been freed yet and it won't be
+	 * freed until this returns. If required set_page_dirty has to
+	 * be called internally to this method.
+	 */
+	void (*invalidate_page)(struct mmu_notifier *mn,
+				struct mm_struct *mm,
+				unsigned long address);
+
+	/*
+	 * invalidate_range_start() and invalidate_range_end() must be
+	 * paired and are called only when the mmap_sem and/or the
+	 * locks protecting the reverse maps are held. The subsystem
+	 * must guarantee that no additional references are taken to
+	 * the pages in the range established between the call to
+	 * invalidate_range_start() and the matching call to
+	 * invalidate_range_end().
+	 *
+	 * Invalidation of multiple concurrent ranges may be
+	 * optionally permitted by the driver. Either way the
+	 * establishment of sptes is forbidden in the range passed to
+	 * invalidate_range_begin/end for the whole duration of the
+	 * invalidate_range_begin/end critical section.
+	 *
+	 * invalidate_range_start() is called when all pages in the
+	 * range are still mapped and have at least a refcount of one.
+	 *
+	 * invalidate_range_end() is called when all pages in the
+	 * range have been unmapped and the pages have been freed by
+	 * the VM.
+	 *
+	 * The VM will remove the page table entries and potentially
+	 * the page between invalidate_range_start() and
+	 * invalidate_range_end(). If the page must not be freed
+	 * because of pending I/O or other circumstances then the
+	 * invalidate_range_start() callback (or the initial mapping
+	 * by the driver) must make sure that the refcount is kept
+	 * elevated.
+	 *
+	 * If the driver increases the refcount when the pages are
+	 * initially mapped into an address space then either
+	 * invalidate_range_start() or invalidate_range_end() may
+	 * decrease the refcount. If the refcount is decreased on
+	 * invalidate_range_start() then the VM can free pages as page
+	 * table entries are removed. If the refcount is only
+	 * dropped on invalidate_range_end() then the driver itself
+	 * will drop the last refcount but it must take care to flush
+	 * any secondary tlb before doing the final free on the
+	 * page. Pages will no longer be referenced by the linux
+	 * address space but may still be referenced by sptes until
+	 * the last refcount is dropped.
+	 */
+	void (*invalidate_range_start)(struct mmu_notifier *mn,
+				       struct mm_struct *mm,
+				       unsigned long start, unsigned long end);
+	void (*invalidate_range_end)(struct mmu_notifier *mn,
+				     struct mm_struct *mm,
+				     unsigned long start, unsigned long end);
+};
+
+/*
+ * The notifier chains are protected by mmap_sem and/or the reverse map
+ * semaphores. Notifier chains are only changed when all reverse maps and
+ * the mmap_sem locks are taken.
+ *
+ * Therefore notifier chains can only be traversed when either
+ *
+ * 1. mmap_sem is held.
+ * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock).
+ * 3. No other concurrent thread can access the list (release)
+ */
+struct mmu_notifier {
+	struct hlist_node hlist;
+	const struct mmu_notifier_ops *ops;
+};
+
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+	return unlikely(mm->mmu_notifier_mm);
+}
+
+extern int mmu_notifier_register(struct mmu_notifier *mn,
+				 struct mm_struct *mm);
+extern int __mmu_notifier_register(struct mmu_notifier *mn,
+				   struct mm_struct *mm);
+extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+				    struct mm_struct *mm);
+extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+extern void __mmu_notifier_release(struct mm_struct *mm);
+extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+					    unsigned long address);
+extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+					   unsigned long address);
+extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+				  unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+				  unsigned long start, unsigned long end);
+
+static inline void mmu_notifier_release(struct mm_struct *mm)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_release(mm);
+}
+
+static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
+					  unsigned long address)
+{
+	if (mm_has_notifiers(mm))
+		return __mmu_notifier_clear_flush_young(mm, address);
+	return 0;
+}
+
+static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
+					  unsigned long address)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_invalidate_page(mm, address);
+}
+
+static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_invalidate_range_start(mm, start, end);
+}
+
+static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_invalidate_range_end(mm, start, end);
+}
+
+static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+	mm->mmu_notifier_mm = NULL;
+}
+
+static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_mm_destroy(mm);
+}
+
+/*
+ * These two macros will sometime replace ptep_clear_flush.
+ * ptep_clear_flush is implemented as a macro itself, so this also is
+ * implemented as a macro until ptep_clear_flush is converted to an
+ * inline function, to diminish the risk of compilation failure. The
+ * invalidate_page method over time can be moved outside the PT lock
+ * and these two macros can be later removed.
+ */
+#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
+({									\
+	pte_t __pte;							\
+	struct vm_area_struct *___vma = __vma;				\
+	unsigned long ___address = __address;				\
+	__pte = ptep_clear_flush(___vma, ___address, __ptep);		\
+	mmu_notifier_invalidate_page(___vma->vm_mm, ___address);	\
+	__pte;								\
+})
+
+#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
+({									\
+	int __young;							\
+	struct vm_area_struct *___vma = __vma;				\
+	unsigned long ___address = __address;				\
+	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
+	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
+						  ___address);		\
+	__young;							\
+})
+
+#else /* CONFIG_MMU_NOTIFIER */
+
+static inline void mmu_notifier_release(struct mm_struct *mm)
+{
+}
+
+static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
+					  unsigned long address)
+{
+	return 0;
+}
+
+static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
+					  unsigned long address)
+{
+}
+
+static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+}
+
+static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+{
+}
+
+#define ptep_clear_flush_young_notify ptep_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+
+#endif /* CONFIG_MMU_NOTIFIER */
+
+#endif /* _LINUX_MMU_NOTIFIER_H */
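To make the new API concrete, a minimal hypothetical secondary-MMU driver might hook in as sketched below. The mydrv_* names are illustrative assumptions and only two callbacks are filled in; mmu_notifier_register() is documented by this series as taking mmap_sem and mm_take_all_locks() internally, so it is called without those locks held:

#include <linux/mmu_notifier.h>
#include <linux/sched.h>

static void mydrv_invalidate_page(struct mmu_notifier *mn,
				  struct mm_struct *mm,
				  unsigned long address)
{
	/* drop the spte for 'address' and flush the secondary TLB here */
}

static void mydrv_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* tear down all secondary mappings; the mm is going away */
}

static const struct mmu_notifier_ops mydrv_ops = {
	.release	 = mydrv_release,
	.invalidate_page = mydrv_invalidate_page,
};

static struct mmu_notifier mydrv_notifier = { .ops = &mydrv_ops };

static int mydrv_attach_current_mm(void)
{
	return mmu_notifier_register(&mydrv_notifier, current->mm);
}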
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a81d81890422..a39b38ccdc97 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -20,6 +20,7 @@
  */
 #define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
 #define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
+#define AS_MM_ALL_LOCKS	(__GFP_BITS_SHIFT + 2)	/* under mm_take_all_locks() */
 
 static inline void mapping_set_error(struct address_space *mapping, int error)
 {
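A sketch of the assumed semantics of the new AS_MM_ALL_LOCKS bit: mm_take_all_locks() can mark each address_space it has already locked so that further vmas sharing the same mapping are skipped in the same pass. The helper name below is hypothetical:

#include <linux/pagemap.h>

static int example_mark_mapping(struct address_space *mapping)
{
	/* atomically claim the mapping for this locking pass */
	if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
		return 0;		/* already covered, skip it */
	/* ... take mapping->i_mmap_lock here ... */
	return 1;
}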
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b0f39be08b6c..eb4443c7e05b 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -98,6 +98,34 @@ static inline void list_del_rcu(struct list_head *entry)
 }
 
 /**
+ * hlist_del_init_rcu - deletes entry from hash list with re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_unhashed() on the node returns true after this. It is
+ * useful for RCU based read lockfree traversal if the writer side
+ * must know if the list entry is still hashed or already unhashed.
+ *
+ * In particular, it means that we can not poison the forward pointers
+ * that may still be used for walking the hash list and we can only
+ * zero the pprev pointer so hlist_unhashed() will return true after
+ * this.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another
+ * list-mutation primitive, such as hlist_add_head_rcu() or
+ * hlist_del_rcu(), running on this same list. However, it is
+ * perfectly legal to run concurrently with the _rcu list-traversal
+ * primitives, such as hlist_for_each_entry_rcu().
+ */
+static inline void hlist_del_init_rcu(struct hlist_node *n)
+{
+	if (!hlist_unhashed(n)) {
+		__hlist_del(n);
+		n->pprev = NULL;
+	}
+}
+
+/**
  * list_replace_rcu - replace old entry by new one
  * @old : the element to be replaced
  * @new : the new element to insert
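A short usage sketch for hlist_del_init_rcu(): because pprev is zeroed rather than poisoned, the writer side can later ask hlist_unhashed() whether the entry is still on the list, while concurrent RCU readers keep traversing safely. The item structure and locks are illustrative:

#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item {
	struct hlist_node node;
	int key;
};

static HLIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);

static void item_unhash(struct item *it)
{
	spin_lock(&item_lock);
	hlist_del_init_rcu(&it->node);	/* safe even if already unhashed */
	spin_unlock(&item_lock);
	/* RCU readers may still see 'it' until a grace period elapses */
}

static bool item_is_hashed(struct item *it)
{
	/* meaningful only because del_init zeroes pprev instead of poisoning */
	return !hlist_unhashed(&it->node);
}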
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1383692ac5bd..69407f85e10b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,6 +26,14 @@
  */
 struct anon_vma {
 	spinlock_t lock;	/* Serialize access to vma list */
+	/*
+	 * NOTE: the LSB of the head.next is set by
+	 * mm_take_all_locks() _after_ taking the above lock. So the
+	 * head must only be read/written after taking the above lock
+	 * to be sure to see a valid next pointer. The LSB bit itself
+	 * is serialized by a system wide lock only visible to
+	 * mm_take_all_locks() (mm_all_locks_mutex).
+	 */
 	struct list_head head;	/* List of private "related" vmas */
 };
 
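A sketch of the LSB marking described in the comment above, roughly what mm_take_all_locks() does while holding mm_all_locks_mutex. The example_* helpers are assumptions for illustration, not the patch's code:

#include <linux/rmap.h>
#include <linux/bitops.h>

static void example_lock_anon_vma(struct anon_vma *anon_vma)
{
	/* bit 0 of head.next doubles as an "already locked in this pass" mark */
	if (!test_bit(0, (unsigned long *)&anon_vma->head.next)) {
		spin_lock(&anon_vma->lock);
		__set_bit(0, (unsigned long *)&anon_vma->head.next);
	}
}

static void example_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *)&anon_vma->head.next)) {
		__clear_bit(0, (unsigned long *)&anon_vma->head.next);
		spin_unlock(&anon_vma->lock);
	}
}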