Diffstat (limited to 'include/linux/rmap.h')
-rw-r--r--  include/linux/rmap.h | 36 ++++++++++++++++++++----------------
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 3fce545df394..bfe1f4780644 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -37,14 +37,14 @@ struct anon_vma {
 	atomic_t refcount;
 
 	/*
-	 * NOTE: the LSB of the head.next is set by
+	 * NOTE: the LSB of the rb_root.rb_node is set by
 	 * mm_take_all_locks() _after_ taking the above lock. So the
-	 * head must only be read/written after taking the above lock
+	 * rb_root must only be read/written after taking the above lock
 	 * to be sure to see a valid next pointer. The LSB bit itself
 	 * is serialized by a system wide lock only visible to
 	 * mm_take_all_locks() (mm_all_locks_mutex).
 	 */
-	struct list_head head;	/* Chain of private "related" vmas */
+	struct rb_root rb_root;	/* Interval tree of private "related" vmas */
 };
 
 /*
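The rewritten NOTE describes plain pointer tagging: rb_node pointers are word-aligned, so bit 0 of rb_root.rb_node is always clear, and mm_take_all_locks() can borrow it as an "all locks taken" marker while holding the anon_vma lock. A minimal userspace sketch of the idea follows; it is illustrative only (the kernel sets the bit atomically via bitops on the root pointer, and the helper names below are invented):

#include <assert.h>
#include <stdint.h>

struct rb_node;					/* opaque here; word-aligned in practice */
struct rb_root { struct rb_node *rb_node; };

/* Borrow bit 0 of the root pointer, which alignment keeps clear. */
static void mark_root_locked(struct rb_root *root)
{
	uintptr_t p = (uintptr_t)root->rb_node;
	assert(!(p & 1));			/* must not already be marked */
	root->rb_node = (struct rb_node *)(p | 1);
}

static void clear_root_locked(struct rb_root *root)
{
	uintptr_t p = (uintptr_t)root->rb_node;
	root->rb_node = (struct rb_node *)(p & ~(uintptr_t)1);
}

This is also why the comment insists the root only be read or written under the anon_vma lock: a tree walker racing with the bit flip would dereference a pointer with the tag bit still set.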
@@ -57,14 +57,29 @@ struct anon_vma {
  * with a VMA, or the VMAs associated with an anon_vma.
  * The "same_vma" list contains the anon_vma_chains linking
  * all the anon_vmas associated with this VMA.
- * The "same_anon_vma" list contains the anon_vma_chains
+ * The "rb" field indexes on an interval tree the anon_vma_chains
  * which link all the VMAs associated with this anon_vma.
  */
 struct anon_vma_chain {
 	struct vm_area_struct *vma;
 	struct anon_vma *anon_vma;
 	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
-	struct list_head same_anon_vma;	/* locked by anon_vma->mutex */
+	struct rb_node rb;		/* locked by anon_vma->mutex */
+	unsigned long rb_subtree_last;
+#ifdef CONFIG_DEBUG_VM_RB
+	unsigned long cached_vma_start, cached_vma_last;
+#endif
+};
+
+enum ttu_flags {
+	TTU_UNMAP = 0,			/* unmap mode */
+	TTU_MIGRATION = 1,		/* migration mode */
+	TTU_MUNLOCK = 2,		/* munlock mode */
+	TTU_ACTION_MASK = 0xff,
+
+	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
+	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
+	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
 };
 
 #ifdef CONFIG_MMU
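The new rb/rb_subtree_last pair is the layout expected by an augmented rbtree: rb_subtree_last caches the maximum interval endpoint over each node's subtree, which lets range queries prune whole subtrees. A sketch of how such a tree can be instantiated with the kernel's interval tree template, modeled on mm/interval_tree.c from the same series (treat the details as illustrative rather than this header's contract):

#include <linux/interval_tree_generic.h>
#include <linux/mm.h>

/* The interval a chain covers is the page range its VMA maps. */
static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
	return avc->vma->vm_pgoff;
}

static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
	return avc->vma->vm_pgoff + vma_pages(avc->vma) - 1;
}

/*
 * Generates insert/remove/iter functions that keep rb_subtree_last
 * equal to the max avc_last_pgoff() over each node's subtree.
 */
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
		     avc_start_pgoff, avc_last_pgoff,
		     static inline, __anon_vma_interval_tree)

With this in place, an rmap walk becomes a stabbing query on the page's index instead of a scan of every VMA on a list, which is what makes the conversion pay off for heavily shared anonymous pages.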
@@ -120,7 +135,6 @@ void anon_vma_init(void);	/* create anon_vma_cachep */
 int anon_vma_prepare(struct vm_area_struct *);
 void unlink_anon_vmas(struct vm_area_struct *);
 int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
-void anon_vma_moveto_tail(struct vm_area_struct *);
 int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
 
 static inline void anon_vma_merge(struct vm_area_struct *vma,
@@ -161,16 +175,6 @@ int page_referenced(struct page *, int is_locked,
 int page_referenced_one(struct page *, struct vm_area_struct *,
 	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
 
-enum ttu_flags {
-	TTU_UNMAP = 0,			/* unmap mode */
-	TTU_MIGRATION = 1,		/* migration mode */
-	TTU_MUNLOCK = 2,		/* munlock mode */
-	TTU_ACTION_MASK = 0xff,
-
-	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
-	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
-	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
-};
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
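Relocating the enum above this point changes nothing about its semantics: the low byte still selects exactly one action, recovered with TTU_ACTION(), while the high bits are independent modifiers OR'ed on top. A hedged sketch of the intended encoding (the combination shown is one a caller might plausibly pass; it is illustrative, not taken from this patch):

/* One action plus modifier bits, as a try_to_unmap() caller combines them. */
enum ttu_flags flags = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_HWPOISON;

switch (TTU_ACTION(flags)) {		/* masks off the modifier bits */
case TTU_UNMAP:				/* plain unmapping */
	break;
case TTU_MIGRATION:			/* install migration entries */
	break;
case TTU_MUNLOCK:			/* only clear mlock state */
	break;
}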