Diffstat (limited to 'include/linux/rmap.h')

 include/linux/rmap.h | 81 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 76 insertions(+), 5 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index cb0ba7032609..d25bd224d370 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,6 +26,9 @@
  */
 struct anon_vma {
 	spinlock_t lock;	/* Serialize access to vma list */
+#ifdef CONFIG_KSM
+	atomic_t ksm_refcount;
+#endif
 	/*
 	 * NOTE: the LSB of the head.next is set by
 	 * mm_take_all_locks() _after_ taking the above lock. So the
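
A quick aside on the new ksm_refcount field (a sketch, not part of the
patch): together with the ksm_refcount()/anon_vma_free() helpers added
further down in this diff, it lets KSM pin an anon_vma so the structure
is not freed while KSM still needs it for its own reverse lookups.
Teardown then has to check both conditions, roughly like the mm/rmap.c
logic this header pairs with; the function name below is illustrative
only:

static void anon_vma_unlink_sketch(struct anon_vma *anon_vma)
{
	int empty;

	spin_lock(&anon_vma->lock);
	/* Free only once no VMAs remain *and* KSM holds no reference. */
	empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}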
@@ -34,10 +37,58 @@ struct anon_vma {
 	 * is serialized by a system wide lock only visible to
 	 * mm_take_all_locks() (mm_all_locks_mutex).
 	 */
-	struct list_head head;	/* List of private "related" vmas */
+	struct list_head head;	/* Chain of private "related" vmas */
+};
+
+/*
+ * The copy-on-write semantics of fork mean that an anon_vma
+ * can become associated with multiple processes. Furthermore,
+ * each child process will have its own anon_vma, where new
+ * pages for that process are instantiated.
+ *
+ * This structure allows us to find the anon_vmas associated
+ * with a VMA, or the VMAs associated with an anon_vma.
+ * The "same_vma" list contains the anon_vma_chains linking
+ * all the anon_vmas associated with this VMA.
+ * The "same_anon_vma" list contains the anon_vma_chains
+ * which link all the VMAs associated with this anon_vma.
+ */
+struct anon_vma_chain {
+	struct vm_area_struct *vma;
+	struct anon_vma *anon_vma;
+	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
+	struct list_head same_anon_vma;	/* locked by anon_vma->lock */
 };
 
 #ifdef CONFIG_MMU
+#ifdef CONFIG_KSM
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+	atomic_set(&anon_vma->ksm_refcount, 0);
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+	return atomic_read(&anon_vma->ksm_refcount);
+}
+#else
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+	return 0;
+}
+#endif /* CONFIG_KSM */
+
+static inline struct anon_vma *page_anon_vma(struct page *page)
+{
+	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
+					    PAGE_MAPPING_ANON)
+		return NULL;
+	return page_rmapping(page);
+}
 
 static inline void anon_vma_lock(struct vm_area_struct *vma)
 {
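
To make the double linking concrete, here is a hedged sketch (not from
the patch) of how a reverse-map walk looks once anon_vma_chain is in
place: every VMA that might map a page hangs off the page's anon_vma
through the same_anon_vma list, so visiting them is a plain list
traversal under anon_vma->lock.  The function name is hypothetical:

static void for_each_vma_of_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma_chain *avc;

	spin_lock(&anon_vma->lock);
	/* Each chain entry points back at one VMA sharing this anon_vma. */
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;

		/* ... examine or unmap the page in this vma ... */
	}
	spin_unlock(&anon_vma->lock);
}

The same_vma list runs in the other direction: from a VMA to all the
anon_vmas (its own plus any inherited from its parents) it is chained
into.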
@@ -58,14 +109,23 @@ static inline void anon_vma_unlock(struct vm_area_struct *vma)
  */
 void anon_vma_init(void);	/* create anon_vma_cachep */
 int anon_vma_prepare(struct vm_area_struct *);
-void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
-void anon_vma_unlink(struct vm_area_struct *);
-void anon_vma_link(struct vm_area_struct *);
+void unlink_anon_vmas(struct vm_area_struct *);
+int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
+int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
 void __anon_vma_link(struct vm_area_struct *);
+void anon_vma_free(struct anon_vma *);
+
+static inline void anon_vma_merge(struct vm_area_struct *vma,
+				  struct vm_area_struct *next)
+{
+	VM_BUG_ON(vma->anon_vma != next->anon_vma);
+	unlink_anon_vmas(next);
+}
 
 /*
  * rmap interfaces called when adding or removing pte of page
  */
+void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
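
How the reworked API is meant to be driven (a simplified, hedged
paraphrase of the dup_mmap() path in kernel/fork.c; the (child, parent)
argument order is an assumption the bare declarations above do not
spell out): anon_vma_fork() gives the child VMA a fresh anon_vma for
its own new pages, while also cloning chain entries so existing CoW
pages can still be found from the parent's anon_vmas.

static int dup_one_vma_sketch(struct vm_area_struct *child,
			      struct vm_area_struct *parent)
{
	/* Allocates the child's anon_vma plus the linking chain entries. */
	if (anon_vma_fork(child, parent))
		return -ENOMEM;		/* propagate allocation failure */
	return 0;
}

anon_vma_merge() above is the inverse housekeeping step: when two VMAs
with the same anon_vma become one, the chains of the VMA being removed
are simply unlinked.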
@@ -81,6 +141,9 @@ static inline void page_dup_rmap(struct page *page)
  */
 int page_referenced(struct page *, int is_locked,
 			struct mem_cgroup *cnt, unsigned long *vm_flags);
+int page_referenced_one(struct page *, struct vm_area_struct *,
+	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
+
 enum ttu_flags {
 	TTU_UNMAP = 0,			/* unmap mode */
 	TTU_MIGRATION = 1,		/* migration mode */
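
With page_referenced_one() exported, the per-page entry point can drive
it over the chains introduced earlier in this diff.  A hedged sketch of
the anonymous side (vma_address() is a static helper inside mm/rmap.c,
shown here only for shape):

static int page_referenced_anon_sketch(struct page *page,
				       unsigned long *vm_flags)
{
	unsigned int mapcount = page_mapcount(page);
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return 0;

	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		if (address == -EFAULT)
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}
	page_unlock_anon_vma(anon_vma);

	return referenced;
}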
@@ -94,6 +157,8 @@ enum ttu_flags {
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap_one(struct page *, struct vm_area_struct *,
+			unsigned long address, enum ttu_flags flags);
 
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
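
try_to_unmap_one() is exported for the same reason: a caller that
already knows which VMA maps the page (KSM is an in-tree example) can
unmap it directly rather than walking every chain.  A minimal,
hypothetical wrapper:

static int unmap_page_from_vma(struct page *page,
			       struct vm_area_struct *vma,
			       unsigned long address)
{
	/* SWAP_AGAIN on success; other SWAP_* codes if the page
	 * could not be unmapped. */
	return try_to_unmap_one(page, vma, address, TTU_UNMAP);
}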
@@ -127,6 +192,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page);
 void page_unlock_anon_vma(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
+/*
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+		struct vm_area_struct *, unsigned long, void *), void *arg);
+
 #else	/* !CONFIG_MMU */
 
 #define anon_vma_init()		do {} while (0)
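
A sketch of a callback matching the new rmap_walk() hook (hypothetical,
not from the patch): rmap_walk() invokes rmap_one once per VMA found
through the reverse map and keeps iterating for as long as the callback
returns SWAP_AGAIN, which is how migration drives its
remove-migration-pte pass.

static int count_one_mapping(struct page *page, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	int *nr = arg;

	(*nr)++;		/* page is potentially mapped at address in vma */
	return SWAP_AGAIN;	/* keep walking the remaining VMAs */
}

/* Usage: int nr = 0; rmap_walk(page, count_one_mapping, &nr); */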
@@ -138,7 +209,7 @@ static inline int page_referenced(struct page *page, int is_locked,
 					  unsigned long *vm_flags)
 {
 	*vm_flags = 0;
-	return TestClearPageReferenced(page);
+	return 0;
 }
 
 #define try_to_unmap(page, refs) SWAP_FAIL
