author		Sasha Levin <levinsasha928@gmail.com>		2013-02-22 19:32:28 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-23 20:50:10 -0500
commit		4ca3a69bcb6875c3f20802522c1b4fc56bb14608 (patch)
tree		e5877d6bf4106733aa7122f0172efff8b87059f7 /mm/ksm.c
parent		43b5fbbd28294e095653d3c1ba9c399b2168e8df (diff)
mm/ksm.c: use new hashtable implementation
Switch ksm to use the new hashtable implementation. This reduces the
amount of generic unrelated code in the ksm module.
Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
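For readers new to the API: <linux/hashtable.h> packages the bucket array, a default hash function, and the iteration boilerplate, which is exactly the generic code this patch deletes from ksm. Below is a minimal sketch of the pattern in the 3.8-era form the patch uses (hash_for_each_possible() still took an hlist_node cursor argument; later kernels dropped it). The widget names are illustrative only and are not part of this patch:

#include <linux/hashtable.h>

/* 2^6 = 64 buckets; DEFINE_HASHTABLE sizes the array and initializes
 * every bucket to an empty hlist (ksm itself uses 10 bits) */
static DEFINE_HASHTABLE(widget_hash, 6);

struct widget {
	unsigned long key;
	struct hlist_node link;		/* chains this entry into its bucket */
};

static void widget_insert(struct widget *w, unsigned long key)
{
	w->key = key;
	/* the key is hashed internally (hash_min); callers no longer
	 * open-code hash_ptr() the way the old ksm code did */
	hash_add(widget_hash, &w->link, key);
}

static struct widget *widget_lookup(unsigned long key)
{
	struct hlist_node *node;	/* cursor still required in this era */
	struct widget *w;

	/* walk only the bucket that 'key' hashes to; different keys can
	 * share a bucket, so each candidate is compared against 'key' */
	hash_for_each_possible(widget_hash, w, node, link, key)
		if (w->key == key)
			return w;
	return NULL;
}

static void widget_remove(struct widget *w)
{
	hash_del(&w->link);	/* needs only the node, not the table or key */
}

This is the same shape the patch gives get_mm_slot() and insert_to_mm_slots_hash() below, with the mm_struct pointer cast to unsigned long serving as the key.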
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	30
1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -33,7 +33,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <linux/ksm.h>
-#include <linux/hash.h>
+#include <linux/hashtable.h>
 #include <linux/freezer.h>
 #include <linux/oom.h>
 
@@ -156,9 +156,8 @@ struct rmap_item {
 static struct rb_root root_stable_tree = RB_ROOT;
 static struct rb_root root_unstable_tree = RB_ROOT;
 
-#define MM_SLOTS_HASH_SHIFT 10
-#define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT)
-static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS];
+#define MM_SLOTS_HASH_BITS 10
+static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
 
 static struct mm_slot ksm_mm_head = {
 	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
@@ -275,26 +274,21 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
-	struct mm_slot *mm_slot;
-	struct hlist_head *bucket;
 	struct hlist_node *node;
+	struct mm_slot *slot;
+
+	hash_for_each_possible(mm_slots_hash, slot, node, link, (unsigned long)mm)
+		if (slot->mm == mm)
+			return slot;
 
-	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
-	hlist_for_each_entry(mm_slot, node, bucket, link) {
-		if (mm == mm_slot->mm)
-			return mm_slot;
-	}
 	return NULL;
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
 				    struct mm_slot *mm_slot)
 {
-	struct hlist_head *bucket;
-
-	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
 	mm_slot->mm = mm;
-	hlist_add_head(&mm_slot->link, bucket);
+	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
 }
 
 static inline int in_stable_tree(struct rmap_item *rmap_item)
@@ -647,7 +641,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
 						struct mm_slot, mm_list);
 		if (ksm_test_exit(mm)) {
-			hlist_del(&mm_slot->link);
+			hash_del(&mm_slot->link);
 			list_del(&mm_slot->mm_list);
 			spin_unlock(&ksm_mmlist_lock);
 
@@ -1392,7 +1386,7 @@ next_mm:
 		 * or when all VM_MERGEABLE areas have been unmapped (and
 		 * mmap_sem then protects against race with MADV_MERGEABLE).
 		 */
-		hlist_del(&slot->link);
+		hash_del(&slot->link);
 		list_del(&slot->mm_list);
 		spin_unlock(&ksm_mmlist_lock);
 
@@ -1559,7 +1553,7 @@ void __ksm_exit(struct mm_struct *mm)
 	mm_slot = get_mm_slot(mm);
 	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
 		if (!mm_slot->rmap_list) {
-			hlist_del(&mm_slot->link);
+			hash_del(&mm_slot->link);
 			list_del(&mm_slot->mm_list);
 			easy_to_free = 1;
 		} else {
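Two details make the conversion mechanical. First, the key handed to hash_add() and hash_for_each_possible() is the raw (unsigned long)mm value: the hashtable macros apply their own hash function internally, which is what lets hash_ptr(mm, MM_SLOTS_HASH_SHIFT) and the explicit bucket computation disappear. Second, hash_del() takes only the hlist_node, so each former hlist_del(&...->link) call site converts one-for-one.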