author		Sasha Levin <levinsasha928@gmail.com>	2013-02-22 19:32:28 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-23 20:50:10 -0500
commit		4ca3a69bcb6875c3f20802522c1b4fc56bb14608
tree		e5877d6bf4106733aa7122f0172efff8b87059f7 /mm/ksm.c
parent		43b5fbbd28294e095653d3c1ba9c399b2168e8df
mm/ksm.c: use new hashtable implementation
Switch ksm to use the new hashtable implementation.  This reduces the
amount of generic unrelated code in the ksm module.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
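For readers unfamiliar with <linux/hashtable.h>: it hides the fixed-size array
of hlist buckets, the hash function, and the bucket lookup behind a handful of
macros. Below is a minimal sketch of the same pattern the patch adopts, using a
hypothetical struct foo keyed by a pointer (struct foo, foo_hash, and the
foo_* helpers are illustrative, not from this commit). Note the
struct hlist_node * cursor matches hash_for_each_possible()'s signature as of
this commit; later kernels dropped that argument.

	#include <linux/hashtable.h>

	struct foo {
		void *key;
		struct hlist_node link;	/* chains the object into its bucket */
	};

	/* 2^10 buckets, statically sized and initialized by the macro */
	static DEFINE_HASHTABLE(foo_hash, 10);

	static void foo_insert(struct foo *f, void *key)
	{
		f->key = key;
		/* hashes the key and adds f to the matching bucket */
		hash_add(foo_hash, &f->link, (unsigned long)key);
	}

	static struct foo *foo_lookup(void *key)
	{
		struct hlist_node *node;
		struct foo *f;

		/* walks only the one bucket the key hashes to */
		hash_for_each_possible(foo_hash, f, node, link, (unsigned long)key)
			if (f->key == key)
				return f;
		return NULL;
	}

	static void foo_remove(struct foo *f)
	{
		hash_del(&f->link);	/* unlinks f from its bucket */
	}

This is exactly the shape get_mm_slot() and insert_to_mm_slots_hash() take in
the diff below, with the struct mm_struct pointer as the key; the hand-rolled
MM_SLOTS_HASH_HEADS array and the hash_ptr() bucket arithmetic disappear into
the macros.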
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	30
1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index e1f1f278075f..d3842b206f8a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -33,7 +33,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <linux/ksm.h>
-#include <linux/hash.h>
+#include <linux/hashtable.h>
 #include <linux/freezer.h>
 #include <linux/oom.h>
 
@@ -156,9 +156,8 @@ struct rmap_item {
 static struct rb_root root_stable_tree = RB_ROOT;
 static struct rb_root root_unstable_tree = RB_ROOT;
 
-#define MM_SLOTS_HASH_SHIFT 10
-#define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT)
-static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS];
+#define MM_SLOTS_HASH_BITS 10
+static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
 
 static struct mm_slot ksm_mm_head = {
 	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
@@ -275,26 +274,21 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
-	struct mm_slot *mm_slot;
-	struct hlist_head *bucket;
 	struct hlist_node *node;
+	struct mm_slot *slot;
+
+	hash_for_each_possible(mm_slots_hash, slot, node, link, (unsigned long)mm)
+		if (slot->mm == mm)
+			return slot;
 
-	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
-	hlist_for_each_entry(mm_slot, node, bucket, link) {
-		if (mm == mm_slot->mm)
-			return mm_slot;
-	}
 	return NULL;
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
 				    struct mm_slot *mm_slot)
 {
-	struct hlist_head *bucket;
-
-	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
 	mm_slot->mm = mm;
-	hlist_add_head(&mm_slot->link, bucket);
+	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
 }
 
 static inline int in_stable_tree(struct rmap_item *rmap_item)
@@ -647,7 +641,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
 						struct mm_slot, mm_list);
 		if (ksm_test_exit(mm)) {
-			hlist_del(&mm_slot->link);
+			hash_del(&mm_slot->link);
 			list_del(&mm_slot->mm_list);
 			spin_unlock(&ksm_mmlist_lock);
 
@@ -1392,7 +1386,7 @@ next_mm:
 		 * or when all VM_MERGEABLE areas have been unmapped (and
 		 * mmap_sem then protects against race with MADV_MERGEABLE).
 		 */
-		hlist_del(&slot->link);
+		hash_del(&slot->link);
 		list_del(&slot->mm_list);
 		spin_unlock(&ksm_mmlist_lock);
 
@@ -1559,7 +1553,7 @@ void __ksm_exit(struct mm_struct *mm)
 	mm_slot = get_mm_slot(mm);
 	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
 		if (!mm_slot->rmap_list) {
-			hlist_del(&mm_slot->link);
+			hash_del(&mm_slot->link);
 			list_del(&mm_slot->mm_list);
 			easy_to_free = 1;
 		} else {