author | Hugh Dickins <hughd@google.com> | 2013-02-22 19:36:06 -0500
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-23 20:50:23 -0500
commit | bc56620b493496b8a6962080b644ccc537f4d526 | (patch)
tree | 46f59d3ac15cac32cbb97b8a48cc4dffb539b3f8 | /mm
parent | b599cbdf1c2d88eac7caed00854ee4eecb119a6b | (diff)
ksm: shrink 32-bit rmap_item back to 32 bytes
Think of struct rmap_item as an extension of struct page (restricted to
MADV_MERGEABLE areas): there may be a lot of them, so we need to keep them
small, especially on 32-bit architectures with limited lowmem.
Siting "int nid" after "unsigned int checksum" works nicely on 64-bit,
making no change to the 64-byte struct rmap_item; but it bloats the 32-bit
struct rmap_item from a nicely cache-aligned 32 bytes to 36 bytes, which
rounds up to 40 bytes once allocated from slab. We'd better avoid that.
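To make that size arithmetic easy to check, here is a stand-alone C sketch (not kernel code) that models the ILP32 layout with fixed-width stand-ins for the kernel's pointer, rb-tree and hlist types; all names are illustrative, and the slab rounding from 36 to 40 bytes is not modeled:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t ptr32;                          /* 32-bit pointer stand-in */

struct rb_node32 { ptr32 parent_color, left, right; };  /* 12 bytes */
struct hlist32   { ptr32 next, pprev; };                 /*  8 bytes */

/* Layout with "int nid" as a separate field (the 64-bit-friendly siting). */
struct rmap_item_before {
	ptr32 rmap_list;
	ptr32 anon_vma;
	ptr32 mm;
	uint32_t address;
	uint32_t oldchecksum;
	int32_t nid;                             /* the extra 4 bytes on 32-bit */
	union {
		struct rb_node32 node;
		struct { ptr32 head; struct hlist32 hlist; } stable;
	} u;
};

/* Layout with nid sharing a union with anon_vma, as in this patch. */
struct rmap_item_after {
	ptr32 rmap_list;
	union {
		ptr32 anon_vma;                  /* when stable */
		int32_t nid;                     /* when node of unstable tree */
	} v;
	ptr32 mm;
	uint32_t address;
	uint32_t oldchecksum;
	union {
		struct rb_node32 node;
		struct { ptr32 head; struct hlist32 hlist; } stable;
	} u;
};

int main(void)
{
	printf("separate nid: %zu bytes\n", sizeof(struct rmap_item_before)); /* 36 */
	printf("nid in union: %zu bytes\n", sizeof(struct rmap_item_after));  /* 32 */
	return 0;
}
```

On 64-bit the extra int merely fills the padding hole after the 4-byte checksum, which is why the struct stays at 64 bytes there either way.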
Hey, I only just remembered that the anon_vma pointer in struct
rmap_item has no purpose until the rmap_item is hung from a stable tree
node (which has its own nid field); and rmap_item's nid field has no
purpose other than to say which tree root to tell rb_erase() when
unlinking from an unstable tree.
Double them up in a union. There's just one place where we set anon_vma
early (when we already hold mmap_sem): now we must remove tree_rmap_item
from its unstable tree there, before overwriting nid. No need to
spatter BUG()s around: we'd be seeing oopses if this were wrong.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/ksm.c | 26 |
1 file changed, 14 insertions, 12 deletions
@@ -150,23 +150,25 @@ struct stable_node {
  * struct rmap_item - reverse mapping item for virtual addresses
  * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
  * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
+ * @nid: NUMA node id of unstable tree in which linked (may not match page)
  * @mm: the memory structure this rmap_item is pointing into
  * @address: the virtual address this rmap_item tracks (+ flags in low bits)
  * @oldchecksum: previous checksum of the page at that virtual address
- * @nid: NUMA node id of unstable tree in which linked (may not match page)
  * @node: rb node of this rmap_item in the unstable tree
  * @head: pointer to stable_node heading this list in the stable tree
  * @hlist: link into hlist of rmap_items hanging off that stable_node
  */
 struct rmap_item {
         struct rmap_item *rmap_list;
-        struct anon_vma *anon_vma;      /* when stable */
+        union {
+                struct anon_vma *anon_vma;      /* when stable */
+#ifdef CONFIG_NUMA
+                int nid;                /* when node of unstable tree */
+#endif
+        };
         struct mm_struct *mm;
         unsigned long address;          /* + low bits used for flags below */
         unsigned int oldchecksum;       /* when unstable */
-#ifdef CONFIG_NUMA
-        int nid;
-#endif
         union {
                 struct rb_node node;    /* when node of unstable tree */
                 struct {                /* when listed from stable tree */
@@ -1094,6 +1096,9 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
         if (err)
                 goto out;
 
+        /* Unstable nid is in union with stable anon_vma: remove first */
+        remove_rmap_item_from_tree(rmap_item);
+
         /* Must get reference to anon_vma while still holding mmap_sem */
         rmap_item->anon_vma = vma->anon_vma;
         get_anon_vma(vma->anon_vma);
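As a side note on why the ordering in this hunk matters: rb_erase() needs to be told which unstable tree root the item sits in, and that is exactly what the nid union member records, so the item must be unlinked before anon_vma overwrites it. Below is a tiny stand-alone sketch (not the kernel's code; all names are made up) of the same aliasing hazard, using per-nid buckets in place of the per-node rb-trees:

```c
#include <assert.h>
#include <stddef.h>

/* Toy model, not the kernel's types: while "unstable" an item is linked
 * into bucket[nid]; once "stable" the same storage holds a pointer,
 * mirroring the union this patch introduces in struct rmap_item. */
struct item {
	struct item *next;               /* singly-linked bucket chain */
	union {
		int nid;                 /* which bucket we are linked in */
		void *anon_vma;          /* reused only after unlinking */
	} u;
};

static struct item *bucket[4];

/* Unlinking walks bucket[u.nid], so it must run before nid is clobbered. */
static void unlink_item(struct item *it)
{
	struct item **pp = &bucket[it->u.nid];

	while (*pp && *pp != it)
		pp = &(*pp)->next;
	if (*pp)
		*pp = it->next;
}

int main(void)
{
	struct item a;
	void *some_anon_vma = &a;        /* stand-in for vma->anon_vma */

	a.next = NULL;
	a.u.nid = 2;
	bucket[2] = &a;

	/* Same order as the hunk above: unlink first, then overwrite the
	 * union member.  Doing these two steps the other way round would
	 * make unlink_item() read a garbage nid. */
	unlink_item(&a);
	a.u.anon_vma = some_anon_vma;

	assert(bucket[2] == NULL);
	return 0;
}
```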
@@ -1468,14 +1473,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
                 kpage = try_to_merge_two_pages(rmap_item, page,
                                                 tree_rmap_item, tree_page);
                 put_page(tree_page);
-                /*
-                 * As soon as we merge this page, we want to remove the
-                 * rmap_item of the page we have merged with from the unstable
-                 * tree, and insert it instead as new node in the stable tree.
-                 */
                 if (kpage) {
-                        remove_rmap_item_from_tree(tree_rmap_item);
-
+                        /*
+                         * The pages were successfully merged: insert new
+                         * node in the stable tree and add both rmap_items.
+                         */
                         lock_page(kpage);
                         stable_node = stable_tree_insert(kpage);
                         if (stable_node) {