about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorJohannes Weiner <jweiner@redhat.com>2012-01-12 20:18:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-01-12 23:13:05 -0500
commit6b208e3f6e35aa76d254c395bdcd984b17c6b626 (patch)
treef7e65f9e1fa41907bf48f05007a10c83be668b3f /mm
parent925b7673cce39116ce61e7a06683a4a0dad1e72a (diff)
mm: memcg: remove unused node/section info from pc->flags
To find the page corresponding to a certain page_cgroup, the pc->flags encoded the node or section ID with the base array to compare the pc pointer to. Now that the per-memory cgroup LRU lists link page descriptors directly, there is no longer any code that knows the struct page_cgroup of a PFN but not the struct page. [hughd@google.com: remove unused node/section info from pc->flags fix] Signed-off-by: Johannes Weiner <jweiner@redhat.com> Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Reviewed-by: Michal Hocko <mhocko@suse.cz> Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Michal Hocko <mhocko@suse.cz> Cc: "Kirill A. Shutemov" <kirill@shutemov.name> Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Cc: Balbir Singh <bsingharora@gmail.com> Cc: Ying Han <yinghan@google.com> Cc: Greg Thelen <gthelen@google.com> Cc: Michel Lespinasse <walken@google.com> Cc: Rik van Riel <riel@redhat.com> Cc: Minchan Kim <minchan.kim@gmail.com> Cc: Christoph Hellwig <hch@infradead.org> Signed-off-by: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/page_cgroup.c59
1 file changed, 7 insertions, 52 deletions
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index f59405a8d752..f0559e049e00 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -11,12 +11,6 @@
11#include <linux/swapops.h> 11#include <linux/swapops.h>
12#include <linux/kmemleak.h> 12#include <linux/kmemleak.h>
13 13
14static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
15{
16 pc->flags = 0;
17 set_page_cgroup_array_id(pc, id);
18 pc->mem_cgroup = NULL;
19}
20static unsigned long total_usage; 14static unsigned long total_usage;
21 15
22#if !defined(CONFIG_SPARSEMEM) 16#if !defined(CONFIG_SPARSEMEM)
@@ -41,28 +35,13 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
41 return base + offset; 35 return base + offset;
42} 36}
43 37
44struct page *lookup_cgroup_page(struct page_cgroup *pc)
45{
46 unsigned long pfn;
47 struct page *page;
48 pg_data_t *pgdat;
49
50 pgdat = NODE_DATA(page_cgroup_array_id(pc));
51 pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn;
52 page = pfn_to_page(pfn);
53 VM_BUG_ON(pc != lookup_page_cgroup(page));
54 return page;
55}
56
57static int __init alloc_node_page_cgroup(int nid) 38static int __init alloc_node_page_cgroup(int nid)
58{ 39{
59 struct page_cgroup *base, *pc; 40 struct page_cgroup *base;
60 unsigned long table_size; 41 unsigned long table_size;
61 unsigned long start_pfn, nr_pages, index; 42 unsigned long nr_pages;
62 43
63 start_pfn = NODE_DATA(nid)->node_start_pfn;
64 nr_pages = NODE_DATA(nid)->node_spanned_pages; 44 nr_pages = NODE_DATA(nid)->node_spanned_pages;
65
66 if (!nr_pages) 45 if (!nr_pages)
67 return 0; 46 return 0;
68 47
@@ -72,10 +51,6 @@ static int __init alloc_node_page_cgroup(int nid)
72 table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 51 table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
73 if (!base) 52 if (!base)
74 return -ENOMEM; 53 return -ENOMEM;
75 for (index = 0; index < nr_pages; index++) {
76 pc = base + index;
77 init_page_cgroup(pc, nid);
78 }
79 NODE_DATA(nid)->node_page_cgroup = base; 54 NODE_DATA(nid)->node_page_cgroup = base;
80 total_usage += table_size; 55 total_usage += table_size;
81 return 0; 56 return 0;
@@ -116,23 +91,10 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
116 return section->page_cgroup + pfn; 91 return section->page_cgroup + pfn;
117} 92}
118 93
119struct page *lookup_cgroup_page(struct page_cgroup *pc)
120{
121 struct mem_section *section;
122 struct page *page;
123 unsigned long nr;
124
125 nr = page_cgroup_array_id(pc);
126 section = __nr_to_section(nr);
127 page = pfn_to_page(pc - section->page_cgroup);
128 VM_BUG_ON(pc != lookup_page_cgroup(page));
129 return page;
130}
131
132static void *__meminit alloc_page_cgroup(size_t size, int nid) 94static void *__meminit alloc_page_cgroup(size_t size, int nid)
133{ 95{
96 gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
134 void *addr = NULL; 97 void *addr = NULL;
135 gfp_t flags = GFP_KERNEL | __GFP_NOWARN;
136 98
137 addr = alloc_pages_exact_nid(nid, size, flags); 99 addr = alloc_pages_exact_nid(nid, size, flags);
138 if (addr) { 100 if (addr) {
@@ -141,9 +103,9 @@ static void *__meminit alloc_page_cgroup(size_t size, int nid)
141 } 103 }
142 104
143 if (node_state(nid, N_HIGH_MEMORY)) 105 if (node_state(nid, N_HIGH_MEMORY))
144 addr = vmalloc_node(size, nid); 106 addr = vzalloc_node(size, nid);
145 else 107 else
146 addr = vmalloc(size); 108 addr = vzalloc(size);
147 109
148 return addr; 110 return addr;
149} 111}
@@ -166,14 +128,11 @@ static void free_page_cgroup(void *addr)
166 128
167static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) 129static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
168{ 130{
169 struct page_cgroup *base, *pc;
170 struct mem_section *section; 131 struct mem_section *section;
132 struct page_cgroup *base;
171 unsigned long table_size; 133 unsigned long table_size;
172 unsigned long nr;
173 int index;
174 134
175 nr = pfn_to_section_nr(pfn); 135 section = __pfn_to_section(pfn);
176 section = __nr_to_section(nr);
177 136
178 if (section->page_cgroup) 137 if (section->page_cgroup)
179 return 0; 138 return 0;
@@ -193,10 +152,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
193 return -ENOMEM; 152 return -ENOMEM;
194 } 153 }
195 154
196 for (index = 0; index < PAGES_PER_SECTION; index++) {
197 pc = base + index;
198 init_page_cgroup(pc, nr);
199 }
200 /* 155 /*
201 * The passed "pfn" may not be aligned to SECTION. For the calculation 156 * The passed "pfn" may not be aligned to SECTION. For the calculation
202 * we need to apply a mask. 157 * we need to apply a mask.