author	Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>	2009-01-07 21:07:51 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-08 11:31:04 -0500
commit	0753b0ef3b301895234fed02bea2c099c7ff4feb (patch)
tree	35d01368e76d0e5ad21b38fc80274154f8a35d62
parent	01b1ae63c2270cbacfd43fea94578c17950eb548 (diff)
memcg: do not recalculate section unnecessarily in init_section_page_cgroup
In init_section_page_cgroup() the section a given pfn belongs to is
calculated at the top of the function and, despite the fact that the
pfn/section correspondence does not change, it is recalculated further
down the same function. By computing this just once and reusing that
value we save some bytes in the object file and do not waste CPU cycles.

Signed-off-by: Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
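For context on why hoisting the call is safe: under sparsemem, __pfn_to_section() is pure arithmetic on the pfn (a shift to get the section number, then a table lookup), so a given pfn always maps to the same struct mem_section. The sketch below is a simplified model, not the kernel's exact, config-dependent definitions; PFN_SECTION_SHIFT, NR_MEM_SECTIONS, the flat mem_sections[] array and the helper name are assumptions used purely for illustration.

/*
 * Minimal sketch of the sparsemem pfn -> section mapping, simplified
 * for illustration. PFN_SECTION_SHIFT, NR_MEM_SECTIONS and the flat
 * mem_sections[] table are assumed values, not the kernel's exact
 * definitions.
 */
#define PFN_SECTION_SHIFT	15		/* e.g. 128MB sections with 4KB pages */
#define NR_MEM_SECTIONS		(1UL << 16)

struct page_cgroup;				/* opaque here */

struct mem_section {
	unsigned long section_mem_map;
	struct page_cgroup *page_cgroup;	/* field this patch initializes */
};

static struct mem_section mem_sections[NR_MEM_SECTIONS];

/* Pure function of pfn: the same pfn always yields the same section. */
static inline struct mem_section *pfn_to_section_sketch(unsigned long pfn)
{
	return &mem_sections[pfn >> PFN_SECTION_SHIFT];
}

Because the mapping never changes for a given pfn, init_section_page_cgroup() can compute the section once in the declaration and reuse it at the later assignment, which is what the diff below does.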
 mm/page_cgroup.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index d6507a660ed6..df1e54a5ed19 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -103,13 +103,11 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
 /* __alloc_bootmem...() is protected by !slab_available() */
 static int __init_refok init_section_page_cgroup(unsigned long pfn)
 {
-	struct mem_section *section;
+	struct mem_section *section = __pfn_to_section(pfn);
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	int nid, index;
 
-	section = __pfn_to_section(pfn);
-
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
@@ -145,7 +143,6 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 		__init_page_cgroup(pc, pfn + index);
 	}
 
-	section = __pfn_to_section(pfn);
 	section->page_cgroup = base - pfn;
 	total_usage += table_size;
 	return 0;