author     Johannes Weiner <hannes@cmpxchg.org>          2013-04-29 18:07:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 18:54:35 -0400
commit     0aad818b2de455f1bfd7ef87c28cdbbaaed9a699 (patch)
tree       a86fe62f7c740d431f76bd2262abae5825e1a21e /mm/sparse-vmemmap.c
parent     055e4fd96e95b0eee0d92fd54a26be7f0d3bcad0 (diff)
sparse-vmemmap: specify vmemmap population range in bytes
The sparse code, when asking the architecture to populate the vmemmap,
specifies the section range as a starting page and a number of pages.

This is an awkward interface, because none of the arch-specific code
actually thinks of the range in terms of 'struct page' units and always
translates it to bytes first.

In addition, later patches mix huge page and regular page backing for
the vmemmap.  For this, they need to call vmemmap_populate_basepages()
on sub-section ranges with PAGE_SIZE and PMD_SIZE in mind.  But these
are not necessarily multiples of the 'struct page' size and so this
unit is too coarse.

Just translate the section range into bytes once in the generic sparse
code, then pass byte ranges down the stack.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: David S. Miller <davem@davemloft.net>
Tested-by: David S. Miller <davem@davemloft.net>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
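[Editorial illustration, not part of the commit: the change boils down to the
following shift in calling convention.  The "before" prototype shape is
inferred from the removed lines in the diff below; exact arch-level
prototypes may differ slightly.]

    /* Before: the range is a starting struct page plus a count of pages. */
    int vmemmap_populate(struct page *start_page, unsigned long size, int node);

    /* After: the range is a virtual address range in bytes, [start, end). */
    int vmemmap_populate(unsigned long start, unsigned long end, int node);

    /* The generic sparse code now does the page-to-byte translation once: */
    struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
    unsigned long start = (unsigned long)map;
    unsigned long end = (unsigned long)(map + PAGES_PER_SECTION);

    if (vmemmap_populate(start, end, nid))
            return NULL;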
Diffstat (limited to 'mm/sparse-vmemmap.c')
-rw-r--r--  mm/sparse-vmemmap.c  19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 22b7e18e9dea..27eeab3be757 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -147,11 +147,10 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
         return pgd;
 }
 
-int __meminit vmemmap_populate_basepages(struct page *start_page,
-                                                unsigned long size, int node)
+int __meminit vmemmap_populate_basepages(unsigned long start,
+                                         unsigned long end, int node)
 {
-        unsigned long addr = (unsigned long)start_page;
-        unsigned long end = (unsigned long)(start_page + size);
+        unsigned long addr = start;
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
@@ -178,9 +177,15 @@ int __meminit vmemmap_populate_basepages(struct page *start_page,
 
 struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
 {
-        struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
-        int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
-        if (error)
+        unsigned long start;
+        unsigned long end;
+        struct page *map;
+
+        map = pfn_to_page(pnum * PAGES_PER_SECTION);
+        start = (unsigned long)map;
+        end = (unsigned long)(map + PAGES_PER_SECTION);
+
+        if (vmemmap_populate(start, end, nid))
                 return NULL;
 
         return map;
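[Editorial illustration, not part of the commit: the byte-based interface lets
populate loops step directly in PAGE_SIZE (or PMD_SIZE) increments.  A minimal
sketch of that idea follows; the real body of vmemmap_populate_basepages() lies
outside the hunks above, and populate_one_basepage() is a hypothetical
stand-in for the per-page table setup.]

    int vmemmap_populate_basepages(unsigned long start, unsigned long end, int node)
    {
            unsigned long addr;

            /* walk the byte range one base page at a time */
            for (addr = start; addr < end; addr += PAGE_SIZE) {
                    /* hypothetical helper: map [addr, addr + PAGE_SIZE) */
                    if (populate_one_basepage(addr, node))
                            return -ENOMEM;
            }
            return 0;
    }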