author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2016-10-07 19:58:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-07 21:46:27 -0400
commit		980ac1672e7edaa927557a5186f1967cd45afcf5 (patch)
tree		714f4ee8ee55f1e16e003489839c9507bffee028
parent		0b06bb3f6075803a92a0075ba4eb44888dd8a68a (diff)
mm/page_ext: support extra space allocation by page_ext user
Until now, a page_ext user that wanted its own field in page_ext had to
hard-code it into struct page_ext. This wastes memory in the following
situation:

	struct page_ext {
	#ifdef CONFIG_A
		int a;
	#endif
	#ifdef CONFIG_B
		int b;
	#endif
	};

Assume the kernel is built with both CONFIG_A and CONFIG_B. Even if we
enable feature A but not feature B at runtime, each struct page_ext entry
takes two ints rather than one. This is an undesirable result, so this
patch fixes it.

To solve the above problem, this patch adds support for extra space
allocation at runtime. When a user's need() callback returns true, that
user's extra memory requirement is added to the entry size of page_ext,
and the offset of the user's extra space within the entry is returned
through struct page_ext_operations. With this offset, the user can access
its extra space without hard-coding a field into struct page_ext.

This patch only implements the infrastructure. The following patch will
use it for page_owner, which is currently the only user with its own
fields in page_ext.

Link: http://lkml.kernel.org/r/1471315879-32294-6-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
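For illustration only (not part of this patch), here is a minimal sketch of
what a client could look like under the new scheme; the identifiers
my_feature_data, my_feature_enabled, my_feature_need and
get_my_feature_data are invented. The client declares its size and need()
callback, and later reaches its per-page data through the offset that the
page_ext core fills in:

	#include <linux/page_ext.h>

	/* Hypothetical per-page data for an example feature. */
	struct my_feature_data {
		unsigned long stamp;
	};

	static bool my_feature_enabled = true;	/* e.g. set by a boot param */

	static bool my_feature_need(void)
	{
		/* Request extra space only when enabled at runtime. */
		return my_feature_enabled;
	}

	/* .size asks for room; .offset is filled in by the page_ext core. */
	struct page_ext_operations my_feature_ops = {
		.size = sizeof(struct my_feature_data),
		.need = my_feature_need,
	};

	/* The extra space sits 'offset' bytes past the page_ext entry. */
	static struct my_feature_data *
	get_my_feature_data(struct page_ext *page_ext)
	{
		return (void *)page_ext + my_feature_ops.offset;
	}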
-rw-r--r--	include/linux/page_ext.h	2
-rw-r--r--	mm/page_alloc.c			2
-rw-r--r--	mm/page_ext.c			41
3 files changed, 34 insertions(+), 11 deletions(-)
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 03f2a3e7d76d..179bdc4a470c 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -7,6 +7,8 @@
 
 struct pglist_data;
 struct page_ext_operations {
+	size_t offset;
+	size_t size;
 	bool (*need)(void);
 	void (*init)(void);
 };
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 06ea805d1b14..b0f133f2c655 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -687,7 +687,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 	__mod_zone_freepage_state(zone, (1 << order), migratetype);
 }
 #else
-struct page_ext_operations debug_guardpage_ops = { NULL, };
+struct page_ext_operations debug_guardpage_ops;
 static inline bool set_page_guard(struct zone *zone, struct page *page,
 			unsigned int order, int migratetype) { return false; }
 static inline void clear_page_guard(struct zone *zone, struct page *page,
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 16292829c5c5..121dcffc4ec1 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -42,6 +42,11 @@
  * and page extension core can skip to allocate memory. As result,
  * none of memory is wasted.
  *
+ * When need callback returns true, page_ext checks if there is a request for
+ * extra memory through size in struct page_ext_operations. If it is non-zero,
+ * extra space is allocated for each page_ext entry and offset is returned to
+ * user through offset in struct page_ext_operations.
+ *
  * The init callback is used to do proper initialization after page extension
  * is completely initialized. In sparse memory system, extra memory is
  * allocated some time later than memmap is allocated. In other words, lifetime
@@ -66,18 +71,24 @@ static struct page_ext_operations *page_ext_ops[] = {
 };
 
 static unsigned long total_usage;
+static unsigned long extra_mem;
 
 static bool __init invoke_need_callbacks(void)
 {
 	int i;
 	int entries = ARRAY_SIZE(page_ext_ops);
+	bool need = false;
 
 	for (i = 0; i < entries; i++) {
-		if (page_ext_ops[i]->need && page_ext_ops[i]->need())
-			return true;
+		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
+			page_ext_ops[i]->offset = sizeof(struct page_ext) +
+						extra_mem;
+			extra_mem += page_ext_ops[i]->size;
+			need = true;
+		}
 	}
 
-	return false;
+	return need;
 }
 
 static void __init invoke_init_callbacks(void)
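As a worked example of the accounting above (hypothetical users a_ops and
b_ops with invented need callbacks a_need/b_need, not part of this patch),
the loop lays the extra areas out back to back after struct page_ext:

	struct page_ext_operations a_ops = { .size = 8,  .need = a_need };
	struct page_ext_operations b_ops = { .size = 16, .need = b_need };

	/*
	 * After invoke_need_callbacks(), assuming both need() callbacks
	 * return true, one entry is laid out as:
	 *
	 *   [ struct page_ext ][ A: 8 bytes ][ B: 16 bytes ]
	 *   ^                  ^             ^
	 *   entry base         a_ops.offset  b_ops.offset
	 *
	 *   a_ops.offset = sizeof(struct page_ext)
	 *   b_ops.offset = sizeof(struct page_ext) + 8
	 *   extra_mem    = 24, so each entry now occupies
	 *                  sizeof(struct page_ext) + 24 bytes.
	 */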
@@ -91,6 +102,16 @@ static void __init invoke_init_callbacks(void)
 	}
 }
 
+static unsigned long get_entry_size(void)
+{
+	return sizeof(struct page_ext) + extra_mem;
+}
+
+static inline struct page_ext *get_entry(void *base, unsigned long index)
+{
+	return base + get_entry_size() * index;
+}
+
 #if !defined(CONFIG_SPARSEMEM)
 
 
@@ -121,7 +142,7 @@ struct page_ext *lookup_page_ext(struct page *page)
 #endif
 	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
 					MAX_ORDER_NR_PAGES);
-	return base + index;
+	return get_entry(base, index);
 }
 
 static int __init alloc_node_page_ext(int nid)
@@ -143,7 +164,7 @@ static int __init alloc_node_page_ext(int nid)
 		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
 		nr_pages += MAX_ORDER_NR_PAGES;
 
-	table_size = sizeof(struct page_ext) * nr_pages;
+	table_size = get_entry_size() * nr_pages;
 
 	base = memblock_virt_alloc_try_nid_nopanic(
 			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -196,7 +217,7 @@ struct page_ext *lookup_page_ext(struct page *page)
 	if (!section->page_ext)
 		return NULL;
 #endif
-	return section->page_ext + pfn;
+	return get_entry(section->page_ext, pfn);
 }
 
 static void *__meminit alloc_page_ext(size_t size, int nid)
@@ -229,7 +250,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 	if (section->page_ext)
 		return 0;
 
-	table_size = sizeof(struct page_ext) * PAGES_PER_SECTION;
+	table_size = get_entry_size() * PAGES_PER_SECTION;
 	base = alloc_page_ext(table_size, nid);
 
 	/*
@@ -249,7 +270,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 	 * we need to apply a mask.
 	 */
 	pfn &= PAGE_SECTION_MASK;
-	section->page_ext = base - pfn;
+	section->page_ext = (void *)base - get_entry_size() * pfn;
 	total_usage += table_size;
 	return 0;
 }
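A note on the (void *) cast above: pointer arithmetic on a struct
page_ext * steps in units of sizeof(struct page_ext), which no longer
matches the real entry stride once extra space is allocated. Doing the
subtraction in bytes keeps the stored base consistent with get_entry();
as a sanity check, for the first pfn of the section:

	/*
	 * section->page_ext = (void *)base - get_entry_size() * pfn;
	 *
	 * get_entry(section->page_ext, pfn)
	 *     == section->page_ext + get_entry_size() * pfn
	 *     == base, i.e. the first entry of the section.
	 */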
@@ -262,7 +283,7 @@ static void free_page_ext(void *addr)
 	struct page *page = virt_to_page(addr);
 	size_t table_size;
 
-	table_size = sizeof(struct page_ext) * PAGES_PER_SECTION;
+	table_size = get_entry_size() * PAGES_PER_SECTION;
 
 	BUG_ON(PageReserved(page));
 	free_pages_exact(addr, table_size);
@@ -277,7 +298,7 @@ static void __free_page_ext(unsigned long pfn)
 	ms = __pfn_to_section(pfn);
 	if (!ms || !ms->page_ext)
 		return;
-	base = ms->page_ext + pfn;
+	base = get_entry(ms->page_ext, pfn);
 	free_page_ext(base);
 	ms->page_ext = NULL;
 }