path: root/mm/zsmalloc.c
author     Minchan Kim <minchan@kernel.org>                2016-07-26 18:23:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 19:19:19 -0400
commit     bdb0af7ca8f0e9f4c03a9169a744b22890641b64 (patch)
tree       cb0bbec5771ceb7322d776f8e9389375eeecdbe9 /mm/zsmalloc.c
parent     4f42047bbde059823fe70381387257a9e3bd229c (diff)
zsmalloc: factor page chain functionality out
For page migration, we need to create the page chain of a zspage dynamically, so this patch factors that logic out of alloc_zspage().

Link: http://lkml.kernel.org/r/1464736881-24886-8-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/zsmalloc.c')
-rw-r--r--  mm/zsmalloc.c | 59
1 file changed, 35 insertions(+), 24 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5da80961ff3e..07485a2e5b96 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -960,7 +960,8 @@ static void init_zspage(struct size_class *class, struct page *first_page)
 	unsigned long off = 0;
 	struct page *page = first_page;
 
-	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+	first_page->freelist = NULL;
+	set_zspage_inuse(first_page, 0);
 
 	while (page) {
 		struct page *next_page;
@@ -996,15 +997,16 @@ static void init_zspage(struct size_class *class, struct page *first_page)
 		page = next_page;
 		off %= PAGE_SIZE;
 	}
+
+	set_freeobj(first_page, (unsigned long)location_to_obj(first_page, 0));
 }
 
-/*
- * Allocate a zspage for the given size class
- */
-static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+static void create_page_chain(struct page *pages[], int nr_pages)
 {
-	int i, error;
-	struct page *first_page = NULL, *uninitialized_var(prev_page);
+	int i;
+	struct page *page;
+	struct page *prev_page = NULL;
+	struct page *first_page = NULL;
 
 	/*
 	 * Allocate individual pages and link them together as:
@@ -1017,20 +1019,14 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
 	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
 	 * identify the last page.
 	 */
-	error = -ENOMEM;
-	for (i = 0; i < class->pages_per_zspage; i++) {
-		struct page *page;
-
-		page = alloc_page(flags);
-		if (!page)
-			goto cleanup;
+	for (i = 0; i < nr_pages; i++) {
+		page = pages[i];
 
 		INIT_LIST_HEAD(&page->lru);
-		if (i == 0) {	/* first page */
+		if (i == 0) {
 			SetPagePrivate(page);
 			set_page_private(page, 0);
 			first_page = page;
-			set_zspage_inuse(first_page, 0);
 		}
 		if (i == 1)
 			set_page_private(first_page, (unsigned long)page);
@@ -1038,22 +1034,37 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
 			set_page_private(page, (unsigned long)first_page);
 		if (i >= 2)
 			list_add(&page->lru, &prev_page->lru);
-		if (i == class->pages_per_zspage - 1)	/* last page */
+		if (i == nr_pages - 1)
 			SetPagePrivate2(page);
 		prev_page = page;
 	}
+}
 
-	init_zspage(class, first_page);
+/*
+ * Allocate a zspage for the given size class
+ */
+static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+{
+	int i;
+	struct page *first_page = NULL;
+	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
 
-	set_freeobj(first_page, (unsigned long)location_to_obj(first_page, 0));
-	error = 0; /* Success */
+	for (i = 0; i < class->pages_per_zspage; i++) {
+		struct page *page;
 
-cleanup:
-	if (unlikely(error) && first_page) {
-		free_zspage(first_page);
-		first_page = NULL;
+		page = alloc_page(flags);
+		if (!page) {
+			while (--i >= 0)
+				__free_page(pages[i]);
+			return NULL;
+		}
+		pages[i] = page;
 	}
 
+	create_page_chain(pages, class->pages_per_zspage);
+	first_page = pages[0];
+	init_zspage(class, first_page);
+
 	return first_page;
 }
 
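
As context for the migration use case named in the commit message, here is a minimal sketch of how a later migration path could rebuild a zspage's page chain with one subpage swapped out, by collecting the current pages and handing them to the new create_page_chain(). This is an illustrative sketch only, not part of this patch: replace_sub_page() is a hypothetical name, and it assumes the existing get_next_page() helper, ZS_MAX_PAGES_PER_ZSPAGE, and class->pages_per_zspage from mm/zsmalloc.c.

/*
 * Illustrative sketch only, not part of this patch: rebuild a zspage's
 * page chain with @oldpage replaced by @newpage, as a migration path
 * might do. replace_sub_page() is a hypothetical name; get_next_page()
 * and ZS_MAX_PAGES_PER_ZSPAGE are existing zsmalloc helpers/constants.
 */
static void replace_sub_page(struct size_class *class, struct page *first_page,
			     struct page *newpage, struct page *oldpage)
{
	struct page *page = first_page;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = { NULL, };
	int idx = 0;

	/* Collect the current chain, substituting the page being migrated. */
	do {
		if (page == oldpage)
			pages[idx] = newpage;
		else
			pages[idx] = page;
		idx++;
		page = get_next_page(page);
	} while (page);

	/* Relink PG_private, PG_private_2 and page->lru over the new set. */
	create_page_chain(pages, class->pages_per_zspage);
}

Because create_page_chain() only takes an array of pages and a count, a caller like this can re-run the chain setup over any page set of the right size, which is exactly what makes the dynamic (migration-time) use possible.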