author	Dan Streetman <ddstreet@ieee.org>	2014-10-09 18:30:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:26:03 -0400
commit	5538c562377580947916b3366898f1eb5f53768e (patch)
tree	c21621020e0c858da05326f4d0a9648e187cb9fd /mm
parent	6dd9737e31504f9377a8a19810ea4922e88516c1 (diff)
zsmalloc: simplify init_zspage free obj linking
Change zsmalloc init_zspage() logic to iterate through each object on each of its pages, checking the offset to verify the object is on the current page before linking it into the zspage.

The current zsmalloc init_zspage free object linking code has logic that relies on there only being one page per zspage when PAGE_SIZE is a multiple of class->size. It calculates the number of objects for the current page, and iterates through all of them plus one, to account for the assumed partial object at the end of the page. While this currently works, the logic can be simplified to just link the object at each successive offset until the offset is larger than PAGE_SIZE, which does not rely on PAGE_SIZE being a multiple of class->size.

Signed-off-by: Dan Streetman <ddstreet@ieee.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Seth Jennings <sjennings@variantweb.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
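For illustration, below is a minimal userspace sketch, not kernel code, of the two linking loops compared above. PAGE_SIZE and the class sizes are made-up values, obj_location_to_handle() is reduced to a counter of linked objects, and only the first page of a zspage (off = 0) is modelled; it is meant only to show that the old and new loops link the same number of free objects whether or not class->size divides PAGE_SIZE.

/*
 * Standalone sketch, not kernel code: models the old objs_on_page loop
 * and the new while-loop from this patch, counting how many free
 * objects each would link on one page.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* illustrative value, not the kernel macro */

/* Old logic: precompute objs_on_page, then link each in-page object. */
static unsigned int link_old(unsigned long off, unsigned long size)
{
	unsigned int i, objs_on_page, linked = 0;

	objs_on_page = (PAGE_SIZE - off) / size;
	for (i = 1; i <= objs_on_page; i++) {
		off += size;
		if (off < PAGE_SIZE)
			linked++;	/* link->next = obj_location_to_handle(page, i) */
	}
	return linked;
}

/* New logic: link at each successive offset until it passes PAGE_SIZE. */
static unsigned int link_new(unsigned long off, unsigned long size)
{
	unsigned int linked = 0;

	while ((off += size) < PAGE_SIZE)
		linked++;		/* link->next = obj_location_to_handle(page, i++) */
	return linked;
}

int main(void)
{
	/* class sizes that do (32, 256, 4096) and do not (208, 3264) divide PAGE_SIZE */
	unsigned long sizes[] = { 32, 256, 4096, 208, 3264 };
	size_t n;

	for (n = 0; n < sizeof(sizes) / sizeof(sizes[0]); n++)
		printf("size %4lu: old links %u objs, new links %u objs\n",
		       sizes[n], link_old(0, sizes[n]), link_new(0, sizes[n]));
	return 0;
}

Both counts come out equal for every size, including the ones that do not divide PAGE_SIZE evenly, which is the behavior the simplified loop preserves.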
Diffstat (limited to 'mm')
-rw-r--r--	mm/zsmalloc.c	14
1 file changed, 5 insertions, 9 deletions
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c81f63e73c5f..839a48c3ca27 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -628,7 +628,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
 	while (page) {
 		struct page *next_page;
 		struct link_free *link;
-		unsigned int i, objs_on_page;
+		unsigned int i = 1;
 
 		/*
 		 * page->index stores offset of first object starting
@@ -641,14 +641,10 @@ static void init_zspage(struct page *first_page, struct size_class *class)
 
 		link = (struct link_free *)kmap_atomic(page) +
 				off / sizeof(*link);
-		objs_on_page = (PAGE_SIZE - off) / class->size;
 
-		for (i = 1; i <= objs_on_page; i++) {
-			off += class->size;
-			if (off < PAGE_SIZE) {
-				link->next = obj_location_to_handle(page, i);
-				link += class->size / sizeof(*link);
-			}
+		while ((off += class->size) < PAGE_SIZE) {
+			link->next = obj_location_to_handle(page, i++);
+			link += class->size / sizeof(*link);
 		}
 
 		/*
@@ -660,7 +656,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
 		link->next = obj_location_to_handle(next_page, 0);
 		kunmap_atomic(link);
 		page = next_page;
-		off = (off + class->size) % PAGE_SIZE;
+		off %= PAGE_SIZE;
 	}
 }
 