author		Christoph Lameter <cl@linux.com>	2012-06-13 11:24:52 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-06-14 02:19:47 -0400
commit		b8c24c4aef94b1f0daafb450363fef13a1163780 (patch)
tree		59f83d4c3cdaf4d58a0692eede704e658ce5659b /mm/slob.c
parent		23910c50cced8f35d53e4f8ea0cc09ad58d286af (diff)
slob: Define page struct fields used in mm_types.h
Define the fields used by slob in mm_types.h and use struct page instead
of struct slob_page in slob. This cleans up numerous typecasts in slob.c
and makes readers aware of slob's use of page struct fields.

[Also cleans up some bitrot in slob.c. The page struct field layout in
slob.c is an old layout and does not match the one in mm_types.h]

Reviewed-by: Glauber Costa <glommer@parallels.com>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
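For orientation, here is a simplified sketch of the struct page fields slob relies on after this change, as exercised throughout the diff below (sp->freelist, sp->units, sp->list, sp->mapping, sp->private). This is not the literal mm_types.h layout: the real definition folds these fields into unions shared with the page cache and the other slab allocators, which is exactly the detail elided here.

/*
 * Hypothetical flattened view; see include/linux/mm_types.h for the
 * real union-based layout this commit defines.
 */
struct page {
	unsigned long flags;		/* PG_slab; free-list flag for slob pages */
	struct address_space *mapping;	/* cleared in free_slob_page() */
	void *freelist;			/* first free slob_t in the page */
	int units;			/* free units left in the page */
	struct list_head list;		/* links partially free slob pages */
	unsigned long private;		/* size of a bigblock allocation (ksize()) */
	/* ... remaining struct page fields ... */
};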
Diffstat (limited to 'mm/slob.c')
-rw-r--r--	mm/slob.c	95
1 file changed, 37 insertions, 58 deletions
diff --git a/mm/slob.c b/mm/slob.c
index 8105be42cad1..30862a2d56a9 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -92,33 +92,12 @@ struct slob_block {
 typedef struct slob_block slob_t;
 
 /*
- * We use struct page fields to manage some slob allocation aspects,
- * however to avoid the horrible mess in include/linux/mm_types.h, we'll
- * just define our own struct page type variant here.
- */
-struct slob_page {
-	union {
-		struct {
-			unsigned long flags;	/* mandatory */
-			atomic_t _count;	/* mandatory */
-			slobidx_t units;	/* free units left in page */
-			unsigned long pad[2];
-			slob_t *free;		/* first free slob_t in page */
-			struct list_head list;	/* linked list of free pages */
-		};
-		struct page page;
-	};
-};
-static inline void struct_slob_page_wrong_size(void)
-{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
-
-/*
  * free_slob_page: call before a slob_page is returned to the page allocator.
  */
-static inline void free_slob_page(struct slob_page *sp)
+static inline void free_slob_page(struct page *sp)
 {
-	reset_page_mapcount(&sp->page);
-	sp->page.mapping = NULL;
+	reset_page_mapcount(sp);
+	sp->mapping = NULL;
 }
 
 /*
@@ -133,44 +112,44 @@ static LIST_HEAD(free_slob_large);
 /*
  * is_slob_page: True for all slob pages (false for bigblock pages)
  */
-static inline int is_slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct page *sp)
 {
-	return PageSlab((struct page *)sp);
+	return PageSlab(sp);
 }
 
-static inline void set_slob_page(struct slob_page *sp)
+static inline void set_slob_page(struct page *sp)
 {
-	__SetPageSlab((struct page *)sp);
+	__SetPageSlab(sp);
 }
 
-static inline void clear_slob_page(struct slob_page *sp)
+static inline void clear_slob_page(struct page *sp)
 {
-	__ClearPageSlab((struct page *)sp);
+	__ClearPageSlab(sp);
 }
 
-static inline struct slob_page *slob_page(const void *addr)
+static inline struct page *slob_page(const void *addr)
 {
-	return (struct slob_page *)virt_to_page(addr);
+	return virt_to_page(addr);
 }
 
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
-static inline int slob_page_free(struct slob_page *sp)
+static inline int slob_page_free(struct page *sp)
 {
-	return PageSlobFree((struct page *)sp);
+	return PageSlobFree(sp);
 }
 
-static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
+static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
 	list_add(&sp->list, list);
-	__SetPageSlobFree((struct page *)sp);
+	__SetPageSlobFree(sp);
 }
 
-static inline void clear_slob_page_free(struct slob_page *sp)
+static inline void clear_slob_page_free(struct page *sp)
 {
 	list_del(&sp->list);
-	__ClearPageSlobFree((struct page *)sp);
+	__ClearPageSlobFree(sp);
 }
 
 #define SLOB_UNIT sizeof(slob_t)
@@ -267,12 +246,12 @@ static void slob_free_pages(void *b, int order)
 /*
  * Allocate a slob block within a given slob_page sp.
  */
-static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
+static void *slob_page_alloc(struct page *sp, size_t size, int align)
 {
 	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
-	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
+	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
 		slobidx_t avail = slob_units(cur);
 
 		if (align) {
@@ -296,12 +275,12 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 				if (prev)
 					set_slob(prev, slob_units(prev), next);
 				else
-					sp->free = next;
+					sp->freelist = next;
 			} else { /* fragment */
 				if (prev)
 					set_slob(prev, slob_units(prev), cur + units);
 				else
-					sp->free = cur + units;
+					sp->freelist = cur + units;
 				set_slob(cur + units, avail - units, next);
 			}
 
@@ -320,7 +299,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
  */
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
-	struct slob_page *sp;
+	struct page *sp;
 	struct list_head *prev;
 	struct list_head *slob_list;
 	slob_t *b = NULL;
@@ -341,7 +320,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != -1 && page_to_nid(&sp->page) != node)
+		if (node != -1 && page_to_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -374,7 +353,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	spin_lock_irqsave(&slob_lock, flags);
 	sp->units = SLOB_UNITS(PAGE_SIZE);
-	sp->free = b;
+	sp->freelist = b;
 	INIT_LIST_HEAD(&sp->list);
 	set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 	set_slob_page_free(sp, slob_list);
@@ -392,7 +371,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
  */
 static void slob_free(void *block, int size)
 {
-	struct slob_page *sp;
+	struct page *sp;
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
@@ -421,7 +400,7 @@ static void slob_free(void *block, int size)
 	if (!slob_page_free(sp)) {
 		/* This slob page is about to become partially free. Easy! */
 		sp->units = units;
-		sp->free = b;
+		sp->freelist = b;
 		set_slob(b, units,
 			(void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
@@ -441,15 +420,15 @@ static void slob_free(void *block, int size)
 	 */
 	sp->units += units;
 
-	if (b < sp->free) {
-		if (b + units == sp->free) {
-			units += slob_units(sp->free);
-			sp->free = slob_next(sp->free);
+	if (b < (slob_t *)sp->freelist) {
+		if (b + units == sp->freelist) {
+			units += slob_units(sp->freelist);
+			sp->freelist = slob_next(sp->freelist);
 		}
-		set_slob(b, units, sp->free);
-		sp->free = b;
+		set_slob(b, units, sp->freelist);
+		sp->freelist = b;
 	} else {
-		prev = sp->free;
+		prev = sp->freelist;
 		next = slob_next(prev);
 		while (b > next) {
 			prev = next;
@@ -522,7 +501,7 @@ EXPORT_SYMBOL(__kmalloc_node);
 
 void kfree(const void *block)
 {
-	struct slob_page *sp;
+	struct page *sp;
 
 	trace_kfree(_RET_IP_, block);
 
@@ -536,14 +515,14 @@ void kfree(const void *block)
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
 	} else
-		put_page(&sp->page);
+		put_page(sp);
 }
 EXPORT_SYMBOL(kfree);
 
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t ksize(const void *block)
 {
-	struct slob_page *sp;
+	struct page *sp;
 
 	BUG_ON(!block);
 	if (unlikely(block == ZERO_SIZE_PTR))
@@ -555,7 +534,7 @@ size_t ksize(const void *block)
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
 	} else
-		return sp->page.private;
+		return sp->private;
 }
 EXPORT_SYMBOL(ksize);
 
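A side note on the kfree()/ksize() hunks above: slob's kmalloc path stores the requested size in a header immediately before the pointer it hands out, so both functions recover it by stepping back align bytes (slob_free(m, *m + align); SLOB_UNITS(*m) * SLOB_UNIT). Below is a minimal userspace sketch of that convention; the toy_* names are hypothetical, malloc() stands in for slob_alloc(), and the unit rounding that SLOB_UNITS() performs is omitted.

#include <stdio.h>
#include <stdlib.h>

#define TOY_ALIGN sizeof(unsigned long)	/* stand-in for ARCH_KMALLOC_MINALIGN */

/* kmalloc-style: prepend a size header, return the aligned tail. */
static void *toy_kmalloc(size_t size)
{
	unsigned int *m = malloc(size + TOY_ALIGN);	/* slob_alloc() in the kernel */

	if (!m)
		return NULL;
	*m = size;			/* remember the requested size */
	return (char *)m + TOY_ALIGN;
}

/* ksize-style: step back over the header to read the stored size. */
static size_t toy_ksize(const void *block)
{
	const unsigned int *m = (const void *)((const char *)block - TOY_ALIGN);

	return *m;
}

/* kfree-style: free from the header, as in slob_free(m, *m + align). */
static void toy_kfree(void *block)
{
	free((char *)block - TOY_ALIGN);
}

int main(void)
{
	void *p = toy_kmalloc(100);

	printf("%zu\n", toy_ksize(p));	/* prints 100 */
	toy_kfree(p);
	return 0;
}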