author     Christoph Lameter <clameter@sgi.com>    2008-01-08 02:20:29 -0500
committer  Christoph Lameter <christoph@stapp.engr.sgi.com>    2008-02-07 20:47:41 -0500
commit     683d0baad3d6e18134927f8c28ee804dbe10fe71
tree       a1ef69f9cde406492b5deedc6874312cbeb3b357 /mm
parent     5bb983b0cce9b7b281af15730f7019116dd42568
SLUB: Use unique end pointer for each slab page.
We use a NULL pointer on freelists to signal that there are no more objects. However, the NULL pointers of all slabs are identical, in contrast to the pointers to the real objects, which fall in different ranges for different slab pages.

Change the end pointer to be a pointer to the first object with bit 0 set. Every slab then has a different end pointer. This is necessary to ensure that end markers can be matched to the source slab during cmpxchg_local.

Bring back SLUB's use of the page mapping field, since we would otherwise have to call the relatively expensive page_address() in __slab_alloc(). Using the mapping field also avoids calls to page_address() in various other functions.

There is no need to change page_mapping(), since bit 0 is set on the mapping just as it is for anonymous pages. page_mapping(slab_page) will therefore still return NULL even though the mapping field is overloaded.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
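[Editor's illustration, not part of the patch: a minimal user-space sketch of the end-marker encoding described above. The marker is the address of the slab's first object with bit 0 set, so every slab gets a distinct yet recognizable terminator. The helper make_end() is hypothetical, and PAGE_MAPPING_ANON is defined locally for the sketch only.]

/* Build as a normal user-space program, e.g. cc -o endptr endptr.c */
#include <stdio.h>

/* In the kernel this is the anon-mapping bit (bit 0); defined here for the sketch. */
#define PAGE_MAPPING_ANON 1UL

/* Hypothetical helper: tag the first object's address with bit 0 to form the end marker. */
static inline void *make_end(void *first_object)
{
	return (void *)((unsigned long)first_object | PAGE_MAPPING_ANON);
}

/* Mirrors the is_end() test added by the patch: bit 0 set means "end of freelist". */
static inline int is_end(void *addr)
{
	return (unsigned long)addr & PAGE_MAPPING_ANON;
}

int main(void)
{
	long slab_a[8], slab_b[8];	/* aligned stand-ins for two different slab pages */
	void *end_a = make_end(slab_a);
	void *end_b = make_end(slab_b);

	/* A real (aligned) object pointer is never mistaken for an end marker. */
	printf("object is end? %d\n", is_end((void *)slab_a));		/* 0 */

	/* Both markers are recognizable as terminators... */
	printf("end markers?   %d %d\n", is_end(end_a), is_end(end_b));	/* 1 1 */

	/*
	 * ...yet they differ per slab, unlike a shared NULL terminator, so a
	 * cmpxchg-style compare can tell which slab a marker belongs to.
	 */
	printf("distinct?      %d\n", end_a != end_b);			/* 1 */
	return 0;
}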
Diffstat (limited to 'mm')
-rw-r--r--    mm/slub.c    70
1 files changed, 47 insertions, 23 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 2dacaf519c4..5995626e0cf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -280,15 +280,32 @@ static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 #endif
 }
 
+/*
+ * The end pointer in a slab is special. It points to the first object in the
+ * slab but has bit 0 set to mark it.
+ *
+ * Note that SLUB relies on page_mapping returning NULL for pages with bit 0
+ * in the mapping set.
+ */
+static inline int is_end(void *addr)
+{
+	return (unsigned long)addr & PAGE_MAPPING_ANON;
+}
+
+void *slab_address(struct page *page)
+{
+	return page->end - PAGE_MAPPING_ANON;
+}
+
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
 {
 	void *base;
 
-	if (!object)
+	if (object == page->end)
 		return 1;
 
-	base = page_address(page);
+	base = slab_address(page);
 	if (object < base || object >= base + s->objects * s->size ||
 		(object - base) % s->size) {
 		return 0;
@@ -321,7 +338,8 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 
 /* Scan freelist */
 #define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
+	for (__p = (__free); (__p) != page->end; __p = get_freepointer((__s),\
+			__p))
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -473,7 +491,7 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned int off;	/* Offset of last byte */
-	u8 *addr = page_address(page);
+	u8 *addr = slab_address(page);
 
 	print_tracking(s, p);
 
@@ -651,7 +669,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	if (!(s->flags & SLAB_POISON))
 		return 1;
 
-	start = page_address(page);
+	start = slab_address(page);
 	end = start + (PAGE_SIZE << s->order);
 	length = s->objects * s->size;
 	remainder = end - (start + length);
@@ -718,7 +736,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
-		set_freepointer(s, p, NULL);
+		set_freepointer(s, p, page->end);
 		return 0;
 	}
 	return 1;
@@ -752,18 +770,18 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	void *fp = page->freelist;
 	void *object = NULL;
 
-	while (fp && nr <= s->objects) {
+	while (fp != page->end && nr <= s->objects) {
 		if (fp == search)
 			return 1;
 		if (!check_valid_pointer(s, page, fp)) {
 			if (object) {
 				object_err(s, page, object,
 					"Freechain corrupt");
-				set_freepointer(s, object, NULL);
+				set_freepointer(s, object, page->end);
 				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
-				page->freelist = NULL;
+				page->freelist = page->end;
 				page->inuse = s->objects;
 				slab_fix(s, "Freelist cleared");
 				return 0;
@@ -869,7 +887,7 @@ bad:
 		 */
 		slab_fix(s, "Marking all objects used");
 		page->inuse = s->objects;
-		page->freelist = NULL;
+		page->freelist = page->end;
 	}
 	return 0;
 }
@@ -910,7 +928,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!SlabFrozen(page) && !page->freelist)
+	if (!SlabFrozen(page) && page->freelist == page->end)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
@@ -1102,6 +1120,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		SetSlabDebug(page);
 
 	start = page_address(page);
+	page->end = start + 1;
 
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
@@ -1113,7 +1132,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		last = p;
 	}
 	setup_object(s, page, last);
-	set_freepointer(s, last, NULL);
+	set_freepointer(s, last, page->end);
 
 	page->freelist = start;
 	page->inuse = 0;
@@ -1129,7 +1148,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		void *p;
 
 		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page))
+		for_each_object(p, s, slab_address(page))
 			check_object(s, page, p, 0);
 		ClearSlabDebug(page);
 	}
@@ -1139,6 +1158,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		-pages);
 
+	page->mapping = NULL;
 	__free_pages(page, s->order);
 }
 
@@ -1341,7 +1361,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	ClearSlabFrozen(page);
 	if (page->inuse) {
 
-		if (page->freelist)
+		if (page->freelist != page->end)
 			add_partial(n, page, tail);
 		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
@@ -1377,8 +1397,12 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	 * Merge cpu freelist into freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
 	 * to occur.
+	 *
+	 * We need to use _is_end here because deactivate slab may
+	 * be called for a debug slab. Then c->freelist may contain
+	 * a dummy pointer.
 	 */
-	while (unlikely(c->freelist)) {
+	while (unlikely(!is_end(c->freelist))) {
 		void **object;
 
 		tail = 0;	/* Hot objects. Put the slab first */
@@ -1478,7 +1502,7 @@ static void *__slab_alloc(struct kmem_cache *s,
 		goto another_slab;
 load_freelist:
 	object = c->page->freelist;
-	if (unlikely(!object))
+	if (unlikely(object == c->page->end))
 		goto another_slab;
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
@@ -1486,7 +1510,7 @@ load_freelist:
 	object = c->page->freelist;
 	c->freelist = object[c->offset];
 	c->page->inuse = s->objects;
-	c->page->freelist = NULL;
+	c->page->freelist = c->page->end;
 	c->node = page_to_nid(c->page);
 	slab_unlock(c->page);
 	return object;
@@ -1550,7 +1574,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	if (unlikely(is_end(c->freelist) || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -1614,7 +1638,7 @@ checks_ok:
 	 * was not on the partial list before
 	 * then add it.
 	 */
-	if (unlikely(!prior))
+	if (unlikely(prior == page->end))
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
 
 out_unlock:
@@ -1622,7 +1646,7 @@ out_unlock:
 	return;
 
 slab_empty:
-	if (prior)
+	if (prior != page->end)
 		/*
 		 * Slab still on the partial list.
 		 */
@@ -1842,7 +1866,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 			struct kmem_cache_cpu *c)
 {
 	c->page = NULL;
-	c->freelist = NULL;
+	c->freelist = (void *)PAGE_MAPPING_ANON;
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
@@ -3105,7 +3129,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 			unsigned long *map)
 {
 	void *p;
-	void *addr = page_address(page);
+	void *addr = slab_address(page);
 
 	if (!check_slab(s, page) ||
 			!on_freelist(s, page, NULL))
@@ -3385,7 +3409,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
 		struct page *page, enum track_item alloc)
 {
-	void *addr = page_address(page);
+	void *addr = slab_address(page);
 	DECLARE_BITMAP(map, s->objects);
 	void *p;
 