 include/linux/mm_types.h |  5 +----
 mm/slub.c                | 70 +++++++++++++++++++-----------------------------
 2 files changed, 24 insertions(+), 51 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bfee0bd1d435..34023c65d466 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -64,10 +64,7 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 	    spinlock_t ptl;
 #endif
-	    struct {
-		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
-		void *end;			/* SLUB: end marker */
-	    };
+	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
 	    struct page *first_page;	/* Compound tail pages */
 	};
 	union {
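The removed fields live inside a union in struct page, so "end" shared its storage with page->mapping; that overlap is why the old marker set bit 0 (PAGE_MAPPING_ANON) and why __free_slab() below had to clear page->mapping. A minimal userspace sketch of that aliasing, with illustrative names and a simplified layout, not the real struct page:

	/* Sketch: "end" aliases "mapping", so an end marker with bit 0 set
	 * makes the page look like an anonymous mapping. Illustrative only. */
	#include <stdio.h>

	struct fake_page {
		union {
			struct {
				void *private;
				void *mapping;	/* what page_mapping() reads */
			};
			struct {
				void *slab;
				void *end;	/* aliased mapping before this patch */
			};
		};
	};

	int main(void)
	{
		struct fake_page page;

		page.end = (void *)((unsigned long)&page | 1UL);
		/* bit 0 set in "end" is visible through "mapping" */
		printf("mapping bit0: %lu\n", (unsigned long)page.mapping & 1UL);
		return 0;
	}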
diff --git a/mm/slub.c b/mm/slub.c
index 74c65af0a54f..a873953e5a11 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -291,32 +291,15 @@ static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 #endif
 }
 
-/*
- * The end pointer in a slab is special. It points to the first object in the
- * slab but has bit 0 set to mark it.
- *
- * Note that SLUB relies on page_mapping returning NULL for pages with bit 0
- * in the mapping set.
- */
-static inline int is_end(void *addr)
-{
-	return (unsigned long)addr & PAGE_MAPPING_ANON;
-}
-
-static void *slab_address(struct page *page)
-{
-	return page->end - PAGE_MAPPING_ANON;
-}
-
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
 {
 	void *base;
 
-	if (object == page->end)
+	if (!object)
 		return 1;
 
-	base = slab_address(page);
+	base = page_address(page);
 	if (object < base || object >= base + s->objects * s->size ||
 		(object - base) % s->size) {
 		return 0;
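For reference, the deleted helpers encoded the sentinel as the slab's base address with bit 0 set (PAGE_MAPPING_ANON is 1), which this hunk replaces with a plain NULL test plus page_address(). A standalone sketch of the old encoding and its inverse, under that assumption:

	/* Sketch of the old end-marker arithmetic: sentinel = base + bit 0.
	 * PAGE_MAPPING_ANON is assumed to be 1, as the removed code relied on. */
	#include <stdio.h>

	#define PAGE_MAPPING_ANON 1UL

	static int is_end(void *addr)
	{
		return (int)((unsigned long)addr & PAGE_MAPPING_ANON);
	}

	int main(void)
	{
		char slab[4096] __attribute__((aligned(4096)));
		void *end = slab + PAGE_MAPPING_ANON;	/* old page->end */
		char *base = (char *)end - PAGE_MAPPING_ANON; /* old slab_address() */

		printf("is_end(end) = %d, is_end(slab) = %d\n",
		       is_end(end), is_end(slab));
		printf("base recovered: %d\n", base == slab);
		return 0;
	}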
@@ -349,8 +332,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 
 /* Scan freelist */
 #define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); (__p) != page->end; __p = get_freepointer((__s),\
-		__p))
+	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
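With NULL as the terminator, the freelist is an ordinary intrusive singly linked list threaded through the free objects themselves, and the macro now stops at the first NULL free pointer. A userspace model of such a walk, simplified to a next pointer stored at offset 0:

	/* Model of a NULL-terminated freelist: each free object's first
	 * word holds the address of the next free object. */
	#include <stdio.h>

	int main(void)
	{
		void *obj[3];

		obj[0] = &obj[1];	/* freelist: obj[0] -> obj[1] -> obj[2] */
		obj[1] = &obj[2];
		obj[2] = NULL;		/* the new end-of-list sentinel */

		int nr = 0;
		for (void *p = &obj[0]; p; p = *(void **)p)
			nr++;
		printf("objects on freelist: %d\n", nr);	/* prints 3 */
		return 0;
	}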
@@ -502,7 +484,7 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned int off;	/* Offset of last byte */
-	u8 *addr = slab_address(page);
+	u8 *addr = page_address(page);
 
 	print_tracking(s, p);
 
@@ -680,7 +662,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	if (!(s->flags & SLAB_POISON))
 		return 1;
 
-	start = slab_address(page);
+	start = page_address(page);
 	end = start + (PAGE_SIZE << s->order);
 	length = s->objects * s->size;
 	remainder = end - (start + length);
@@ -748,7 +730,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
-		set_freepointer(s, p, page->end);
+		set_freepointer(s, p, NULL);
 		return 0;
 	}
 	return 1;
@@ -782,18 +764,18 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	void *fp = page->freelist;
 	void *object = NULL;
 
-	while (fp != page->end && nr <= s->objects) {
+	while (fp && nr <= s->objects) {
 		if (fp == search)
 			return 1;
 		if (!check_valid_pointer(s, page, fp)) {
 			if (object) {
 				object_err(s, page, object,
 					"Freechain corrupt");
-				set_freepointer(s, object, page->end);
+				set_freepointer(s, object, NULL);
 				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
-				page->freelist = page->end;
+				page->freelist = NULL;
 				page->inuse = s->objects;
 				slab_fix(s, "Freelist cleared");
 				return 0;
@@ -899,7 +881,7 @@ bad:
 	 */
 		slab_fix(s, "Marking all objects used");
 		page->inuse = s->objects;
-		page->freelist = page->end;
+		page->freelist = NULL;
 	}
 	return 0;
 }
@@ -939,7 +921,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!SlabFrozen(page) && page->freelist == page->end)
+	if (!SlabFrozen(page) && !page->freelist)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
@@ -1124,7 +1106,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		SetSlabDebug(page);
 
 	start = page_address(page);
-	page->end = start + 1;
 
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
@@ -1136,7 +1117,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		last = p;
 	}
 	setup_object(s, page, last);
-	set_freepointer(s, last, page->end);
+	set_freepointer(s, last, NULL);
 
 	page->freelist = start;
 	page->inuse = 0;
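new_slab() builds the list in place: the loop above chains each object to the next with set_freepointer(), the last object now gets NULL instead of the end marker, and only then is the head published in page->freelist. A simplified model of that construction, with the object size and offset-0 free pointer as assumptions:

	/* Simplified model of new_slab() threading the freelist through a
	 * fresh slab. Sizes and the offset-0 free pointer are assumptions. */
	#include <stdio.h>

	#define OBJECTS	4
	#define SIZE	32

	int main(void)
	{
		static char slab[OBJECTS * SIZE];	/* stand-in for the page */
		char *last = slab;

		for (char *p = slab + SIZE; p < slab + OBJECTS * SIZE; p += SIZE) {
			*(void **)last = p;	/* set_freepointer(s, last, p) */
			last = p;
		}
		*(void **)last = NULL;		/* set_freepointer(s, last, NULL) */

		int nr = 0;
		for (void *p = slab; p; p = *(void **)p)
			nr++;
		printf("freelist length: %d\n", nr);	/* prints 4 */
		return 0;
	}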
@@ -1152,7 +1133,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		void *p;
 
 		slab_pad_check(s, page);
-		for_each_object(p, s, slab_address(page))
+		for_each_object(p, s, page_address(page))
 			check_object(s, page, p, 0);
 		ClearSlabDebug(page);
 	}
@@ -1162,7 +1143,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		-pages);
 
-	page->mapping = NULL;
 	__free_pages(page, s->order);
 }
 
@@ -1366,7 +1346,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	ClearSlabFrozen(page);
 	if (page->inuse) {
 
-		if (page->freelist != page->end) {
+		if (page->freelist) {
 			add_partial(n, page, tail);
 			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
@@ -1410,12 +1390,8 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	 * Merge cpu freelist into freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
 	 * to occur.
-	 *
-	 * We need to use _is_end here because deactivate slab may
-	 * be called for a debug slab. Then c->freelist may contain
-	 * a dummy pointer.
 	 */
-	while (unlikely(!is_end(c->freelist))) {
+	while (unlikely(c->freelist)) {
 		void **object;
 
 		tail = 0;	/* Hot objects. Put the slab first */
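deactivate_slab() drains the remaining cpu freelist back into the page freelist one object at a time, popping from the cpu list and pushing onto the page list head; with the dummy end pointer gone, a plain NULL test terminates the drain. A userspace model of that merge, assuming offset-0 free pointers:

	/* Model of the freelist merge in deactivate_slab(): pop from the cpu
	 * list, push onto the page list, until the cpu list is empty (NULL). */
	#include <stdio.h>

	int main(void)
	{
		void *a[2] = { &a[1], NULL };	/* cpu freelist: a[0] -> a[1] */
		void *b[1] = { NULL };		/* page freelist: b[0] */
		void *cpu = &a[0], *page = &b[0];

		while (cpu) {
			void **object = cpu;
			cpu = object[0];	/* c->freelist = object[c->offset] */
			object[0] = page;	/* object[c->offset] = page->freelist */
			page = object;		/* page->freelist = object */
		}

		int nr = 0;
		for (void *p = page; p; p = *(void **)p)
			nr++;
		printf("page freelist length: %d\n", nr);	/* prints 3 */
		return 0;
	}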
@@ -1517,7 +1493,7 @@ static void *__slab_alloc(struct kmem_cache *s,
 	stat(c, ALLOC_REFILL);
 load_freelist:
 	object = c->page->freelist;
-	if (unlikely(object == c->page->end))
+	if (unlikely(!object))
 		goto another_slab;
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
@@ -1525,7 +1501,7 @@ load_freelist:
 	object = c->page->freelist;
 	c->freelist = object[c->offset];
 	c->page->inuse = s->objects;
-	c->page->freelist = c->page->end;
+	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
 unlock_out:
 	slab_unlock(c->page);
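This is the refill fast path: the page's whole freelist moves to the cpu slab in one step, c->freelist takes the chain after the object being handed out (object[c->offset] is its stored free pointer), and the page is marked fully in use with its freelist now NULL. A sketch of that handoff, again assuming offset-0 free pointers:

	/* Sketch of the refill handoff in load_freelist: the page's chain
	 * moves wholesale to the cpu structure. */
	#include <stdio.h>

	int main(void)
	{
		void *obj[3] = { &obj[1], &obj[2], NULL };	/* page freelist */
		void *page_freelist = &obj[0];
		void *cpu_freelist;

		void **object = page_freelist;	/* object handed to the caller */
		cpu_freelist = object[0];	/* c->freelist = object[c->offset] */
		page_freelist = NULL;		/* c->page->freelist = NULL */

		printf("allocated %p, cpu head %p, page freelist %p\n",
		       (void *)object, cpu_freelist, page_freelist);
		return 0;
	}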
@@ -1607,7 +1583,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(is_end(c->freelist) || !node_match(c, node)))
+	if (unlikely(!c->freelist || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -1677,7 +1653,7 @@ checks_ok:
 	 * was not on the partial list before
 	 * then add it.
 	 */
-	if (unlikely(prior == page->end)) {
+	if (unlikely(!prior)) {
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
 		stat(c, FREE_ADD_PARTIAL);
 	}
@@ -1687,7 +1663,7 @@ out_unlock:
 	return;
 
 slab_empty:
-	if (prior != page->end) {
+	if (prior) {
 		/*
 		 * Slab still on the partial list.
 		 */
@@ -1910,7 +1886,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 			struct kmem_cache_cpu *c)
 {
 	c->page = NULL;
-	c->freelist = (void *)PAGE_MAPPING_ANON;
+	c->freelist = NULL;
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
@@ -3199,7 +3175,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
 	void *p;
-	void *addr = slab_address(page);
+	void *addr = page_address(page);
 
 	if (!check_slab(s, page) ||
 			!on_freelist(s, page, NULL))
@@ -3482,7 +3458,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
 		struct page *page, enum track_item alloc)
 {
-	void *addr = slab_address(page);
+	void *addr = page_address(page);
 	DECLARE_BITMAP(map, s->objects);
 	void *p;
 