Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  54
1 files changed, 34 insertions, 20 deletions
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -301,7 +301,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
                 return 1;
 
         base = page_address(page);
-        if (object < base || object >= base + s->objects * s->size ||
+        if (object < base || object >= base + page->objects * s->size ||
                 (object - base) % s->size) {
                 return 0;
         }
@@ -451,8 +451,8 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-        printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
-                page, page->inuse, page->freelist, page->flags);
+        printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+                page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -652,6 +652,7 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
                         p + off, POISON_INUSE, s->size - off);
 }
 
+/* Check the pad bytes at the end of a slab page */
 static int slab_pad_check(struct kmem_cache *s, struct page *page)
 {
         u8 *start;
@@ -664,20 +665,20 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
                 return 1;
 
         start = page_address(page);
-        end = start + (PAGE_SIZE << s->order);
-        length = s->objects * s->size;
-        remainder = end - (start + length);
+        length = (PAGE_SIZE << s->order);
+        end = start + length;
+        remainder = length % s->size;
         if (!remainder)
                 return 1;
 
-        fault = check_bytes(start + length, POISON_INUSE, remainder);
+        fault = check_bytes(end - remainder, POISON_INUSE, remainder);
         if (!fault)
                 return 1;
         while (end > fault && end[-1] == POISON_INUSE)
                 end--;
 
         slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-        print_section("Padding", start, length);
+        print_section("Padding", end - remainder, remainder);
 
         restore_bytes(s, "slab padding", POISON_INUSE, start, end);
         return 0;
@@ -739,15 +740,24 @@ static int check_object(struct kmem_cache *s, struct page *page,
 
 static int check_slab(struct kmem_cache *s, struct page *page)
 {
+        int maxobj;
+
         VM_BUG_ON(!irqs_disabled());
 
         if (!PageSlab(page)) {
                 slab_err(s, page, "Not a valid slab page");
                 return 0;
         }
-        if (page->inuse > s->objects) {
+
+        maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
+        if (page->objects > maxobj) {
+                slab_err(s, page, "objects %u > max %u",
+                        s->name, page->objects, maxobj);
+                return 0;
+        }
+        if (page->inuse > page->objects) {
                 slab_err(s, page, "inuse %u > max %u",
-                        s->name, page->inuse, s->objects);
+                        s->name, page->inuse, page->objects);
                 return 0;
         }
         /* Slab_pad_check fixes things up after itself */
@@ -765,7 +775,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
         void *fp = page->freelist;
         void *object = NULL;
 
-        while (fp && nr <= s->objects) {
+        while (fp && nr <= page->objects) {
                 if (fp == search)
                         return 1;
                 if (!check_valid_pointer(s, page, fp)) {
@@ -777,7 +787,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
                         } else {
                                 slab_err(s, page, "Freepointer corrupt");
                                 page->freelist = NULL;
-                                page->inuse = s->objects;
+                                page->inuse = page->objects;
                                 slab_fix(s, "Freelist cleared");
                                 return 0;
                         }
@@ -788,10 +798,10 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
                 nr++;
         }
 
-        if (page->inuse != s->objects - nr) {
+        if (page->inuse != page->objects - nr) {
                 slab_err(s, page, "Wrong object count. Counter is %d but "
-                        "counted were %d", page->inuse, s->objects - nr);
-                page->inuse = s->objects - nr;
+                        "counted were %d", page->inuse, page->objects - nr);
+                page->inuse = page->objects - nr;
                 slab_fix(s, "Object count adjusted.");
         }
         return search == NULL;
@@ -910,7 +920,7 @@ bad:
                  * as used avoids touching the remaining objects.
                  */
                 slab_fix(s, "Marking all objects used");
-                page->inuse = s->objects;
+                page->inuse = page->objects;
                 page->freelist = NULL;
         }
         return 0;
@@ -1081,6 +1091,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
         if (!page)
                 return NULL;
 
+        page->objects = s->objects;
         mod_zone_page_state(page_zone(page),
                 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1519,7 +1530,7 @@ load_freelist:
                 goto debug;
 
         c->freelist = object[c->offset];
-        c->page->inuse = s->objects;
+        c->page->inuse = c->page->objects;
         c->page->freelist = NULL;
         c->node = page_to_nid(c->page);
 unlock_out:
@@ -1818,6 +1829,9 @@ static inline int slab_order(int size, int min_objects,
         int rem;
         int min_order = slub_min_order;
 
+        if ((PAGE_SIZE << min_order) / size > 65535)
+                return get_order(size * 65535) - 1;
+
         for (order = max(min_order,
                                 fls(min_objects * size - 1) - PAGE_SHIFT);
                         order <= max_order; order++) {
@@ -3251,7 +3265,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
                 return 0;
 
         /* Now we know that a valid freelist exists */
-        bitmap_zero(map, s->objects);
+        bitmap_zero(map, page->objects);
 
         for_each_free_object(p, s, page->freelist) {
                 set_bit(slab_index(p, s, addr), map);
@@ -3528,10 +3542,10 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
                 struct page *page, enum track_item alloc)
 {
         void *addr = page_address(page);
-        DECLARE_BITMAP(map, s->objects);
+        DECLARE_BITMAP(map, page->objects);
         void *p;
 
-        bitmap_zero(map, s->objects);
+        bitmap_zero(map, page->objects);
         for_each_free_object(p, s, page->freelist)
                 set_bit(slab_index(p, s, addr), map);
 
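
Note: the recurring substitution in this patch replaces the cache-wide s->objects count with a per-page page->objects count, and slab_pad_check() now derives the trailing padding as length % s->size instead of end - (start + length). The 65535 guard added to slab_order() appears to keep the per-slab object count within what the (narrow) page->objects field can hold. As a rough illustration of the padding arithmetic only, here is a standalone userspace sketch (not kernel code; the 4 KiB page size and the object sizes are assumptions made up for the example):

#include <stdio.h>

/*
 * Sketch: for an assumed 4 KiB base page, compute how many objects of a
 * given size fit into a slab of a given order and how many padding bytes
 * remain at the end -- the same arithmetic the patched slab_pad_check()
 * uses (remainder = length % size).
 */
#define EXAMPLE_PAGE_SIZE 4096UL

static void slab_layout(unsigned int order, unsigned long size)
{
        unsigned long length = EXAMPLE_PAGE_SIZE << order;  /* bytes in the slab */
        unsigned long objects = length / size;               /* objects that fit */
        unsigned long remainder = length % size;             /* trailing pad bytes */

        printf("order %u, object size %4lu: %3lu objects, %3lu pad bytes\n",
               order, size, objects, remainder);
}

int main(void)
{
        slab_layout(0, 192);    /* 4096 / 192   -> 21 objects, 64 pad bytes  */
        slab_layout(1, 700);    /* 8192 / 700   -> 11 objects, 492 pad bytes */
        slab_layout(3, 4096);   /* 32768 / 4096 -> 8 objects, 0 pad bytes    */
        return 0;
}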