Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c |  238
1 file changed, 121 insertions(+), 117 deletions(-)
@@ -78,10 +78,18 @@
  *
  * Overloading of page flags that are otherwise used for LRU management.
  *
- * PageActive           The slab is used as a cpu cache. Allocations
- *                      may be performed from the slab. The slab is not
- *                      on any slab list and cannot be moved onto one.
- *                      The cpu slab may be equipped with an additioanl
+ * PageActive           The slab is frozen and exempt from list processing.
+ *                      This means that the slab is dedicated to a purpose
+ *                      such as satisfying allocations for a specific
+ *                      processor. Objects may be freed in the slab while
+ *                      it is frozen but slab_free will then skip the usual
+ *                      list operations. It is up to the processor holding
+ *                      the slab to integrate the slab into the slab lists
+ *                      when the slab is no longer needed.
+ *
+ *                      One use of this flag is to mark slabs that are
+ *                      used for allocations. Then such a slab becomes a cpu
+ *                      slab. The cpu slab may be equipped with an additional
  *                      lockless_freelist that allows lockless access to
  *                      free objects in addition to the regular freelist
  *                      that requires the slab lock.
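The comment block added above is the conceptual core of this patch: a slab can be frozen, which exempts it from partial/full list processing until whoever froze it puts it back. The hunks below implement this with SlabFrozen()/SetSlabFrozen()/ClearSlabFrozen(), lock_and_freeze_slab() and unfreeze_slab(). As a rough user-space model of that protocol (every name and type below is invented for illustration; this is not an excerpt from mm/slub.c), the deferred list maintenance looks like this:

        #include <stdbool.h>
        #include <stdio.h>

        /* Toy model of a slab page: a frozen flag plus list membership. */
        struct toy_slab {
                bool frozen;            /* models the FROZEN page flag      */
                bool on_partial_list;   /* models membership on n->partial  */
                int inuse;              /* objects currently allocated      */
        };

        /* Model of lock_and_freeze_slab(): pull the slab off the list, freeze it. */
        static void freeze(struct toy_slab *s)
        {
                s->on_partial_list = false;
                s->frozen = true;
        }

        /* Model of slab_free(): while frozen, skip all list bookkeeping. */
        static void toy_free(struct toy_slab *s)
        {
                s->inuse--;
                if (s->frozen)
                        return;         /* owner repairs the lists later */
                s->on_partial_list = (s->inuse != 0);
        }

        /* Model of unfreeze_slab(): reintegrate the slab into the lists. */
        static void unfreeze(struct toy_slab *s)
        {
                s->frozen = false;
                s->on_partial_list = (s->inuse != 0);
        }

        int main(void)
        {
                struct toy_slab s = { .frozen = false, .on_partial_list = true, .inuse = 3 };

                freeze(&s);             /* cpu takes ownership                      */
                toy_free(&s);           /* frees during the frozen window are cheap */
                toy_free(&s);
                unfreeze(&s);           /* list bookkeeping happens exactly once    */
                printf("inuse=%d on_partial=%d frozen=%d\n",
                       s.inuse, s.on_partial_list, s.frozen);
                return 0;
        }

The point is that frees arriving while the slab is frozen only touch the object accounting; the list manipulation is deferred to the single unfreeze step.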
@@ -91,27 +99,42 @@
  * the fast path and disables lockless freelists.
  */

-static inline int SlabDebug(struct page *page)
-{
+#define FROZEN (1 << PG_active)
+
 #ifdef CONFIG_SLUB_DEBUG
-        return PageError(page);
+#define SLABDEBUG (1 << PG_error)
 #else
-        return 0;
+#define SLABDEBUG 0
 #endif
+
+static inline int SlabFrozen(struct page *page)
+{
+        return page->flags & FROZEN;
+}
+
+static inline void SetSlabFrozen(struct page *page)
+{
+        page->flags |= FROZEN;
+}
+
+static inline void ClearSlabFrozen(struct page *page)
+{
+        page->flags &= ~FROZEN;
+}
+
+static inline int SlabDebug(struct page *page)
+{
+        return page->flags & SLABDEBUG;
 }

 static inline void SetSlabDebug(struct page *page)
 {
-#ifdef CONFIG_SLUB_DEBUG
-        SetPageError(page);
-#endif
+        page->flags |= SLABDEBUG;
 }

 static inline void ClearSlabDebug(struct page *page)
 {
-#ifdef CONFIG_SLUB_DEBUG
-        ClearPageError(page);
-#endif
+        page->flags &= ~SLABDEBUG;
 }

 /*
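A note on the helpers introduced above: rather than the PageActive()/PageError() wrappers used before, the bits are open-coded as FROZEN and SLABDEBUG masks on page->flags, so a !CONFIG_SLUB_DEBUG build defines SLABDEBUG as 0 and the debug accessors collapse to no-ops the compiler can discard. A standalone sketch of the same pattern on a mock page structure (the bit numbers and the struct are stand-ins, not the kernel's definitions):

        #include <stdio.h>

        /* Stand-in bit numbers; the real PG_active/PG_error live in page-flags.h. */
        #define PG_active 6
        #define PG_error  8

        #define FROZEN (1UL << PG_active)

        /* Pretend CONFIG_SLUB_DEBUG is off: the mask is 0, the helpers vanish. */
        #define SLABDEBUG 0UL

        struct mock_page {
                unsigned long flags;
        };

        static inline int SlabFrozen(struct mock_page *page)
        {
                return (page->flags & FROZEN) != 0;
        }

        static inline void SetSlabFrozen(struct mock_page *page)
        {
                page->flags |= FROZEN;
        }

        static inline void ClearSlabFrozen(struct mock_page *page)
        {
                page->flags &= ~FROZEN;
        }

        static inline void SetSlabDebug(struct mock_page *page)
        {
                page->flags |= SLABDEBUG;       /* |= 0: optimized away entirely */
        }

        int main(void)
        {
                struct mock_page page = { .flags = 0 };

                SetSlabFrozen(&page);
                SetSlabDebug(&page);            /* no effect in the !debug build */
                printf("frozen=%d flags=%#lx\n", SlabFrozen(&page), page.flags);
                ClearSlabFrozen(&page);
                printf("frozen=%d flags=%#lx\n", SlabFrozen(&page), page.flags);
                return 0;
        }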
@@ -719,6 +742,22 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
         return search == NULL;
 }

+static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
+{
+        if (s->flags & SLAB_TRACE) {
+                printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+                        s->name,
+                        alloc ? "alloc" : "free",
+                        object, page->inuse,
+                        page->freelist);
+
+                if (!alloc)
+                        print_section("Object", (void *)object, s->objsize);
+
+                dump_stack();
+        }
+}
+
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
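trace() is only being moved up here (its old copy is deleted further down) so the new alloc_debug_processing()/free_debug_processing() helpers can call it. With SLAB_TRACE set on a cache it prints one line per allocation or free in the format shown above, plus an object dump and stack trace on free. A tiny user-space approximation of that line, with made-up values (only the format is taken from the patch; printf's %p already adds a 0x prefix, so the literal "0x" from the printk format is dropped here):

        #include <stdio.h>

        int main(void)
        {
                /* Hypothetical cache name, pointers and counts for illustration. */
                char object[64], freelist[64];
                int inuse = 3;

                printf("TRACE %s %s %p inuse=%d fp=%p\n",
                        "kmalloc-64", "alloc", (void *)object, inuse, (void *)freelist);
                return 0;
        }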
@@ -743,8 +782,18 @@ static void remove_full(struct kmem_cache *s, struct page *page)
         spin_unlock(&n->list_lock);
 }

-static int alloc_object_checks(struct kmem_cache *s, struct page *page,
+static void setup_object_debug(struct kmem_cache *s, struct page *page,
                                                                 void *object)
+{
+        if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
+                return;
+
+        init_object(s, object, 0);
+        init_tracking(s, object);
+}
+
+static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+                                        void *object, void *addr)
 {
         if (!check_slab(s, page))
                 goto bad;
@@ -759,13 +808,16 @@ static int alloc_object_checks(struct kmem_cache *s, struct page *page,
                 goto bad;
         }

-        if (!object)
-                return 1;
-
-        if (!check_object(s, page, object, 0))
+        if (object && !check_object(s, page, object, 0))
                 goto bad;

+        /* Success perform special debug activities for allocs */
+        if (s->flags & SLAB_STORE_USER)
+                set_track(s, object, TRACK_ALLOC, addr);
+        trace(s, page, object, 1);
+        init_object(s, object, 1);
         return 1;
+
 bad:
         if (PageSlab(page)) {
                 /*
@@ -783,8 +835,8 @@ bad:
         return 0;
 }

-static int free_object_checks(struct kmem_cache *s, struct page *page,
-                                                        void *object)
+static int free_debug_processing(struct kmem_cache *s, struct page *page,
+                                        void *object, void *addr)
 {
         if (!check_slab(s, page))
                 goto fail;
@@ -818,29 +870,22 @@ static int free_object_checks(struct kmem_cache *s, struct page *page,
                         "to slab %s", object, page->slab->name);
                 goto fail;
         }
+
+        /* Special debug activities for freeing objects */
+        if (!SlabFrozen(page) && !page->freelist)
+                remove_full(s, page);
+        if (s->flags & SLAB_STORE_USER)
+                set_track(s, object, TRACK_FREE, addr);
+        trace(s, page, object, 0);
+        init_object(s, object, 0);
         return 1;
+
 fail:
         printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
                         s->name, page, object);
         return 0;
 }

-static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
-{
-        if (s->flags & SLAB_TRACE) {
-                printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
-                        s->name,
-                        alloc ? "alloc" : "free",
-                        object, page->inuse,
-                        page->freelist);
-
-                if (!alloc)
-                        print_section("Object", (void *)object, s->objsize);
-
-                dump_stack();
-        }
-}
-
 static int __init setup_slub_debug(char *str)
 {
         if (!str || *str != '=')
@@ -891,13 +936,13 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
         * On 32 bit platforms the limit is 256k. On 64bit platforms
         * the limit is 512k.
         *
-        * Debugging or ctor/dtors may create a need to move the free
+        * Debugging or ctor may create a need to move the free
         * pointer. Fail if this happens.
         */
-        if (s->size >= 65535 * sizeof(void *)) {
+        if (s->objsize >= 65535 * sizeof(void *)) {
                 BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
                                 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-                BUG_ON(s->ctor || s->dtor);
+                BUG_ON(s->ctor);
         }
         else
                 /*
@@ -909,26 +954,20 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
         s->flags |= slub_debug;
 }
 #else
+static inline void setup_object_debug(struct kmem_cache *s,
+                        struct page *page, void *object) {}

-static inline int alloc_object_checks(struct kmem_cache *s,
-                struct page *page, void *object) { return 0; }
+static inline int alloc_debug_processing(struct kmem_cache *s,
+        struct page *page, void *object, void *addr) { return 0; }

-static inline int free_object_checks(struct kmem_cache *s,
-                struct page *page, void *object) { return 0; }
+static inline int free_debug_processing(struct kmem_cache *s,
+        struct page *page, void *object, void *addr) { return 0; }

-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
-static inline void trace(struct kmem_cache *s, struct page *page,
-                        void *object, int alloc) {}
-static inline void init_object(struct kmem_cache *s,
-                        void *object, int active) {}
-static inline void init_tracking(struct kmem_cache *s, void *object) {}
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                         { return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
                         void *object, int active) { return 1; }
-static inline void set_track(struct kmem_cache *s, void *object,
-                        enum track_item alloc, void *addr) {}
+static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
 #define slub_debug 0
 #endif
@@ -965,13 +1004,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 static void setup_object(struct kmem_cache *s, struct page *page,
                                 void *object)
 {
-        if (SlabDebug(page)) {
-                init_object(s, object, 0);
-                init_tracking(s, object);
-        }
-
+        setup_object_debug(s, page, object);
         if (unlikely(s->ctor))
-                s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
+                s->ctor(object, s, 0);
 }

 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1030,15 +1065,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 {
         int pages = 1 << s->order;

-        if (unlikely(SlabDebug(page) || s->dtor)) {
+        if (unlikely(SlabDebug(page))) {
                 void *p;

                 slab_pad_check(s, page);
-                for_each_object(p, s, page_address(page)) {
-                        if (s->dtor)
-                                s->dtor(p, s, 0);
+                for_each_object(p, s, page_address(page))
                         check_object(s, page, p, 0);
-                }
         }

         mod_zone_page_state(page_zone(page),
@@ -1138,11 +1170,12 @@ static void remove_partial(struct kmem_cache *s,
  *
  * Must hold list_lock.
  */
-static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
 {
         if (slab_trylock(page)) {
                 list_del(&page->lru);
                 n->nr_partial--;
+                SetSlabFrozen(page);
                 return 1;
         }
         return 0;
@@ -1166,7 +1199,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)

         spin_lock(&n->list_lock);
         list_for_each_entry(page, &n->partial, lru)
-                if (lock_and_del_slab(n, page))
+                if (lock_and_freeze_slab(n, page))
                         goto out;
         page = NULL;
 out:
@@ -1245,10 +1278,11 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  *
  * On exit the slab lock will have been dropped.
  */
-static void putback_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 {
         struct kmem_cache_node *n = get_node(s, page_to_nid(page));

+        ClearSlabFrozen(page);
         if (page->inuse) {

                 if (page->freelist)
@@ -1299,9 +1333,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
                 page->inuse--;
         }
         s->cpu_slab[cpu] = NULL;
-        ClearPageActive(page);
-
-        putback_slab(s, page);
+        unfreeze_slab(s, page);
 }

 static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
@@ -1392,9 +1424,7 @@ another_slab:
 new_slab:
         page = get_partial(s, gfpflags, node);
         if (page) {
-have_slab:
                 s->cpu_slab[cpu] = page;
-                SetPageActive(page);
                 goto load_freelist;
         }

@@ -1424,17 +1454,15 @@ have_slab:
                         flush_slab(s, s->cpu_slab[cpu], cpu);
                 }
                 slab_lock(page);
-                goto have_slab;
+                SetSlabFrozen(page);
+                s->cpu_slab[cpu] = page;
+                goto load_freelist;
         }
         return NULL;
 debug:
         object = page->freelist;
-        if (!alloc_object_checks(s, page, object))
+        if (!alloc_debug_processing(s, page, object, addr))
                 goto another_slab;
-        if (s->flags & SLAB_STORE_USER)
-                set_track(s, object, TRACK_ALLOC, addr);
-        trace(s, page, object, 1);
-        init_object(s, object, 1);

         page->inuse++;
         page->freelist = object[page->offset];
@@ -1511,11 +1539,7 @@ checks_ok:
         page->freelist = object;
         page->inuse--;

-        if (unlikely(PageActive(page)))
-                /*
-                 * Cpu slabs are never on partial lists and are
-                 * never freed.
-                 */
+        if (unlikely(SlabFrozen(page)))
                 goto out_unlock;

         if (unlikely(!page->inuse))
@@ -1545,14 +1569,8 @@ slab_empty:
         return;

 debug:
-        if (!free_object_checks(s, page, x))
+        if (!free_debug_processing(s, page, x, addr))
                 goto out_unlock;
-        if (!PageActive(page) && !page->freelist)
-                remove_full(s, page);
-        if (s->flags & SLAB_STORE_USER)
-                set_track(s, x, TRACK_FREE, addr);
-        trace(s, page, object, 0);
-        init_object(s, object, 0);
         goto checks_ok;
 }

@@ -1789,7 +1807,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
         page->freelist = get_freepointer(kmalloc_caches, n);
         page->inuse++;
         kmalloc_caches->node[node] = n;
-        init_object(kmalloc_caches, n, 1);
+        setup_object_debug(kmalloc_caches, page, n);
         init_kmem_cache_node(n);
         atomic_long_inc(&n->nr_slabs);
         add_partial(n, page);
@@ -1871,7 +1889,7 @@ static int calculate_sizes(struct kmem_cache *s)
         * then we should never poison the object itself.
         */
        if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
-                        !s->ctor && !s->dtor)
+                        !s->ctor)
                s->flags |= __OBJECT_POISON;
        else
                s->flags &= ~__OBJECT_POISON;
@@ -1899,9 +1917,8 @@ static int calculate_sizes(struct kmem_cache *s)
         */
        s->inuse = size;

-#ifdef CONFIG_SLUB_DEBUG
        if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
-                        s->ctor || s->dtor)) {
+                        s->ctor)) {
                /*
                 * Relocate free pointer after the object if it is not
                 * permitted to overwrite the first word of the object on
@@ -1914,6 +1931,7 @@ static int calculate_sizes(struct kmem_cache *s)
                size += sizeof(void *);
        }

+#ifdef CONFIG_SLUB_DEBUG
        if (flags & SLAB_STORE_USER)
                /*
                 * Need to store information about allocs and frees after
@@ -1970,13 +1988,11 @@ static int calculate_sizes(struct kmem_cache *s)
 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
                const char *name, size_t size,
                size_t align, unsigned long flags,
-                void (*ctor)(void *, struct kmem_cache *, unsigned long),
-                void (*dtor)(void *, struct kmem_cache *, unsigned long))
+                void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
        memset(s, 0, kmem_size);
        s->name = name;
        s->ctor = ctor;
-        s->dtor = dtor;
        s->objsize = size;
        s->flags = flags;
        s->align = align;
@@ -2161,7 +2177,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,

        down_write(&slub_lock);
        if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-                        flags, NULL, NULL))
+                        flags, NULL))
                goto panic;

        list_add(&s->list, &slab_caches);
@@ -2463,7 +2479,7 @@ static int slab_unmergeable(struct kmem_cache *s)
        if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
                return 1;

-        if (s->ctor || s->dtor)
+        if (s->ctor)
                return 1;

        return 0;
@@ -2471,15 +2487,14 @@ static int slab_unmergeable(struct kmem_cache *s)

 static struct kmem_cache *find_mergeable(size_t size,
                size_t align, unsigned long flags,
-                void (*ctor)(void *, struct kmem_cache *, unsigned long),
-                void (*dtor)(void *, struct kmem_cache *, unsigned long))
+                void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
        struct list_head *h;

        if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
                return NULL;

-        if (ctor || dtor)
+        if (ctor)
                return NULL;

        size = ALIGN(size, sizeof(void *));
@@ -2521,8 +2536,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 {
        struct kmem_cache *s;

+        BUG_ON(dtor);
        down_write(&slub_lock);
-        s = find_mergeable(size, align, flags, dtor, ctor);
+        s = find_mergeable(size, align, flags, ctor);
        if (s) {
                s->refcount++;
                /*
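The prototype of kmem_cache_create() keeps its dtor parameter in this patch, but a non-NULL destructor now trips the BUG_ON() added above, since SLUB no longer runs destructors anywhere (see the __free_slab() hunk earlier). A hypothetical caller on this interface therefore passes NULL for dtor; struct my_obj, my_ctor and the cache name below are invented, and the snippet is kernel-style illustration rather than a standalone program:

        struct my_obj {
                int id;
                char name[16];
        };

        /* Constructor signature as used by the ctor pointers in this diff. */
        static void my_ctor(void *obj, struct kmem_cache *s, unsigned long flags)
        {
                memset(obj, 0, sizeof(struct my_obj));
        }

        static struct kmem_cache *my_cache;

        static int __init my_module_init(void)
        {
                /* The dtor argument must now be NULL; anything else hits BUG_ON(dtor). */
                my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
                                             0, SLAB_HWCACHE_ALIGN, my_ctor, NULL);
                return my_cache ? 0 : -ENOMEM;
        }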
@@ -2536,7 +2552,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
        } else {
                s = kmalloc(kmem_size, GFP_KERNEL);
                if (s && kmem_cache_open(s, GFP_KERNEL, name,
-                                size, align, flags, ctor, dtor)) {
+                                size, align, flags, ctor)) {
                        if (sysfs_slab_add(s)) {
                                kfree(s);
                                goto err;
@@ -3177,17 +3193,6 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(ctor);

-static ssize_t dtor_show(struct kmem_cache *s, char *buf)
-{
-        if (s->dtor) {
-                int n = sprint_symbol(buf, (unsigned long)s->dtor);
-
-                return n + sprintf(buf + n, "\n");
-        }
-        return 0;
-}
-SLAB_ATTR_RO(dtor);
-
 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 {
        return sprintf(buf, "%d\n", s->refcount - 1);
@@ -3419,7 +3424,6 @@ static struct attribute * slab_attrs[] = {
        &partial_attr.attr,
        &cpu_slabs_attr.attr,
        &ctor_attr.attr,
-        &dtor_attr.attr,
        &aliases_attr.attr,
        &align_attr.attr,
        &sanity_checks_attr.attr,