author    Namhoon Kim <namhoonk@cs.unc.edu>  2017-09-06 08:50:44 -0400
committer Namhoon Kim <namhoonk@cs.unc.edu>  2017-09-06 08:50:44 -0400
commit    84e36eb17cd9e5bb4bfb6cc4e29f55542fa69fde
tree      9df230af0722c75a01b546f10568f4ea96572262
parent    c652e8d303af087269b93eac7c4bf52626818f7f
fix PGFREE and NR_FREE_PAGES
-rw-r--r--  include/linux/mmzone.h          1
-rw-r--r--  include/linux/slab.h            2
-rw-r--r--  include/linux/slub_def.h        2
-rw-r--r--  include/linux/vm_event_item.h   2
-rw-r--r--  include/linux/vmstat.h          7
-rw-r--r--  litmus/page_dev.c               2
-rw-r--r--  mm/page_alloc.c                35
-rw-r--r--  mm/page_isolation.c             6
-rw-r--r--  mm/slab.h                       2
-rw-r--r--  mm/slab_common.c               48
-rw-r--r--  mm/slub.c                       7
-rw-r--r--  mm/vmstat.c                     2
12 files changed, 93 insertions, 23 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d28f7ef8228d..750c0b64fa96 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -168,6 +168,7 @@ enum zone_stat_item {
 	WORKINGSET_NODERECLAIM,
 	NR_ANON_TRANSPARENT_HUGEPAGES,
 	NR_FREE_CMA_PAGES,
+	NR_FREE_HC_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
 /*
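
Note: NR_FREE_HC_PAGES is an ordinary zone_stat_item, so the existing vmstat helpers apply to it unchanged. A minimal sketch of reading it back (helper name from the stock kernel of this era):

	#include <linux/vmstat.h>

	/* Sum of higher-criticality free pages across all zones. */
	static unsigned long hc_free_pages(void)
	{
		return global_page_state(NR_FREE_HC_PAGES);
	}
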
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ffd24c830151..a899dda28def 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -87,6 +87,8 @@
 # define SLAB_FAILSLAB		0x00000000UL
 #endif
 
+#define SLAB_NO_MERGE		0x04000000UL	/* Do not merge with existing slab */
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 33885118523c..9400aa1e9128 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -98,6 +98,8 @@ struct kmem_cache {
 	 */
 	int remote_node_defrag_ratio;
 #endif
+	/* cpu id for higher-criticality slabs */
+	int cpu_id;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 9246d32dc973..3f5a9da27d7a 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -23,7 +23,7 @@
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
-		PGFREE, PGACTIVATE, PGDEACTIVATE,
+		PGFREE, PGFREE_HC, PGACTIVATE, PGDEACTIVATE,
 		PGFAULT, PGMAJFAULT,
 		FOR_ALL_ZONES(PGREFILL),
 		FOR_ALL_ZONES(PGSTEAL_KSWAPD),
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 82e7db7f7100..b6410f7efd74 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -278,9 +278,12 @@ static inline void drain_zonestat(struct zone *zone,
 #endif		/* CONFIG_SMP */
 
 static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
-					     int migratetype)
+					     int migratetype, int part_no)
 {
-	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
+	if (part_no == NR_CPUS)
+		__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
+	else
+		__mod_zone_page_state(zone, NR_FREE_HC_PAGES, nr_pages);
 	if (is_migrate_cma(migratetype))
 		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
 }
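
__mod_zone_freepage_state() now routes accounting by DRAM partition: part_no == NR_CPUS denotes the shared pool (NR_FREE_PAGES), anything below NR_CPUS a per-CPU bank partition (NR_FREE_HC_PAGES). page_bank() and bank_to_partition() are defined elsewhere in this tree; the sketch below only illustrates the assumed contract, with made-up bank-bit positions:

	#include <linux/mm.h>

	#define BANK_SHIFT	13	/* assumed: position of bank bits in the PFN */
	#define BANK_MASK	0x7	/* assumed: eight DRAM banks */

	/* Illustration only: extract a page's DRAM bank from its PFN. */
	static inline int page_bank_sketch(struct page *page)
	{
		return (page_to_pfn(page) >> BANK_SHIFT) & BANK_MASK;
	}

	/* Illustration only: banks owned by a CPU map to that CPU's
	 * partition; everything else lands in the shared pool. */
	static inline int bank_to_partition_sketch(int bank)
	{
		return bank < NR_CPUS ? bank : NR_CPUS;
	}
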
diff --git a/litmus/page_dev.c b/litmus/page_dev.c
index 8e29e68ed89a..2894e93213d3 100644
--- a/litmus/page_dev.c
+++ b/litmus/page_dev.c
@@ -226,7 +226,7 @@ static struct ctl_table partition_table[] =
 		.maxlen = sizeof(llc_partition[4]),
 		.extra1 = &dram_partition_min,
 		.extra2 = &dram_partition_max,
-	},	
+	},
 	{ }
 };
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4611656df49a..e240fcd3039d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -485,7 +485,7 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 	INIT_LIST_HEAD(&page->lru);
 	set_page_private(page, order);
 	/* Guard pages are not available for any usage */
-	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
+	__mod_zone_freepage_state(zone, -(1 << order), migratetype, bank_to_partition(page_bank(page)));
 }
 
 static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -501,7 +501,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 
 	set_page_private(page, 0);
 	if (!is_migrate_isolate(migratetype))
-		__mod_zone_freepage_state(zone, (1 << order), migratetype);
+		__mod_zone_freepage_state(zone, (1 << order), migratetype, bank_to_partition(page_bank(page)));
 }
 #else
 struct page_ext_operations debug_guardpage_ops = { NULL, };
@@ -629,7 +629,7 @@ static inline void __free_one_page(struct page *page,
 		 */
 		max_order = min(MAX_ORDER, pageblock_order + 1);
 	} else {
-		__mod_zone_freepage_state(zone, 1 << order, migratetype);
+		__mod_zone_freepage_state(zone, 1 << order, migratetype, parti_no);
 	}
 
 	page_idx = pfn & ((1 << max_order) - 1);
@@ -694,7 +694,7 @@ out:
 	if (is_migrate_isolate(migratetype)) {
 		max_order = min(MAX_PARTITIONED_ORDER, pageblock_order + 1);
 	} else {
-		__mod_zone_freepage_state(zone, 1 << order, migratetype);
+		__mod_zone_freepage_state(zone, 1 << order, migratetype, parti_no);
 	}
 
 	page_idx = pfn & ((1 << max_order) - 1);
@@ -927,7 +927,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	local_irq_save(flags);
-	__count_vm_events(PGFREE, 1 << order);
+	if (bank_to_partition(page_bank(page)) == NR_CPUS)
+		__count_vm_events(PGFREE, 1 << order);
+	else if (bank_to_partition(page_bank(page)) < NR_CPUS)
+		__count_vm_events(PGFREE_HC, 1 << order);
 	set_freepage_migratetype(page, migratetype);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
 	local_irq_restore(flags);
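
The same two-way test (partition == NR_CPUS for the shared pool, < NR_CPUS for a per-CPU partition) recurs in free_hot_cold_page() further down; it could be factored into one helper. A sketch, assuming only the declarations introduced by this patch:

	/* Sketch: count a free of 2^order pages against the right event. */
	static inline void count_free_pages_event(struct page *page,
						  unsigned int order)
	{
		if (bank_to_partition(page_bank(page)) == NR_CPUS)
			__count_vm_events(PGFREE, 1 << order);
		else
			__count_vm_events(PGFREE_HC, 1 << order);
	}
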
@@ -1097,6 +1100,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 /* Kernel page coloring */
 
 /* build colored page list */
+#if 0
 static void build_colored_pages(struct zone *zone, struct page *page, int order)
 {
 	int i, color, bank;
@@ -1167,6 +1171,7 @@ static inline struct page *get_colored_page(struct zone *zone, unsigned long req
 	printk(KERN_INFO "color=%d, bank=%d allocated\n", color, bank);
 	return page;
 }
+#endif
 
 /*
  * Go through the free lists for the given migratetype and remove
@@ -1181,8 +1186,11 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	struct page *page;
 	int cpu = raw_smp_processor_id();
 
-	if (order == 0 && color_req == 1) {
+	// if (order <= 2 && color_req == 1) {
+	/* The max. order of color_req is <= 2 */
+	if (color_req == 1) {
 		int found = 0;
+		printk(KERN_INFO "COLOR PAGE requested on CPU%d with order = %d\n", cpu, order);
 		/* Find a page of the appropriate size in the preferred list */
 		for (current_order = order; current_order < MAX_PARTITIONED_ORDER; ++current_order) {
 			area = &(zone->free_area_d[cpu][current_order]);
@@ -1555,7 +1563,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
 					      -(1 << order));
 		}
-	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
+	__mod_zone_page_state(zone, NR_FREE_HC_PAGES, -(i << order));
 	spin_unlock(&zone->lock);
 	return i;
 }
@@ -1752,8 +1760,13 @@ void free_hot_cold_page(struct page *page, bool cold)
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	set_freepage_migratetype(page, migratetype);
 	local_irq_save(flags);
-	__count_vm_event(PGFREE);
+
 
+	if (bank_to_partition(page_bank(page)) == NR_CPUS)
+		__count_vm_event(PGFREE);
+	else if (bank_to_partition(page_bank(page)) < NR_CPUS)
+		__count_vm_event(PGFREE_HC);
+
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
 	 * Free ISOLATE pages back to the allocator because they are being
@@ -1861,7 +1874,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
 			return 0;
 
-		__mod_zone_freepage_state(zone, -(1UL << order), mt);
+		__mod_zone_freepage_state(zone, -(1UL << order), mt, bank_to_partition(page_bank(page)));
 	}
 
 	/* Remove page from free list */
@@ -1966,7 +1979,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 		if (!page)
 			goto failed;
 		__mod_zone_freepage_state(zone, -(1 << order),
-					  get_freepage_migratetype(page));
+					  get_freepage_migratetype(page), bank_to_partition(page_bank(page)));
 	}
 
 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 303c908790ef..ed05910069ff 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -58,12 +58,13 @@ out:
 	if (!ret) {
 		unsigned long nr_pages;
 		int migratetype = get_pageblock_migratetype(page);
+		int partno = bank_to_partition(page_bank(page));
 
 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 		zone->nr_isolate_pageblock++;
 		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
 
-		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
+		__mod_zone_freepage_state(zone, -nr_pages, migratetype, partno);
 	}
 
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -117,8 +118,9 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 	 * pageblock scanning for freepage moving.
 	 */
 	if (!isolated_page) {
+		int partno = bank_to_partition(page_bank(page));
 		nr_pages = move_freepages_block(zone, page, migratetype);
-		__mod_zone_freepage_state(zone, nr_pages, migratetype);
+		__mod_zone_freepage_state(zone, nr_pages, migratetype, partno);
 	}
 	set_pageblock_migratetype(page, migratetype);
 	zone->nr_isolate_pageblock--;
diff --git a/mm/slab.h b/mm/slab.h
index 4c3ac12dd644..48be14c4ec7b 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -114,7 +114,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
+			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_NO_MERGE)
 
 #if defined(CONFIG_DEBUG_SLAB)
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 999bb3424d44..dee018acaeaf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -35,7 +35,7 @@ struct kmem_cache *kmem_cache;
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB)
+		SLAB_FAILSLAB | SLAB_NO_MERGE)
 
 #define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
 		SLAB_CACHE_DMA | SLAB_NOTRACK)
@@ -703,7 +703,9 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
 		panic("Out of memory when creating slab %s\n", name);
 
 	create_boot_cache(s, name, size, flags);
+
 	list_add(&s->list, &slab_caches);
+
 	s->refcount = 1;
 	return s;
 }
@@ -711,6 +713,11 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
 struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_caches);
 
+/* for per-cpu kmalloc objects */
+struct kmem_cache *hc_kmalloc_caches[NR_CPUS][KMALLOC_SHIFT_HIGH + 1];
+//struct kmem_cache *hc_kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(hc_kmalloc_caches);
+
 #ifdef CONFIG_ZONE_DMA
 struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_dma_caches);
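
Nothing in this hunk allocates from the new array yet; a hypothetical wrapper (hc_kmalloc() is not part of this patch) would pick the current CPU's cache by size index. Note that the per-cpu array only gets power-of-two caches below (the 96/192-byte ones stay commented out), so indices 1 and 2 would be NULL:

	#include <linux/slab.h>
	#include <linux/smp.h>

	/* Hypothetical: allocate from the current CPU's HC kmalloc cache. */
	static inline void *hc_kmalloc(size_t size, gfp_t flags)
	{
		int index;

		if (!size)
			return ZERO_SIZE_PTR;
		index = kmalloc_index(size);	/* size -> cache slot */
		return kmem_cache_alloc(
			hc_kmalloc_caches[raw_smp_processor_id()][index],
			flags);
	}
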
@@ -790,7 +797,7 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
  */
 void __init create_kmalloc_caches(unsigned long flags)
 {
-	int i;
+	int i, cpu = 0;
 
 	/*
 	 * Patch up the size_index table if we have strange large alignment
@@ -837,6 +844,7 @@ void __init create_kmalloc_caches(unsigned long flags)
 		if (!kmalloc_caches[i]) {
 			kmalloc_caches[i] = create_kmalloc_cache(NULL,
 							1 << i, flags);
+printk(KERN_INFO "KMALLOC-%d CACHE CREATED\n", 1<<i);
 		}
 
 		/*
@@ -844,11 +852,43 @@ void __init create_kmalloc_caches(unsigned long flags)
 		 * These have to be created immediately after the
 		 * earlier power of two caches
 		 */
-		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
+		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6) {
 			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
+printk(KERN_INFO "KMALLOC-96 CACHE CREATED\n");
+		}
 
-		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
+		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7) {
 			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
+printk(KERN_INFO "KMALLOC-192 CACHE CREATED\n");
+		}
+	}
+
+/* per-cpu kmalloc caches */
+	printk(KERN_INFO "SLAB_STATE = %d\n", slab_state);
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		//cpu = 0;
+		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+			char *n;
+			n = kasprintf(GFP_NOWAIT, "hc%01d-kmalloc-%d", cpu, kmalloc_size(i));
+			hc_kmalloc_caches[cpu][i] = create_kmalloc_cache(n, 1 << i, SLAB_NO_MERGE|flags);
+			hc_kmalloc_caches[cpu][i]->cpu_id = cpu;
+			printk(KERN_INFO "CPU%d HC-KMALLOC-%d CACHE CREATED\n", cpu, 1<<i);
+			printk(KERN_INFO "HC-KMALLOC-%d slabs freelist=%p, pages=%p, partial=%p\n", 1<<i, hc_kmalloc_caches[cpu][i]->cpu_slab->freelist, hc_kmalloc_caches[cpu][i]->cpu_slab->page, hc_kmalloc_caches[cpu][i]->cpu_slab->partial);
+
+
+			/*
+
+			if (KMALLOC_MIN_SIZE <= 32 && !pc_kmalloc_caches[cpu][1] && i == 6) {
+				pc_kmalloc_caches[cpu][1] = create_kmalloc_cache(NULL, 96, flags);
+				printk(KERN_INFO "PC-KMALLOC-96 CACHE CREATED\n");
+			}
+
+			if (KMALLOC_MIN_SIZE <= 64 && !pc_kmalloc_caches[cpu][2] && i == 7) {
+				pc_kmalloc_caches[cpu][2] = create_kmalloc_cache(NULL, 192, flags);
+				printk(KERN_INFO "PC-KMALLOC-192 CACHE CREATED\n");
+			}
+			*/
+		}
 	}
 
 	/* Kmalloc array is now usable */
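
The loop above sizes its work by NR_CPUS and does not check the kasprintf() allocation. A sketch of a tighter variant with the same effect, assuming only possible CPUs ever need a partition:

	for_each_possible_cpu(cpu) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			char *n = kasprintf(GFP_NOWAIT, "hc%01d-kmalloc-%d",
					    cpu, kmalloc_size(i));

			if (!n)		/* kasprintf() can return NULL */
				continue;
			hc_kmalloc_caches[cpu][i] =
				create_kmalloc_cache(n, 1 << i,
						     SLAB_NO_MERGE | flags);
			hc_kmalloc_caches[cpu][i]->cpu_id = cpu;
		}
	}
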
diff --git a/mm/slub.c b/mm/slub.c
index 54c0876b43d5..2727a6fc403f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2770,7 +2770,8 @@ EXPORT_SYMBOL(kmem_cache_free);
  * take the list_lock.
  */
 static int slub_min_order;
-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+//static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+static int slub_max_order = 2;
 static int slub_min_objects;
 
 /*
@@ -5139,6 +5140,10 @@ static char *create_unique_id(struct kmem_cache *s)
 	 * are matched during merging to guarantee that the id is
 	 * unique.
 	 */
+	if (s->flags & SLAB_NO_MERGE) {
+		*p++ = 'n';
+		p += sprintf(p, "%01d", s->cpu_id);
+	}
 	if (s->flags & SLAB_CACHE_DMA)
 		*p++ = 'd';
 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
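
With the 'n' marker plus cpu_id emitted ahead of the other flag characters, two per-cpu caches of the same object size no longer collide on the same sysfs alias id. A userspace sketch of the resulting string (the leading ':' and trailing zero-padded size field are assumed from create_unique_id()):

	#include <stdio.h>

	int main(void)
	{
		char id[32], *p = id;
		int cpu_id = 2, size = 64;	/* example values */

		*p++ = ':';
		*p++ = 'n';			/* SLAB_NO_MERGE marker */
		p += sprintf(p, "%01d", cpu_id);
		p += sprintf(p, "-%07d", size);	/* size field, format assumed */
		*p = '\0';
		printf("%s\n", id);		/* prints ":n2-0000064" */
		return 0;
	}
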
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4bbf65f7335b..0d748d23dc4c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -739,6 +739,7 @@ const char * const vmstat_text[] = {
739 "workingset_nodereclaim", 739 "workingset_nodereclaim",
740 "nr_anon_transparent_hugepages", 740 "nr_anon_transparent_hugepages",
741 "nr_free_cma", 741 "nr_free_cma",
742 "nr_free_hc_pages",
742 743
743 /* enum writeback_stat_item counters */ 744 /* enum writeback_stat_item counters */
744 "nr_dirty_threshold", 745 "nr_dirty_threshold",
@@ -754,6 +755,7 @@ const char * const vmstat_text[] = {
 	TEXTS_FOR_ZONES("pgalloc")
 
 	"pgfree",
+	"pgfree_hc",
 	"pgactivate",
 	"pgdeactivate",
 
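
Once booted with this patch, the two new counters appear in /proc/vmstat under exactly the strings added above. A small userspace check, as a sketch:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "nr_free_hc_pages", 16) ||
			    !strncmp(line, "pgfree_hc", 9))
				fputs(line, stdout);	/* print name and value */
		fclose(f);
		return 0;
	}
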