From 5cc515b26848d31deda903fc19e5534723cf4a35 Mon Sep 17 00:00:00 2001 From: Namhoon Kim Date: Tue, 12 Sep 2017 15:57:53 -0400 Subject: ORDER 2 per-partition alloc --- litmus/page_dev.c | 89 ++++++++++++++++ mm/page_alloc.c | 313 ++++++++++++++++++++++++++++++++++-------------------- mm/slab_common.c | 37 ++++--- mm/slub.c | 36 ++++++- mm/vmstat.c | 5 +- 5 files changed, 347 insertions(+), 133 deletions(-) diff --git a/litmus/page_dev.c b/litmus/page_dev.c index 2894e93213d3..2fd829b05a0a 100644 --- a/litmus/page_dev.c +++ b/litmus/page_dev.c @@ -98,6 +98,81 @@ unsigned int llc_partition_min = 0; unsigned int dram_partition_max = 0x000000ff; unsigned int dram_partition_min = 0; +/* slabtest module */ +int buf_size = 0; +int buf_num = 1; + +int slabtest_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret = 0, i; + int** testbuffer; + mutex_lock(&dev_mutex); + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + + if (ret) + goto out; + + if (write) { + int idx; + int n_data = buf_size/sizeof(int); + + testbuffer = kmalloc(sizeof(int*)*buf_num, GFP_KERNEL|GFP_COLOR); + + for (idx=0; idx NR_CPUS); - if (parti_no < 0 || parti_no > NR_CPUS) - printk(KERN_ALERT "PART_NO %d\n", parti_no); - if (parti_no < NR_CPUS) - printk(KERN_ALERT "pfn = %lx, part_no = %d order = %d\n", pfn, parti_no, order); + //if (parti_no < NR_CPUS) + //printk(KERN_ALERT "pfn = %lx, part_no = %d order = %d\n", pfn, parti_no, order); - if (parti_no == NR_CPUS) { - max_order = MAX_ORDER; + if (parti_no < NR_CPUS) { + max_order = MAX_PARTITIONED_ORDER; VM_BUG_ON(!zone_is_initialized(zone)); VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); VM_BUG_ON(migratetype == -1); if (is_migrate_isolate(migratetype)) { - /* - * We restrict max order of merging to prevent merge - * between freepages on isolate pageblock and normal - * pageblock. Without this, pageblock isolation - * could cause incorrect freepage accounting. - */ - max_order = min(MAX_ORDER, pageblock_order + 1); + max_order = min(MAX_PARTITIONED_ORDER, pageblock_order + 1); } else { __mod_zone_freepage_state(zone, 1 << order, migratetype, parti_no); } @@ -642,15 +634,12 @@ static inline void __free_one_page(struct page *page, buddy = page + (buddy_idx - page_idx); if (!page_is_buddy(page, buddy, order)) break; - /* - * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, - * merge with it and move up one order. - */ + if (page_is_guard(buddy)) { clear_page_guard(zone, buddy, order, migratetype); } else { list_del(&buddy->lru); - zone->free_area[order].nr_free--; + zone->free_area_d[parti_no][order].nr_free--; rmv_page_order(buddy); } combined_idx = buddy_idx & page_idx; @@ -660,15 +649,7 @@ static inline void __free_one_page(struct page *page, } set_page_order(page, order); - /* - * If this is not the largest possible page, check if the buddy - * of the next-highest order is free. If it is, it's possible - * that pages are being freed that will coalesce soon. 
In case, - * that is happening, add the free page to the tail of the list - * so it's less likely to be used soon and more likely to be merged - * as a higher order page - */ - if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { + if ((order < MAX_PARTITIONED_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { struct page *higher_page, *higher_buddy; combined_idx = buddy_idx & page_idx; higher_page = page + (combined_idx - page_idx); @@ -676,23 +657,43 @@ static inline void __free_one_page(struct page *page, higher_buddy = higher_page + (buddy_idx - combined_idx); if (page_is_buddy(higher_page, higher_buddy, order + 1)) { list_add_tail(&page->lru, - &zone->free_area[order].free_list[migratetype]); - goto out; + &zone->free_area_d[parti_no][order].free_list[migratetype]); + zone->free_area_d[parti_no][order].nr_free++; + return; } } - list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); -out: - zone->free_area[order].nr_free++; - } else { - max_order = MAX_PARTITIONED_ORDER; + if (order >= MAX_PARTITIONED_ORDER) { + int n_idx = 0; + struct page *lower_page; + for (n_idx = 0 ; n_idx < (1 << (order - MAX_PARTITIONED_ORDER + 1)); n_idx++) { + lower_page = page + (n_idx << (MAX_PARTITIONED_ORDER - 1)); + if (lower_page->flags & PAGE_FLAGS_CHECK_AT_PREP) + lower_page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; + set_page_order(lower_page, MAX_PARTITIONED_ORDER-1); + list_add(&lower_page->lru, &zone->free_area_d[parti_no][MAX_PARTITIONED_ORDER-1].free_list[migratetype]); + zone->free_area_d[parti_no][MAX_PARTITIONED_ORDER-1].nr_free++; + } + } else { + list_add(&page->lru, &zone->free_area_d[parti_no][order].free_list[migratetype]); + zone->free_area_d[parti_no][order].nr_free++; + } + } + else { + max_order = MAX_ORDER; VM_BUG_ON(!zone_is_initialized(zone)); VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); VM_BUG_ON(migratetype == -1); if (is_migrate_isolate(migratetype)) { - max_order = min(MAX_PARTITIONED_ORDER, pageblock_order + 1); + /* + * We restrict max order of merging to prevent merge + * between freepages on isolate pageblock and normal + * pageblock. Without this, pageblock isolation + * could cause incorrect freepage accounting. + */ + max_order = min(MAX_ORDER, pageblock_order + 1); } else { __mod_zone_freepage_state(zone, 1 << order, migratetype, parti_no); } @@ -707,12 +708,15 @@ out: buddy = page + (buddy_idx - page_idx); if (!page_is_buddy(page, buddy, order)) break; - + /* + * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, + * merge with it and move up one order. + */ if (page_is_guard(buddy)) { clear_page_guard(zone, buddy, order, migratetype); } else { list_del(&buddy->lru); - zone->free_area_d[parti_no][order].nr_free--; + zone->free_area[order].nr_free--; rmv_page_order(buddy); } combined_idx = buddy_idx & page_idx; @@ -722,7 +726,15 @@ out: } set_page_order(page, order); - if ((order < MAX_PARTITIONED_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { + /* + * If this is not the largest possible page, check if the buddy + * of the next-highest order is free. If it is, it's possible + * that pages are being freed that will coalesce soon. 
In case, + * that is happening, add the free page to the tail of the list + * so it's less likely to be used soon and more likely to be merged + * as a higher order page + */ + if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { struct page *higher_page, *higher_buddy; combined_idx = buddy_idx & page_idx; higher_page = page + (combined_idx - page_idx); @@ -730,25 +742,14 @@ out: higher_buddy = higher_page + (buddy_idx - combined_idx); if (page_is_buddy(higher_page, higher_buddy, order + 1)) { list_add_tail(&page->lru, - &zone->free_area_d[parti_no][order].free_list[migratetype]); - zone->free_area_d[parti_no][order].nr_free++; - return; + &zone->free_area[order].free_list[migratetype]); + goto out; } } - if (order >= MAX_PARTITIONED_ORDER) { - int n_idx = 0; - struct page *lower_page; - for (n_idx = 0 ; n_idx < (1 << (order - MAX_PARTITIONED_ORDER + 1)); n_idx++) { - lower_page = page + (n_idx << (MAX_PARTITIONED_ORDER - 1)); - set_page_order(lower_page, MAX_PARTITIONED_ORDER-1); - list_add(&lower_page->lru, &zone->free_area_d[parti_no][MAX_PARTITIONED_ORDER-1].free_list[migratetype]); - zone->free_area_d[parti_no][MAX_PARTITIONED_ORDER-1].nr_free++; - } - } else { - list_add(&page->lru, &zone->free_area_d[parti_no][order].free_list[migratetype]); - zone->free_area_d[parti_no][order].nr_free++; - } + list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); +out: + zone->free_area[order].nr_free++; } } @@ -1190,12 +1191,24 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, /* The max. order of color_req is <= 2 */ if (color_req == 1) { int found = 0; - printk(KERN_INFO "COLOR PAGE requested on CPU%d with order = %d\n", cpu, order); + unsigned long s_pfn = zone->zone_start_pfn; + unsigned long e_pfn = zone_end_pfn(zone); + printk(KERN_INFO "COLOR PAGE requested on CPU%d with order = %d migratetype = %d\n", cpu, order, migratetype); /* Find a page of the appropriate size in the preferred list */ for (current_order = order; current_order < MAX_PARTITIONED_ORDER; ++current_order) { area = &(zone->free_area_d[cpu][current_order]); - if (list_empty(&area->free_list[migratetype])) + if (list_empty(&area->free_list[migratetype])) { + printk(KERN_INFO "order %d list empty\n", current_order); continue; + } + + + { + list_for_each_entry(page, &area->free_list[migratetype], lru) { + printk(KERN_INFO "__rmqueue_smallest list entry %p color %d\n", page, page_color(page)); + } + } + printk(KERN_INFO "__rmqueue_smallest LAST list entry %p\n", page); page = list_entry(area->free_list[migratetype].next, struct page, lru); @@ -1204,14 +1217,17 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, while(!found) { page = list_next_entry(page, lru); - if (is_in_llc_partition(page, cpu)) + if (is_in_llc_partition(page, cpu) && (page_to_pfn(page) >= s_pfn && page_to_pfn(page) < e_pfn)) found = 1; } + BUG_ON(found == 0); + list_del(&page->lru); rmv_page_order(page); area->nr_free--; expand(zone, page, order, current_order, area, migratetype); set_freepage_migratetype(page, migratetype); + printk(KERN_INFO "COLOR %d page return %p\n", page_color(page), page); return page; } } else { @@ -1445,7 +1461,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order, /* Remove an element from the buddy allocator from the fallback list */ static inline struct page * -__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) +__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype, int 
color_req) { struct free_area *area; unsigned int current_order; @@ -1453,44 +1469,104 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) int fallback_mt; bool can_steal; - /* Find the largest possible block of pages in the other list */ - for (current_order = MAX_ORDER-1; - current_order >= order && current_order <= MAX_ORDER-1; - --current_order) { - area = &(zone->free_area[current_order]); - fallback_mt = find_suitable_fallback(area, current_order, - start_migratetype, false, &can_steal); - if (fallback_mt == -1) - continue; + if (color_req == 1) { + int cpu = raw_smp_processor_id(); + int found = 0; + /* Find the largest possible block of pages in the other list */ + for (current_order = MAX_PARTITIONED_ORDER-1; + current_order >= order && current_order <= MAX_PARTITIONED_ORDER-1; + --current_order) { + area = &(zone->free_area_d[cpu][current_order]); + fallback_mt = find_suitable_fallback(area, current_order, + start_migratetype, false, &can_steal); + if (fallback_mt == -1) + continue; + +/* + { + list_for_each_entry(page, &area->free_list[fallback_mt], lru) { + printk(KERN_INFO "__rmqueue_fallback list entry %p color %d\n", page, page_color(page)); + } + } +*/ + + page = list_entry(area->free_list[fallback_mt].next, + struct page, lru); + if (is_in_llc_partition(page, cpu)) + found = 1; + + while(!found) { + page = list_next_entry(page, lru); + if (is_in_llc_partition(page, cpu)) + found = 1; + } + + if (can_steal) + steal_suitable_fallback(zone, page, start_migratetype); - page = list_entry(area->free_list[fallback_mt].next, - struct page, lru); - if (can_steal) - steal_suitable_fallback(zone, page, start_migratetype); + /* Remove the page from the freelists */ + area->nr_free--; + list_del(&page->lru); + rmv_page_order(page); - /* Remove the page from the freelists */ - area->nr_free--; - list_del(&page->lru); - rmv_page_order(page); + expand(zone, page, order, current_order, area, + start_migratetype); + /* + * The freepage_migratetype may differ from pageblock's + * migratetype depending on the decisions in + * try_to_steal_freepages(). This is OK as long as it + * does not differ for MIGRATE_CMA pageblocks. For CMA + * we need to make sure unallocated pages flushed from + * pcp lists are returned to the correct freelist. + */ + set_freepage_migratetype(page, start_migratetype); - expand(zone, page, order, current_order, area, - start_migratetype); - /* - * The freepage_migratetype may differ from pageblock's - * migratetype depending on the decisions in - * try_to_steal_freepages(). This is OK as long as it - * does not differ for MIGRATE_CMA pageblocks. For CMA - * we need to make sure unallocated pages flushed from - * pcp lists are returned to the correct freelist. 
- */ - set_freepage_migratetype(page, start_migratetype); + trace_mm_page_alloc_extfrag(page, order, current_order, + start_migratetype, fallback_mt); + + printk(KERN_INFO "__rmqueue_fallback(): CPU%d COLOR %d page return %p\n", cpu, page_color(page), page); + return page; + } + } else { + /* Find the largest possible block of pages in the other list */ + for (current_order = MAX_ORDER-1; + current_order >= order && current_order <= MAX_ORDER-1; + --current_order) { + area = &(zone->free_area[current_order]); + fallback_mt = find_suitable_fallback(area, current_order, + start_migratetype, false, &can_steal); + if (fallback_mt == -1) + continue; - trace_mm_page_alloc_extfrag(page, order, current_order, - start_migratetype, fallback_mt); + page = list_entry(area->free_list[fallback_mt].next, + struct page, lru); + if (can_steal) + steal_suitable_fallback(zone, page, start_migratetype); - return page; - } + /* Remove the page from the freelists */ + area->nr_free--; + list_del(&page->lru); + rmv_page_order(page); + + expand(zone, page, order, current_order, area, + start_migratetype); + /* + * The freepage_migratetype may differ from pageblock's + * migratetype depending on the decisions in + * try_to_steal_freepages(). This is OK as long as it + * does not differ for MIGRATE_CMA pageblocks. For CMA + * we need to make sure unallocated pages flushed from + * pcp lists are returned to the correct freelist. + */ + set_freepage_migratetype(page, start_migratetype); + + trace_mm_page_alloc_extfrag(page, order, current_order, + start_migratetype, fallback_mt); + return page; + } + } + return NULL; } @@ -1510,8 +1586,11 @@ retry_reserve: if (migratetype == MIGRATE_MOVABLE) page = __rmqueue_cma_fallback(zone, order); - if (!page) - page = __rmqueue_fallback(zone, order, migratetype); + if (!page) { + page = __rmqueue_fallback(zone, order, migratetype, color_req); + if (color_req) + printk(KERN_INFO "page received from __rmqueue_fallback()"); + } /* * Use MIGRATE_RESERVE rather than fail an allocation. goto @@ -1541,7 +1620,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, spin_lock(&zone->lock); for (i = 0; i < count; ++i) { - struct page *page = __rmqueue(zone, order, migratetype, 1); + struct page *page = __rmqueue(zone, order, migratetype, 0); if (unlikely(page == NULL)) break; @@ -1563,7 +1642,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, -(1 << order)); } - __mod_zone_page_state(zone, NR_FREE_HC_PAGES, -(i << order)); + __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); spin_unlock(&zone->lock); return i; } @@ -1751,8 +1830,8 @@ void free_hot_cold_page(struct page *page, bool cold) unsigned long flags; unsigned long pfn = page_to_pfn(page); int migratetype; - unsigned int cpu; - int is_local, is_in_pcp; + //unsigned int part_no; + //int is_local, is_hc_page; if (!free_pages_prepare(page, 0)) return; @@ -1765,7 +1844,8 @@ void free_hot_cold_page(struct page *page, bool cold) if (bank_to_partition(page_bank(page)) == NR_CPUS) __count_vm_event(PGFREE); else if (bank_to_partition(page_bank(page)) < NR_CPUS) - __count_vm_event(PGFREE_HC); + //__count_vm_event(PGFREE_HC); + BUG(); /* * We only track unmovable, reclaimable and movable on pcp lists. 
@@ -1782,18 +1862,18 @@ void free_hot_cold_page(struct page *page, bool cold) migratetype = MIGRATE_MOVABLE; } - cpu = bank_to_partition(page_bank(page)); - BUG_ON(cpu<0); + //part_no = bank_to_partition(page_bank(page)); + //BUG_ON(part_no<0); - if (cpu == smp_processor_id()) - is_local = 1; - else - is_local = 0; + //if (part_no == smp_processor_id()) + // is_local = 1; + //else + // is_local = 0; - is_in_pcp = is_in_llc_partition(page, smp_processor_id()); - if (cpu != NR_CPUS) - printk(KERN_ALERT "CPU%d Free order-0 page bank = %d, color = %d, is_local %d is_in_pcp %d\n", smp_processor_id(), page_bank(page), page_color(page), is_local, is_in_pcp); - if (is_local && is_in_pcp) { + //is_hc_page = is_in_llc_partition(page, smp_processor_id()); + //if (part_no != NR_CPUS) + // printk(KERN_ALERT "CPU%d Free order-0 page bank = %d, color = %d, is_local %d is_hc_page %d\n", smp_processor_id(), page_bank(page), page_color(page), is_local, is_hc_page); + //if (!is_local || !is_hc_page) { pcp = &this_cpu_ptr(zone->pageset)->pcp; if (!cold) list_add(&page->lru, &pcp->lists[migratetype]); @@ -1805,9 +1885,9 @@ void free_hot_cold_page(struct page *page, bool cold) free_pcppages_bulk(zone, batch, pcp); pcp->count -= batch; } - } else { - __free_page(page); - } +// } else { +// __free_page(page); +// } out: local_irq_restore(flags); } @@ -1936,8 +2016,11 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, struct page *page; bool cold = ((gfp_flags & __GFP_COLD) != 0); bool colored_req = ((gfp_flags & __GFP_COLOR) != 0); + +if (colored_req) + printk(KERN_INFO "buffered_rmqueue(): colored_req received\n"); - if (likely(order == 0) && colored_req) { + if (likely(order == 0) && !colored_req) { struct per_cpu_pages *pcp; struct list_head *list; @@ -1974,7 +2057,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, WARN_ON_ONCE(order > 1); } spin_lock_irqsave(&zone->lock, flags); - page = __rmqueue(zone, order, migratetype, 0); + page = __rmqueue(zone, order, migratetype, colored_req); spin_unlock(&zone->lock); if (!page) goto failed; @@ -3087,6 +3170,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, .migratetype = gfpflags_to_migratetype(gfp_mask), }; +if (gfp_mask&GFP_COLOR) + printk(KERN_INFO "__alloc_pages_nodemask(): called gfp %08x gfp_allowed_mask %08x mt = %d\n", gfp_mask, gfp_allowed_mask, ac.migratetype); + gfp_mask &= gfp_allowed_mask; lockdep_trace_alloc(gfp_mask); @@ -3181,8 +3267,9 @@ EXPORT_SYMBOL(get_zeroed_page); void __free_pages(struct page *page, unsigned int order) { + int parti_no = bank_to_partition(page_bank(page)); if (put_page_testzero(page)) { - if (order == 0) + if (order == 0 && parti_no == NR_CPUS) free_hot_cold_page(page, false); else __free_pages_ok(page, order); diff --git a/mm/slab_common.c b/mm/slab_common.c index dee018acaeaf..ff4d4c6f4129 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -787,7 +787,14 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) return kmalloc_dma_caches[index]; #endif - return kmalloc_caches[index]; + + if (flags & GFP_COLOR) { + int cpu = raw_smp_processor_id(); + printk(KERN_INFO "in kmalloc_slab index %d\n", index); + return hc_kmalloc_caches[cpu][index]; + } + else + return kmalloc_caches[index]; } /* @@ -841,6 +848,7 @@ void __init create_kmalloc_caches(unsigned long flags) size_index[size_index_elem(i)] = 8; } for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) { + printk(KERN_INFO "KMALLOC i = %d\n", i); if (!kmalloc_caches[i]) { kmalloc_caches[i] = create_kmalloc_cache(NULL, 1 << i, 
flags); @@ -866,28 +874,29 @@ printk(KERN_INFO "KMALLOC-192 CACHE CREATED\n"); /* per-cpu kmalloc caches */ printk(KERN_INFO "SLAB_STATE = %d\n", slab_state); for (cpu = 0; cpu < NR_CPUS; cpu++) { - //cpu = 0; for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) { char *n; - n = kasprintf(GFP_NOWAIT, "hc%01d-kmalloc-%d", cpu, kmalloc_size(i)); + n = kasprintf(GFP_NOWAIT, "cpu%01d-kmalloc-%d", cpu, kmalloc_size(i)); + + printk(KERN_INFO "HC-KMALLOC i = %d\n", i); hc_kmalloc_caches[cpu][i] = create_kmalloc_cache(n, 1 << i, SLAB_NO_MERGE|flags); hc_kmalloc_caches[cpu][i]->cpu_id = cpu; printk(KERN_INFO "CPU%d HC-KMALLOC-%d CACHE CREATED\n", cpu, 1<cpu_slab->freelist, hc_kmalloc_caches[cpu][i]->cpu_slab->page,hc_kmalloc_caches[cpu][i]->cpu_slab->partial); - - - /* + printk(KERN_INFO "HC-KMALLOC-%d slabs freelist=%p, pages=%p, partial=%p\n", 1<cpu_slab->freelist, hc_kmalloc_caches[cpu][i]->cpu_slab->page,hc_kmalloc_caches[cpu][i]->cpu_slab->partial); - if (KMALLOC_MIN_SIZE <= 32 && !pc_kmalloc_caches[cpu][1] && i == 6) { - pc_kmalloc_caches[cpu][1] = create_kmalloc_cache(NULL, 96, flags); - printk(KERN_INFO "PC-KMALLOC-96 CACHE CREATED\n"); + if (KMALLOC_MIN_SIZE <= 32 && !hc_kmalloc_caches[cpu][1] && i == 6) { + char *nm; + nm = kasprintf(GFP_NOWAIT, "cpu%01d-kmalloc-%d", cpu, kmalloc_size(1)); + hc_kmalloc_caches[cpu][1] = create_kmalloc_cache(nm, 96, SLAB_NO_MERGE|flags); + printk(KERN_INFO "CPU%d HC-KMALLOC-96 CACHE CREATED\n", cpu); } - if (KMALLOC_MIN_SIZE <= 64 && !pc_kmalloc_caches[cpu][2] && i == 7) { - pc_kmalloc_caches[cpu][2] = create_kmalloc_cache(NULL, 192, flags); - printk(KERN_INFO "PC-KMALLOC-192 CACHE CREATED\n"); + if (KMALLOC_MIN_SIZE <= 64 && !hc_kmalloc_caches[cpu][2] && i == 7) { + char *nm; + nm = kasprintf(GFP_NOWAIT, "cpu%01d-kmalloc-%d", cpu, kmalloc_size(2)); + hc_kmalloc_caches[cpu][2] = create_kmalloc_cache(nm, 192, SLAB_NO_MERGE|flags); + printk(KERN_INFO "CPU%d HC-KMALLOC-192 CACHE CREATED\n", cpu); } - */ } } diff --git a/mm/slub.c b/mm/slub.c index 2727a6fc403f..1a2858905c54 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1320,8 +1320,11 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s, if (memcg_charge_slab(s, flags, order)) return NULL; - if (node == NUMA_NO_NODE) + if (node == NUMA_NO_NODE) { + if (flags&GFP_COLOR) + printk(KERN_INFO "alloc_pages calls with GFP_COLOR order = %d\n", order); page = alloc_pages(flags, order); + } else page = alloc_pages_exact_node(node, flags, order); @@ -1337,6 +1340,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) struct kmem_cache_order_objects oo = s->oo; gfp_t alloc_gfp; +if (flags&GFP_COLOR) + printk(KERN_INFO "gfp_allowed_mask = %08x\n", gfp_allowed_mask); + flags &= gfp_allowed_mask; if (flags & __GFP_WAIT) @@ -1349,7 +1355,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) * so we fall-back to the minimum order allocation. 
*/ alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; - +if (flags&__GFP_COLOR) { + printk(KERN_INFO "allocate_slab with GFP_COLOR alloc_gfp = %08x\n", alloc_gfp); +} page = alloc_slab_page(s, alloc_gfp, node, oo); if (unlikely(!page)) { oo = s->min; @@ -1419,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) } page = allocate_slab(s, - flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); + flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK | GFP_COLOR), node); if (!page) goto out; @@ -2223,6 +2231,11 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, return freelist; page = new_slab(s, flags, node); + +if (flags&GFP_COLOR) { + printk(KERN_INFO "new_slab_objects(): gets page %p\n", page); +} + if (page) { c = raw_cpu_ptr(s->cpu_slab); if (c->page) @@ -2308,6 +2321,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, void *freelist; struct page *page; unsigned long flags; +if (gfpflags&GFP_COLOR) + printk(KERN_INFO "__slab_alloc slow_path\n"); local_irq_save(flags); #ifdef CONFIG_PREEMPT @@ -2319,6 +2334,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, c = this_cpu_ptr(s->cpu_slab); #endif + +if (gfpflags&GFP_COLOR) { + printk(KERN_INFO "__slab_alloc : page %p, partial %p\n", c->page, c->partial); +} + page = c->page; if (!page) goto new_slab; @@ -3308,14 +3328,22 @@ void *__kmalloc(size_t size, gfp_t flags) struct kmem_cache *s; void *ret; +if (flags & GFP_COLOR) { + printk(KERN_INFO "kmalloc size %d\n", size); +} if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) return kmalloc_large(size, flags); s = kmalloc_slab(size, flags); - +if (flags & GFP_COLOR) { + printk(KERN_INFO "kmalloc_slab %p\n", s); +} if (unlikely(ZERO_OR_NULL_PTR(s))) return s; +if (flags & GFP_COLOR) { + printk(KERN_INFO "slab_alloc calls!!\n"); +} ret = slab_alloc(s, flags, _RET_IP_); trace_kmalloc(_RET_IP_, ret, size, s->size, flags); diff --git a/mm/vmstat.c b/mm/vmstat.c index 0d748d23dc4c..5df6edb32512 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1312,8 +1312,8 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, seq_printf(m, "\n vm stats threshold: %d", pageset->stat_threshold); #endif - /* test */ - seq_printf(m, "\n"); + /* pcp test */ +/* seq_printf(m, "\n"); for (mtype = 0; mtype < MIGRATE_PCPTYPES; mtype++) { struct page *p; list_for_each_entry(p, &pageset->pcp.lists[mtype], lru) { @@ -1321,6 +1321,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, seq_printf(m, "page bank = %d color = %d\n", page_bank(p), page_color(p)); } } +*/ } seq_printf(m, "\n all_unreclaimable: %u" -- cgit v1.2.2
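
A minimal standalone sketch (not part of the patch) of the per-partition buddy path this change introduces: for GFP_COLOR requests, __rmqueue_smallest() searches only zone->free_area_d[cpu][order] and expand() returns the split-off halves to the same partition's lists, so one partition's colored allocations never dip into another partition's free pages. All names below (NPART, MAX_PART_ORDER, struct block, pool) are invented for illustration, and the LLC-color filter (is_in_llc_partition()) and migratetype handling of the real code are omitted.

/* Illustrative userspace model of per-partition buddy allocation. */
#include <stdio.h>

#define NPART          4   /* assumed number of CPU partitions */
#define MAX_PART_ORDER 3   /* stand-in for MAX_PARTITIONED_ORDER */

struct block {
	struct block *next;
	int order;
};

/* free_lists[p][o]: free blocks of order o owned by partition p
 * (models zone->free_area_d[cpu][order].free_list). */
static struct block *free_lists[NPART][MAX_PART_ORDER];
static struct block pool[NPART][1 << (MAX_PART_ORDER - 1)];

/* Split a larger block down to the requested order, returning each
 * carved-off buddy to the same partition's lists (models expand()). */
static void split_to_order(int part, struct block *blk, int want, int have)
{
	while (have > want) {
		have--;
		struct block *buddy = blk + (1 << have);  /* upper half */
		buddy->order = have;
		buddy->next = free_lists[part][have];
		free_lists[part][have] = buddy;
	}
	blk->order = want;
}

/* Search only this partition's lists, smallest order first
 * (models the colored branch of __rmqueue_smallest()). */
static struct block *alloc_partitioned(int part, int order)
{
	for (int cur = order; cur < MAX_PART_ORDER; cur++) {
		struct block *blk = free_lists[part][cur];
		if (!blk)
			continue;               /* "order %d list empty" case */
		free_lists[part][cur] = blk->next;
		split_to_order(part, blk, order, cur);
		return blk;
	}
	return NULL;    /* caller would try __rmqueue_fallback() next */
}

int main(void)
{
	/* Seed each partition with one max-order block from its own pool. */
	for (int p = 0; p < NPART; p++) {
		pool[p][0].order = MAX_PART_ORDER - 1;
		free_lists[p][MAX_PART_ORDER - 1] = &pool[p][0];
	}

	struct block *b = alloc_partitioned(1, 0);
	printf("partition 1 got order-%d block; leftovers stay in partition 1\n",
	       b ? b->order : -1);
	return 0;
}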