author    Namhoon Kim <namhoonk@cs.unc.edu>  2017-09-13 05:48:56 -0400
committer Namhoon Kim <namhoonk@cs.unc.edu>  2017-09-13 05:48:56 -0400
commit    b0905375748cbc0fde1dfd7578bd4ff7ac47913b (patch)
tree      9dbf24179ead0d6b6d366ec18fa09e8a712f9cc9
parent    7ce89fa17531c28b08f03b37bcfd3eeb505aab10 (diff)
Impl. Done.
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  |   7
-rw-r--r--  arch/arm/mm/dma-mapping.c           |  15
-rw-r--r--  drivers/media/usb/uvc/uvc_video.c   |   4
-rw-r--r--  drivers/usb/core/buffer.c           |   1
-rw-r--r--  include/linux/mmzone.h              |   4
-rw-r--r--  include/linux/slab.h                |   6
-rw-r--r--  litmus/page_dev.c                   |   1
-rw-r--r--  mm/page_alloc.c                     | 116
-rw-r--r--  mm/slab_common.c                    |   6
-rw-r--r--  mm/slub.c                           |  64
10 files changed, 170 insertions, 54 deletions
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b52101d37ec7..fef26e00159d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -219,6 +219,13 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 	void *cpu_addr;
 	BUG_ON(!ops);
 
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flag&GFP_COLOR) {
+		printk(KERN_INFO "dma_alloc_attrs() \n");
+		printk(KERN_INFO "func: %pF at address: %p\n", ops->alloc, ops->alloc);
+	}
+#endif
+
 	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 	return cpu_addr;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index d0c70d97346b..e272fdcccc48 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -664,6 +664,11 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	size = PAGE_ALIGN(size);
 	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (gfp&GFP_COLOR)
+		printk(KERN_INFO "__dma_alloc() for usb buffer\n");
+#endif
+
 	if (is_coherent || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
@@ -689,6 +694,16 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
 
+	if ((gfp&GFP_COLOR) && (size > PAGE_SIZE*4)) {
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+		printk(KERN_INFO "arm_dma_alloc(): original prot %08x\n", prot);
+#endif
+		prot = pgprot_noncached(prot);
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+		printk(KERN_INFO "arm_dma_alloc(): set as uncacheable prot %08x\n", prot);
+#endif
+	}
+
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 20ccc9d315dc..454e6e83aa56 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1406,10 +1406,10 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
 #ifndef CONFIG_DMA_NONCOHERENT
 		stream->urb_buffer[i] = usb_alloc_coherent(
 			stream->dev->udev, stream->urb_size,
-			gfp_flags | __GFP_NOWARN, &stream->urb_dma[i]);
+			gfp_flags | __GFP_NOWARN | GFP_COLOR, &stream->urb_dma[i]);
 #else
 		stream->urb_buffer[i] =
-			kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
+			kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN | GFP_COLOR);
 #endif
 		if (!stream->urb_buffer[i]) {
 			uvc_free_urb_buffers(stream);
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 506b969ea7fd..b9af51400214 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -128,6 +128,7 @@ void *hcd_buffer_alloc(
 		if (size <= pool_max[i])
 			return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
 	}
+
 	return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags);
 }
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 750c0b64fa96..f0100050d1d6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -44,7 +44,9 @@
 #define CACHE_SHIFT		12
 #define MAX_NUM_COLOR		16
 #define MAX_NUM_BANK		8
-#define MAX_PARTITIONED_ORDER	3
+//#define MAX_PARTITIONED_ORDER	3
+#define MAX_PARTITIONED_ORDER	11
+#define MAX_CONTIG_ORDER	3
 
 enum {
 	MIGRATE_UNMOVABLE,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a899dda28def..6064df01e268 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -419,6 +419,12 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags&GFP_COLOR)
+		printk(KERN_INFO "kmalloc() is called with GFP_COLOR\n");
+#endif
+
 	if (__builtin_constant_p(size)) {
 		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);
diff --git a/litmus/page_dev.c b/litmus/page_dev.c
index 2fd829b05a0a..ea5d5f5cb36d 100644
--- a/litmus/page_dev.c
+++ b/litmus/page_dev.c
@@ -6,6 +6,7 @@
  */
 
 #include <litmus/page_dev.h>
+#include <litmus/debug_trace.h>
 
 // This Address Decoding is used in imx6-sabredsd platform
 #define NUM_BANKS	8
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 805c4f598b8c..90cf3ea441e0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1032,6 +1032,41 @@ static inline void expand(struct zone *zone, struct page *page,
 	}
 }
 
+static inline void expand_middle(struct zone *zone, struct page *page,
+	int offset, int low, int high, struct free_area *area,
+	int migratetype)
+{
+	unsigned long size = 1 << high;
+
+	while ((size>>1) > offset) {
+		area--;
+		high--;
+		size >>= 1;
+		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
+
+		list_add(&page[size].lru, &area->free_list[migratetype]);
+		area->nr_free++;
+		set_page_order(&page[size], high);
+	}
+	area--;
+	high--;
+	size >>= 1;
+	VM_BUG_ON_PAGE(bad_range(zone, page), page);
+	list_add(&page[0].lru, &area->free_list[migratetype]);
+	area->nr_free++;
+	set_page_order(&page[0], high);
+
+	if (offset == size)
+		return;
+
+	area--;
+	high--;
+	VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
+	list_add(&page[size].lru, &area->free_list[migratetype]);
+	area->nr_free++;
+	set_page_order(&page[size], high);
+}
+
 /*
  * This page is about to be returned from the page allocator
  */
@@ -1193,23 +1228,38 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	int found = 0;
 	unsigned long s_pfn = zone->zone_start_pfn;
 	unsigned long e_pfn = zone_end_pfn(zone);
-	printk(KERN_INFO "COLOR PAGE requested on CPU%d with order = %d migratetype = %d\n", cpu, order, migratetype);
+	TRACE("COLOR PAGE requested on CPU%d with order = %d migratetype = %d\n", cpu, order, migratetype);
 	/* Find a page of the appropriate size in the preferred list */
 	for (current_order = order; current_order < MAX_PARTITIONED_ORDER; ++current_order) {
+		int offset = 0;
 		area = &(zone->free_area_d[cpu][current_order]);
 		if (list_empty(&area->free_list[migratetype])) {
-			printk(KERN_INFO "order %d list empty\n", current_order);
+			TRACE("order %d list empty\n", current_order);
 			continue;
 		}
 
 		list_for_each_entry(page, &area->free_list[migratetype], lru) {
-			printk(KERN_INFO "__rmqueue_smallest list entry %p color %d\n", page, page_color(page));
-			if (is_in_llc_partition(page, cpu) && (page_to_pfn(page) >= s_pfn && page_to_pfn(page) < e_pfn)) {
-				found = 1;
-				break;
+			TRACE("__rmqueue_smallest list entry %p color %d pfn:%05lx\n", page, page_color(page), page_to_pfn(page));
+			if (current_order < MAX_CONTIG_ORDER) {
+				if (is_in_llc_partition(page, cpu) && (page_to_pfn(page) >= s_pfn && page_to_pfn(page) < e_pfn)) {
+					found = 1;
+					offset = 0;
+					break;
+				}
+			} else { // order >= 3 , must be uncacheable.
+				int size = 1 << current_order;
+				for (offset = 0; offset < size; offset += 4) {
+					if (is_in_llc_partition(&page[offset], cpu) && (page_to_pfn(&page[offset]) >= s_pfn && page_to_pfn(&page[offset]) < e_pfn)) {
+						found = 1;
+						break;
+					}
+				}
+				if (found)
+					break;
 			}
 		}
-		printk(KERN_INFO "__rmqueue_smallest LAST list entry %p\n", page);
+
+		TRACE("__rmqueue_smallest LAST list entry %p\n", page);
 
 		if (!found)
 			return NULL;
@@ -1219,7 +1269,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 			area->nr_free--;
 			expand(zone, page, order, current_order, area, migratetype);
 			set_freepage_migratetype(page, migratetype);
-			printk(KERN_INFO "COLOR %d page return %p\n", page_color(page), page);
+			TRACE("COLOR %d page return %p\n", page_color(page), page);
 			return page;
 		}
 	} else {
@@ -1470,7 +1520,8 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype,
 	/* Find the largest possible block of pages in the other list */
 	for (current_order = MAX_PARTITIONED_ORDER-1;
 				current_order >= order && current_order <= MAX_PARTITIONED_ORDER-1;
 				--current_order) {
+		int offset = 0;
 		area = &(zone->free_area_d[cpu][current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
 				start_migratetype, false, &can_steal);
@@ -1478,13 +1529,26 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype,
 			continue;
 
 		list_for_each_entry(page, &area->free_list[fallback_mt], lru) {
-			printk(KERN_INFO "__rmqueue_falback list entry %p color %d\n", page, page_color(page));
-			if (is_in_llc_partition(page, cpu) && (page_to_pfn(page) >= s_pfn && page_to_pfn(page) < e_pfn)) {
-				found = 1;
-				break;
+			TRACE("__rmqueue_fallback list entry %p color %d pfn:%05lx\n", page, page_color(page), page_to_pfn(page));
+			if (current_order < MAX_CONTIG_ORDER) {
+				if (is_in_llc_partition(page, cpu) && (page_to_pfn(page) >= s_pfn && page_to_pfn(page) < e_pfn)) {
+					found = 1;
+					offset = 0;
+					break;
+				}
+			} else { // order >= 3 , must be uncacheable.
+				int size = 1 << current_order;
+				for (offset = 0; offset < size; offset += 4) {
+					if (is_in_llc_partition(&page[offset], cpu) && (page_to_pfn(&page[offset]) >= s_pfn && page_to_pfn(&page[offset]) < e_pfn)) {
+						found = 1;
+						break;
+					}
+				}
+				if (found)
+					break;
 			}
 		}
-		printk(KERN_INFO "__rmqueue_falback LAST list entry %p\n", page);
+		TRACE("__rmqueue_fallback LAST list entry %p\n", page);
 
 		if (!found)
 			return NULL;
@@ -1497,8 +1561,8 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype,
 		list_del(&page->lru);
 		rmv_page_order(page);
 
-		expand(zone, page, order, current_order, area,
-					start_migratetype);
+		expand(zone, page, order, current_order, area, start_migratetype);
+
 		/*
 		 * The freepage_migratetype may differ from pageblock's
 		 * migratetype depending on the decisions in
@@ -1512,7 +1576,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype,
 		trace_mm_page_alloc_extfrag(page, order, current_order,
 			start_migratetype, fallback_mt);
 
-		printk(KERN_INFO "__rmqueue_fallback(): CPU%d COLOR %d page return %p\n", cpu, page_color(page), page);
+		TRACE("__rmqueue_fallback(): CPU%d COLOR %d page return %p pfn:%05lx\n", cpu, page_color(page), page, page_to_pfn(page));
 		return page;
 	}
 	} else {
@@ -1576,8 +1640,10 @@ retry_reserve:
 
 	if (!page) {
 		page = __rmqueue_fallback(zone, order, migratetype, color_req);
+#ifdef CONFIG_SCHED_DEBUG_TRACE
 		if (color_req)
-			printk(KERN_INFO "page received from __rmqueue_fallback()");
+			TRACE("page received from __rmqueue_fallback()");
+#endif
 	}
 
 	/*
@@ -2005,8 +2071,10 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 	bool colored_req = ((gfp_flags & __GFP_COLOR) != 0);
 
-if (colored_req)
-	printk(KERN_INFO "buffered_rmqueue(): colored_req received\n");
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (colored_req)
+		TRACE("buffered_rmqueue(): colored_req received\n");
+#endif
 
 	if (likely(order == 0) && !colored_req) {
 		struct per_cpu_pages *pcp;
@@ -3158,8 +3226,10 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		.migratetype = gfpflags_to_migratetype(gfp_mask),
 	};
 
-if (gfp_mask&GFP_COLOR)
-	printk(KERN_INFO "__alloc_pages_nodemask(): called gfp %08x gfp_allowed_mask %08x mt = %d\n", gfp_mask, gfp_allowed_mask, ac.migratetype);
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (gfp_mask&GFP_COLOR)
+		TRACE("__alloc_pages_nodemask(): called gfp %08x gfp_allowed_mask %08x mt = %d\n", gfp_mask, gfp_allowed_mask, ac.migratetype);
+#endif
 
 	gfp_mask &= gfp_allowed_mask;
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index ff4d4c6f4129..bbd0ddc0b029 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -790,7 +790,7 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 
 	if (flags & GFP_COLOR) {
 		int cpu = raw_smp_processor_id();
-		printk(KERN_INFO "in kmalloc_slab index %d\n", index);
+		TRACE("in kmalloc_slab index %d\n", index);
 		return hc_kmalloc_caches[cpu][index];
 	}
 	else
@@ -848,11 +848,9 @@ void __init create_kmalloc_caches(unsigned long flags)
 			size_index[size_index_elem(i)] = 8;
 	}
 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
-		printk(KERN_INFO "KMALLOC i = %d\n", i);
 		if (!kmalloc_caches[i]) {
 			kmalloc_caches[i] = create_kmalloc_cache(NULL,
 							1 << i, flags);
-printk(KERN_INFO "KMALLOC-%d CACHE CREATED\n", 1<<i);
 		}
 
 		/*
@@ -862,12 +860,10 @@ printk(KERN_INFO "KMALLOC-%d CACHE CREATED\n", 1<<i);
 		 */
 		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6) {
 			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
-printk(KERN_INFO "KMALLOC-96 CACHE CREATED\n");
 		}
 
 		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7) {
 			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
-printk(KERN_INFO "KMALLOC-192 CACHE CREATED\n");
 		}
 	}
 
diff --git a/mm/slub.c b/mm/slub.c
index 1a2858905c54..0e8ce1f912fb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1321,8 +1321,10 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 		return NULL;
 
 	if (node == NUMA_NO_NODE) {
+#ifdef CONFIG_SCHED_DEBUG_TRACE
 		if (flags&GFP_COLOR)
 			printk(KERN_INFO "alloc_pages calls with GFP_COLOR order = %d\n", order);
+#endif
 		page = alloc_pages(flags, order);
 	}
 	else
@@ -1340,8 +1342,10 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 
-if (flags&GFP_COLOR)
-	printk(KERN_INFO "gfp_allowed_mask = %08x\n", gfp_allowed_mask);
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags&GFP_COLOR)
+		printk(KERN_INFO "gfp_allowed_mask = %08x\n", gfp_allowed_mask);
+#endif
 
 	flags &= gfp_allowed_mask;
 
@@ -1355,9 +1359,12 @@ if (flags&GFP_COLOR)
 	 * so we fall-back to the minimum order allocation.
 	 */
 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
-if (flags&__GFP_COLOR) {
-	printk(KERN_INFO "allocate_slab with GFP_COLOR alloc_gfp = %08x\n", alloc_gfp);
-}
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags&__GFP_COLOR)
+		printk(KERN_INFO "allocate_slab with GFP_COLOR alloc_gfp = %08x\n", alloc_gfp);
+#endif
+
 	page = alloc_slab_page(s, alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
@@ -2232,9 +2239,10 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 
 	page = new_slab(s, flags, node);
 
-if (flags&GFP_COLOR) {
-	printk(KERN_INFO "new_slab_objects(): gets page %p\n", page);
-}
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags&GFP_COLOR)
+		printk(KERN_INFO "new_slab_objects(): gets page %p\n", page);
+#endif
 
 	if (page) {
 		c = raw_cpu_ptr(s->cpu_slab);
@@ -2321,8 +2329,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	void *freelist;
 	struct page *page;
 	unsigned long flags;
-if (gfpflags&GFP_COLOR)
-	printk(KERN_INFO "__slab_alloc slow_path\n");
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (gfpflags&GFP_COLOR)
+		printk(KERN_INFO "__slab_alloc slow_path\n");
+#endif
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2334,10 +2345,10 @@ if (gfpflags&GFP_COLOR)
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-
-if (gfpflags&GFP_COLOR) {
-	printk(KERN_INFO "__slab_alloc : page %p, partial %p\n", c->page, c->partial);
-}
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (gfpflags&GFP_COLOR)
+		printk(KERN_INFO "__slab_alloc : page %p, partial %p\n", c->page, c->partial);
+#endif
 
 	page = c->page;
 	if (!page)
@@ -3328,22 +3339,29 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-if (flags & GFP_COLOR) {
-	printk(KERN_INFO "kmalloc size %d\n", size);
-}
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags & GFP_COLOR)
+		printk(KERN_INFO "kmalloc size %d\n", size);
+#endif
+
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = kmalloc_slab(size, flags);
-if (flags & GFP_COLOR) {
-	printk(KERN_INFO "kmalloc_slab %p\n", s);
-}
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags & GFP_COLOR)
+		printk(KERN_INFO "kmalloc_slab %p\n", s);
+#endif
+
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-if (flags & GFP_COLOR) {
-	printk(KERN_INFO "slab_alloc calls!!\n");
-}
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags & GFP_COLOR)
+		printk(KERN_INFO "slab_alloc calls!!\n");
+#endif
+
 	ret = slab_alloc(s, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);