-rw-r--r-- | arch/arm/kernel/irq.c | 1
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 20
-rw-r--r-- | drivers/media/usb/uvc/uvc_v4l2.c | 4
-rw-r--r-- | drivers/media/usb/uvc/uvc_video.c | 37
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-core.c | 21
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-vmalloc.c | 29
-rw-r--r-- | drivers/net/ethernet/freescale/fec_main.c | 17
-rw-r--r-- | include/linux/gfp.h | 8
-rw-r--r-- | include/linux/mm.h | 6
-rw-r--r-- | include/linux/vmalloc.h | 2
-rw-r--r-- | include/litmus/page_dev.h | 8
-rw-r--r-- | include/litmus/trace.h | 10
-rw-r--r-- | kernel/irq/handle.c | 19
-rw-r--r-- | kernel/softirq.c | 8
-rw-r--r-- | litmus/Makefile | 3
-rw-r--r-- | litmus/cache_proc.c | 4
-rw-r--r-- | litmus/fakedev0.c | 123
-rw-r--r-- | litmus/litmus.c | 44
-rw-r--r-- | litmus/page_dev.c | 49
-rw-r--r-- | litmus/sched_mc2.c | 198
-rw-r--r-- | litmus/uncachedev.c | 4
-rw-r--r-- | mm/dmapool.c | 2
-rw-r--r-- | mm/migrate.c | 13
-rw-r--r-- | mm/page_alloc.c | 182
-rw-r--r-- | mm/slub.c | 56
-rw-r--r-- | mm/vmalloc.c | 41
-rw-r--r-- | net/core/dev.c | 1
-rw-r--r-- | net/core/skbuff.c | 68
28 files changed, 706 insertions(+), 272 deletions(-)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 720b45e232f2..a9ba6e5ce317 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -45,6 +45,7 @@ | |||
45 | #include <asm/mach/time.h> | 45 | #include <asm/mach/time.h> |
46 | 46 | ||
47 | #include <litmus/cache_proc.h> | 47 | #include <litmus/cache_proc.h> |
48 | #include <litmus/litmus.h> | ||
48 | 49 | ||
49 | unsigned long irq_err_count; | 50 | unsigned long irq_err_count; |
50 | 51 | ||
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e272fdcccc48..bd7dfb1b7ebd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -259,7 +259,8 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf | |||
259 | page = alloc_pages(gfp, order); | 259 | page = alloc_pages(gfp, order); |
260 | if (!page) | 260 | if (!page) |
261 | return NULL; | 261 | return NULL; |
262 | 262 | if (gfp&GFP_COLOR) | |
263 | printk(KERN_INFO "__dma_alloc_buffer(): size %zu, order %lu requested\n", size, order); | ||
263 | /* | 264 | /* |
264 | * Now split the huge page and free the excess pages | 265 | * Now split the huge page and free the excess pages |
265 | */ | 266 | */ |
@@ -665,18 +666,24 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
665 | want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs); | 666 | want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs); |
666 | 667 | ||
667 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 668 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
668 | if (gfp&GFP_COLOR) | 669 | if (gfp&GFP_COLOR) { |
669 | printk(KERN_INFO "__dma_alloc() for usb buffer\n"); | 670 | printk(KERN_INFO "__dma_alloc() for usb buffer\n"); |
671 | if (gfp&GFP_CPU1) { | ||
672 | printk(KERN_INFO "__dma_alloc() GFP_CPU1 is set\n"); | ||
673 | } | ||
674 | } | ||
670 | #endif | 675 | #endif |
671 | 676 | ||
672 | if (is_coherent || nommu()) | 677 | if (is_coherent || nommu()) |
673 | addr = __alloc_simple_buffer(dev, size, gfp, &page); | 678 | addr = __alloc_simple_buffer(dev, size, gfp, &page); |
674 | else if (!(gfp & __GFP_WAIT)) | 679 | else if (!(gfp & __GFP_WAIT)) |
675 | addr = __alloc_from_pool(size, &page); | 680 | addr = __alloc_from_pool(size, &page); |
676 | else if (!dev_get_cma_area(dev)) | 681 | else if (!dev_get_cma_area(dev)) { |
677 | addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr); | 682 | addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr); |
678 | else | 683 | //printk(KERN_INFO "__alloc_remap_buffer returned %p page, size %d, color %d, bank %d, pfn %05lx\n", page, size, page_color(page), page_bank(page), page_to_pfn(page)); |
684 | } else { | ||
679 | addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr); | 685 | addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr); |
686 | } | ||
680 | 687 | ||
681 | if (page) | 688 | if (page) |
682 | *handle = pfn_to_dma(dev, page_to_pfn(page)); | 689 | *handle = pfn_to_dma(dev, page_to_pfn(page)); |
@@ -694,16 +701,17 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
694 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); | 701 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); |
695 | void *memory; | 702 | void *memory; |
696 | 703 | ||
704 | /* | ||
697 | if ((gfp&GFP_COLOR) && (size > PAGE_SIZE*4)) { | 705 | if ((gfp&GFP_COLOR) && (size > PAGE_SIZE*4)) { |
698 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 706 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
699 | printk(KERN_INFO "arm_dma_alloc(): original prot %08x\n", prot); | 707 | printk(KERN_INFO "arm_dma_alloc(): original prot %08x\n", prot); |
700 | #endif | 708 | #endif |
701 | prot = pgprot_noncached(prot); | 709 | //prot = pgprot_noncached(prot); |
702 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 710 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
703 | printk(KERN_INFO "arm_dma_alloc(): set as uncacheable prot %08x\n", prot); | 711 | printk(KERN_INFO "arm_dma_alloc(): set as uncacheable prot %08x\n", prot); |
704 | #endif | 712 | #endif |
705 | } | 713 | } |
706 | 714 | */ | |
707 | if (dma_alloc_from_coherent(dev, size, handle, &memory)) | 715 | if (dma_alloc_from_coherent(dev, size, handle, &memory)) |
708 | return memory; | 716 | return memory; |
709 | 717 | ||
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index c4b1ac6750d8..e40daf90d388 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1437,7 +1437,9 @@ static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma) | |||
1437 | struct uvc_streaming *stream = handle->stream; | 1437 | struct uvc_streaming *stream = handle->stream; |
1438 | 1438 | ||
1439 | uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_mmap\n"); | 1439 | uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_mmap\n"); |
1440 | 1440 | #if 0 | |
1441 | printk(KERN_INFO "uvc_mmap entry point\n"); | ||
1442 | #endif | ||
1441 | return uvc_queue_mmap(&stream->queue, vma); | 1443 | return uvc_queue_mmap(&stream->queue, vma); |
1442 | } | 1444 | } |
1443 | 1445 | ||
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 454e6e83aa56..9daef917557b 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -26,6 +26,13 @@ | |||
26 | 26 | ||
27 | #include "uvcvideo.h" | 27 | #include "uvcvideo.h" |
28 | 28 | ||
29 | #define ENABLE_WORST_CASE 1 | ||
30 | #ifdef ENABLE_WORST_CASE | ||
31 | #define UVC_FLAG (GFP_COLOR|GFP_CPU1) | ||
32 | #else | ||
33 | #define UVC_FLAG (GFP_COLOR) | ||
34 | #endif | ||
35 | |||
29 | /* ------------------------------------------------------------------------ | 36 | /* ------------------------------------------------------------------------ |
30 | * UVC Controls | 37 | * UVC Controls |
31 | */ | 38 | */ |
@@ -167,7 +174,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream, | |||
167 | query == UVC_GET_DEF) | 174 | query == UVC_GET_DEF) |
168 | return -EIO; | 175 | return -EIO; |
169 | 176 | ||
170 | data = kmalloc(size, GFP_KERNEL); | 177 | data = kmalloc(size, GFP_KERNEL|UVC_FLAG); |
171 | if (data == NULL) | 178 | if (data == NULL) |
172 | return -ENOMEM; | 179 | return -ENOMEM; |
173 | 180 | ||
@@ -251,7 +258,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream, | |||
251 | int ret; | 258 | int ret; |
252 | 259 | ||
253 | size = stream->dev->uvc_version >= 0x0110 ? 34 : 26; | 260 | size = stream->dev->uvc_version >= 0x0110 ? 34 : 26; |
254 | data = kzalloc(size, GFP_KERNEL); | 261 | data = kzalloc(size, GFP_KERNEL|UVC_FLAG); |
255 | if (data == NULL) | 262 | if (data == NULL) |
256 | return -ENOMEM; | 263 | return -ENOMEM; |
257 | 264 | ||
@@ -494,7 +501,7 @@ static int uvc_video_clock_init(struct uvc_streaming *stream) | |||
494 | clock->size = 32; | 501 | clock->size = 32; |
495 | 502 | ||
496 | clock->samples = kmalloc(clock->size * sizeof(*clock->samples), | 503 | clock->samples = kmalloc(clock->size * sizeof(*clock->samples), |
497 | GFP_KERNEL); | 504 | GFP_KERNEL|UVC_FLAG); |
498 | if (clock->samples == NULL) | 505 | if (clock->samples == NULL) |
499 | return -ENOMEM; | 506 | return -ENOMEM; |
500 | 507 | ||
@@ -1343,7 +1350,7 @@ static void uvc_video_complete(struct urb *urb) | |||
1343 | 1350 | ||
1344 | stream->decode(urb, stream, buf); | 1351 | stream->decode(urb, stream, buf); |
1345 | 1352 | ||
1346 | if ((ret = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { | 1353 | if ((ret = usb_submit_urb(urb, GFP_ATOMIC|UVC_FLAG)) < 0) { |
1347 | uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n", | 1354 | uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n", |
1348 | ret); | 1355 | ret); |
1349 | } | 1356 | } |
@@ -1406,10 +1413,10 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream, | |||
1406 | #ifndef CONFIG_DMA_NONCOHERENT | 1413 | #ifndef CONFIG_DMA_NONCOHERENT |
1407 | stream->urb_buffer[i] = usb_alloc_coherent( | 1414 | stream->urb_buffer[i] = usb_alloc_coherent( |
1408 | stream->dev->udev, stream->urb_size, | 1415 | stream->dev->udev, stream->urb_size, |
1409 | gfp_flags | __GFP_NOWARN | GFP_COLOR, &stream->urb_dma[i]); | 1416 | gfp_flags | __GFP_NOWARN | UVC_FLAG, &stream->urb_dma[i]); |
1410 | #else | 1417 | #else |
1411 | stream->urb_buffer[i] = | 1418 | stream->urb_buffer[i] = |
1412 | kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN | GFP_COLOR); | 1419 | kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN | UVC_FLAG); |
1413 | #endif | 1420 | #endif |
1414 | if (!stream->urb_buffer[i]) { | 1421 | if (!stream->urb_buffer[i]) { |
1415 | uvc_free_urb_buffers(stream); | 1422 | uvc_free_urb_buffers(stream); |
@@ -1492,14 +1499,14 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream, | |||
1492 | psize = uvc_endpoint_max_bpi(stream->dev->udev, ep); | 1499 | psize = uvc_endpoint_max_bpi(stream->dev->udev, ep); |
1493 | size = stream->ctrl.dwMaxVideoFrameSize; | 1500 | size = stream->ctrl.dwMaxVideoFrameSize; |
1494 | 1501 | ||
1495 | npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags); | 1502 | npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags|UVC_FLAG); |
1496 | if (npackets == 0) | 1503 | if (npackets == 0) |
1497 | return -ENOMEM; | 1504 | return -ENOMEM; |
1498 | 1505 | ||
1499 | size = npackets * psize; | 1506 | size = npackets * psize; |
1500 | 1507 | ||
1501 | for (i = 0; i < UVC_URBS; ++i) { | 1508 | for (i = 0; i < UVC_URBS; ++i) { |
1502 | urb = usb_alloc_urb(npackets, gfp_flags); | 1509 | urb = usb_alloc_urb(npackets, gfp_flags|UVC_FLAG); |
1503 | if (urb == NULL) { | 1510 | if (urb == NULL) { |
1504 | uvc_uninit_video(stream, 1); | 1511 | uvc_uninit_video(stream, 1); |
1505 | return -ENOMEM; | 1512 | return -ENOMEM; |
@@ -1548,7 +1555,7 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream, | |||
1548 | size = stream->ctrl.dwMaxPayloadTransferSize; | 1555 | size = stream->ctrl.dwMaxPayloadTransferSize; |
1549 | stream->bulk.max_payload_size = size; | 1556 | stream->bulk.max_payload_size = size; |
1550 | 1557 | ||
1551 | npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags); | 1558 | npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags|UVC_FLAG); |
1552 | if (npackets == 0) | 1559 | if (npackets == 0) |
1553 | return -ENOMEM; | 1560 | return -ENOMEM; |
1554 | 1561 | ||
@@ -1565,7 +1572,7 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream, | |||
1565 | size = 0; | 1572 | size = 0; |
1566 | 1573 | ||
1567 | for (i = 0; i < UVC_URBS; ++i) { | 1574 | for (i = 0; i < UVC_URBS; ++i) { |
1568 | urb = usb_alloc_urb(0, gfp_flags); | 1575 | urb = usb_alloc_urb(0, gfp_flags|UVC_FLAG); |
1569 | if (urb == NULL) { | 1576 | if (urb == NULL) { |
1570 | uvc_uninit_video(stream, 1); | 1577 | uvc_uninit_video(stream, 1); |
1571 | return -ENOMEM; | 1578 | return -ENOMEM; |
@@ -1654,7 +1661,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags) | |||
1654 | if (ret < 0) | 1661 | if (ret < 0) |
1655 | return ret; | 1662 | return ret; |
1656 | 1663 | ||
1657 | ret = uvc_init_video_isoc(stream, best_ep, gfp_flags); | 1664 | ret = uvc_init_video_isoc(stream, best_ep, gfp_flags|UVC_FLAG); |
1658 | } else { | 1665 | } else { |
1659 | /* Bulk endpoint, proceed to URB initialization. */ | 1666 | /* Bulk endpoint, proceed to URB initialization. */ |
1660 | ep = uvc_find_endpoint(&intf->altsetting[0], | 1667 | ep = uvc_find_endpoint(&intf->altsetting[0], |
@@ -1662,7 +1669,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags) | |||
1662 | if (ep == NULL) | 1669 | if (ep == NULL) |
1663 | return -EIO; | 1670 | return -EIO; |
1664 | 1671 | ||
1665 | ret = uvc_init_video_bulk(stream, ep, gfp_flags); | 1672 | ret = uvc_init_video_bulk(stream, ep, gfp_flags|UVC_FLAG); |
1666 | } | 1673 | } |
1667 | 1674 | ||
1668 | if (ret < 0) | 1675 | if (ret < 0) |
@@ -1670,7 +1677,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags) | |||
1670 | 1677 | ||
1671 | /* Submit the URBs. */ | 1678 | /* Submit the URBs. */ |
1672 | for (i = 0; i < UVC_URBS; ++i) { | 1679 | for (i = 0; i < UVC_URBS; ++i) { |
1673 | ret = usb_submit_urb(stream->urb[i], gfp_flags); | 1680 | ret = usb_submit_urb(stream->urb[i], gfp_flags|UVC_FLAG); |
1674 | if (ret < 0) { | 1681 | if (ret < 0) { |
1675 | uvc_printk(KERN_ERR, "Failed to submit URB %u " | 1682 | uvc_printk(KERN_ERR, "Failed to submit URB %u " |
1676 | "(%d).\n", i, ret); | 1683 | "(%d).\n", i, ret); |
@@ -1741,7 +1748,7 @@ int uvc_video_resume(struct uvc_streaming *stream, int reset) | |||
1741 | if (ret < 0) | 1748 | if (ret < 0) |
1742 | return ret; | 1749 | return ret; |
1743 | 1750 | ||
1744 | return uvc_init_video(stream, GFP_NOIO); | 1751 | return uvc_init_video(stream, GFP_NOIO|UVC_FLAG); |
1745 | } | 1752 | } |
1746 | 1753 | ||
1747 | /* ------------------------------------------------------------------------ | 1754 | /* ------------------------------------------------------------------------ |
@@ -1892,7 +1899,7 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable) | |||
1892 | if (ret < 0) | 1899 | if (ret < 0) |
1893 | goto error_commit; | 1900 | goto error_commit; |
1894 | 1901 | ||
1895 | ret = uvc_init_video(stream, GFP_KERNEL); | 1902 | ret = uvc_init_video(stream, GFP_KERNEL|UVC_FLAG); |
1896 | if (ret < 0) | 1903 | if (ret < 0) |
1897 | goto error_video; | 1904 | goto error_video; |
1898 | 1905 | ||
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 66ada01c796c..54058877f467 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -30,6 +30,13 @@ | |||
30 | #include <media/v4l2-common.h> | 30 | #include <media/v4l2-common.h> |
31 | #include <media/videobuf2-core.h> | 31 | #include <media/videobuf2-core.h> |
32 | 32 | ||
33 | #define ENABLE_WORST_CASE 1 | ||
34 | #ifdef ENABLE_WORST_CASE | ||
35 | #define VB2_CORE_FLAG (GFP_COLOR|GFP_CPU1) | ||
36 | #else | ||
37 | #define VB2_CORE_FLAG (GFP_COLOR) | ||
38 | #endif | ||
39 | |||
33 | static int debug; | 40 | static int debug; |
34 | module_param(debug, int, 0644); | 41 | module_param(debug, int, 0644); |
35 | 42 | ||
@@ -200,7 +207,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) | |||
200 | */ | 207 | */ |
201 | for (plane = 0; plane < vb->num_planes; ++plane) { | 208 | for (plane = 0; plane < vb->num_planes; ++plane) { |
202 | unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]); | 209 | unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]); |
203 | 210 | printk(KERN_INFO "__vb2_buf_mem_alloc(): size %ld, func %pF GFP_COLOR? %d\n", size, vb->vb2_queue->mem_ops->alloc, q->gfp_flags&GFP_COLOR); | |
204 | mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane], | 211 | mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane], |
205 | size, dma_dir, q->gfp_flags); | 212 | size, dma_dir, q->gfp_flags); |
206 | if (IS_ERR_OR_NULL(mem_priv)) | 213 | if (IS_ERR_OR_NULL(mem_priv)) |
@@ -352,7 +359,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, | |||
352 | 359 | ||
353 | for (buffer = 0; buffer < num_buffers; ++buffer) { | 360 | for (buffer = 0; buffer < num_buffers; ++buffer) { |
354 | /* Allocate videobuf buffer structures */ | 361 | /* Allocate videobuf buffer structures */ |
355 | vb = kzalloc(q->buf_struct_size, GFP_KERNEL); | 362 | vb = kzalloc(q->buf_struct_size, GFP_KERNEL|VB2_CORE_FLAG); |
356 | if (!vb) { | 363 | if (!vb) { |
357 | dprintk(1, "memory alloc for buffer struct failed\n"); | 364 | dprintk(1, "memory alloc for buffer struct failed\n"); |
358 | break; | 365 | break; |
@@ -402,7 +409,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, | |||
402 | 409 | ||
403 | dprintk(1, "allocated %d buffers, %d plane(s) each\n", | 410 | dprintk(1, "allocated %d buffers, %d plane(s) each\n", |
404 | buffer, num_planes); | 411 | buffer, num_planes); |
405 | 412 | printk(KERN_INFO "allocated %d buffers, %d plane(s) each\n", | |
413 | buffer, num_planes); | ||
406 | return buffer; | 414 | return buffer; |
407 | } | 415 | } |
408 | 416 | ||
@@ -2237,6 +2245,7 @@ static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type) | |||
2237 | * Tell driver to start streaming provided sufficient buffers | 2245 | * Tell driver to start streaming provided sufficient buffers |
2238 | * are available. | 2246 | * are available. |
2239 | */ | 2247 | */ |
2248 | printk(KERN_INFO "vb2_internal_streamon()\n"); | ||
2240 | if (q->queued_count >= q->min_buffers_needed) { | 2249 | if (q->queued_count >= q->min_buffers_needed) { |
2241 | ret = vb2_start_streaming(q); | 2250 | ret = vb2_start_streaming(q); |
2242 | if (ret) { | 2251 | if (ret) { |
@@ -2525,7 +2534,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) | |||
2525 | "MMAP invalid, as it would overflow buffer length\n"); | 2534 | "MMAP invalid, as it would overflow buffer length\n"); |
2526 | return -EINVAL; | 2535 | return -EINVAL; |
2527 | } | 2536 | } |
2528 | 2537 | printk(KERN_INFO "memop mmap %pF\n", vb->vb2_queue->mem_ops->mmap); | |
2529 | mutex_lock(&q->mmap_lock); | 2538 | mutex_lock(&q->mmap_lock); |
2530 | ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); | 2539 | ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); |
2531 | mutex_unlock(&q->mmap_lock); | 2540 | mutex_unlock(&q->mmap_lock); |
@@ -2830,7 +2839,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) | |||
2830 | (read) ? "read" : "write", count, q->fileio_read_once, | 2839 | (read) ? "read" : "write", count, q->fileio_read_once, |
2831 | q->fileio_write_immediately); | 2840 | q->fileio_write_immediately); |
2832 | 2841 | ||
2833 | fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL); | 2842 | fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL|VB2_CORE_FLAG); |
2834 | if (fileio == NULL) | 2843 | if (fileio == NULL) |
2835 | return -ENOMEM; | 2844 | return -ENOMEM; |
2836 | 2845 | ||
@@ -3223,7 +3232,7 @@ int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, | |||
3223 | if (WARN_ON(q->fileio)) | 3232 | if (WARN_ON(q->fileio)) |
3224 | return -EBUSY; | 3233 | return -EBUSY; |
3225 | 3234 | ||
3226 | threadio = kzalloc(sizeof(*threadio), GFP_KERNEL); | 3235 | threadio = kzalloc(sizeof(*threadio), GFP_KERNEL|VB2_CORE_FLAG); |
3227 | if (threadio == NULL) | 3236 | if (threadio == NULL) |
3228 | return -ENOMEM; | 3237 | return -ENOMEM; |
3229 | threadio->fnc = fnc; | 3238 | threadio->fnc = fnc; |
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 657ab302a5cf..813cc718c116 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -21,6 +21,14 @@ | |||
21 | #include <media/videobuf2-vmalloc.h> | 21 | #include <media/videobuf2-vmalloc.h> |
22 | #include <media/videobuf2-memops.h> | 22 | #include <media/videobuf2-memops.h> |
23 | 23 | ||
24 | |||
25 | #define ENABLE_WORST_CASE 1 | ||
26 | #ifdef ENABLE_WORST_CASE | ||
27 | #define VB2_FLAG (GFP_COLOR|GFP_CPU1) | ||
28 | #else | ||
29 | #define VB2_FLAG (GFP_COLOR) | ||
30 | #endif | ||
31 | |||
24 | struct vb2_vmalloc_buf { | 32 | struct vb2_vmalloc_buf { |
25 | void *vaddr; | 33 | void *vaddr; |
26 | struct page **pages; | 34 | struct page **pages; |
@@ -39,13 +47,18 @@ static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, | |||
39 | enum dma_data_direction dma_dir, gfp_t gfp_flags) | 47 | enum dma_data_direction dma_dir, gfp_t gfp_flags) |
40 | { | 48 | { |
41 | struct vb2_vmalloc_buf *buf; | 49 | struct vb2_vmalloc_buf *buf; |
42 | 50 | /* video buffer allocation */ | |
43 | buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags); | 51 | printk(KERN_INFO "vb2_vmalloc_alloc(): size %ld requested\n", size); |
52 | buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags | VB2_FLAG); | ||
44 | if (!buf) | 53 | if (!buf) |
45 | return NULL; | 54 | return NULL; |
46 | 55 | ||
47 | buf->size = size; | 56 | buf->size = size; |
48 | buf->vaddr = vmalloc_user(buf->size); | 57 | #ifdef ENABLE_WORST_CASE |
58 | buf->vaddr = vmalloc_color_user_cpu1(buf->size); | ||
59 | #else | ||
60 | buf->vaddr = vmalloc_color_user(buf->size); | ||
61 | #endif | ||
49 | buf->dma_dir = dma_dir; | 62 | buf->dma_dir = dma_dir; |
50 | buf->handler.refcount = &buf->refcount; | 63 | buf->handler.refcount = &buf->refcount; |
51 | buf->handler.put = vb2_vmalloc_put; | 64 | buf->handler.put = vb2_vmalloc_put; |
@@ -81,7 +94,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
81 | struct vm_area_struct *vma; | 94 | struct vm_area_struct *vma; |
82 | dma_addr_t physp; | 95 | dma_addr_t physp; |
83 | 96 | ||
84 | buf = kzalloc(sizeof(*buf), GFP_KERNEL); | 97 | buf = kzalloc(sizeof(*buf), GFP_KERNEL | VB2_FLAG); |
85 | if (!buf) | 98 | if (!buf) |
86 | return NULL; | 99 | return NULL; |
87 | 100 | ||
@@ -103,7 +116,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
103 | last = (vaddr + size - 1) >> PAGE_SHIFT; | 116 | last = (vaddr + size - 1) >> PAGE_SHIFT; |
104 | buf->n_pages = last - first + 1; | 117 | buf->n_pages = last - first + 1; |
105 | buf->pages = kzalloc(buf->n_pages * sizeof(struct page *), | 118 | buf->pages = kzalloc(buf->n_pages * sizeof(struct page *), |
106 | GFP_KERNEL); | 119 | GFP_KERNEL | VB2_FLAG); |
107 | if (!buf->pages) | 120 | if (!buf->pages) |
108 | goto fail_pages_array_alloc; | 121 | goto fail_pages_array_alloc; |
109 | 122 | ||
@@ -233,12 +246,12 @@ static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *de | |||
233 | int ret; | 246 | int ret; |
234 | int i; | 247 | int i; |
235 | 248 | ||
236 | attach = kzalloc(sizeof(*attach), GFP_KERNEL); | 249 | attach = kzalloc(sizeof(*attach), GFP_KERNEL | VB2_FLAG); |
237 | if (!attach) | 250 | if (!attach) |
238 | return -ENOMEM; | 251 | return -ENOMEM; |
239 | 252 | ||
240 | sgt = &attach->sgt; | 253 | sgt = &attach->sgt; |
241 | ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL); | 254 | ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL | VB2_FLAG); |
242 | if (ret) { | 255 | if (ret) { |
243 | kfree(attach); | 256 | kfree(attach); |
244 | return ret; | 257 | return ret; |
@@ -429,7 +442,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, | |||
429 | if (dbuf->size < size) | 442 | if (dbuf->size < size) |
430 | return ERR_PTR(-EFAULT); | 443 | return ERR_PTR(-EFAULT); |
431 | 444 | ||
432 | buf = kzalloc(sizeof(*buf), GFP_KERNEL); | 445 | buf = kzalloc(sizeof(*buf), GFP_KERNEL | VB2_FLAG); |
433 | if (!buf) | 446 | if (!buf) |
434 | return ERR_PTR(-ENOMEM); | 447 | return ERR_PTR(-ENOMEM); |
435 | 448 | ||
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ad3759e8a749..2f7aab0b60cf 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -65,6 +65,13 @@ | |||
65 | 65 | ||
66 | #include "fec.h" | 66 | #include "fec.h" |
67 | 67 | ||
68 | #define ENABLE_WORST_CASE 1 | ||
69 | #ifdef ENABLE_WORST_CASE | ||
70 | #define FEC_FLAG (GFP_COLOR|GFP_CPU1) | ||
71 | #else | ||
72 | #define FEC_FLAG (0) | ||
73 | #endif | ||
74 | |||
68 | static void set_multicast_list(struct net_device *ndev); | 75 | static void set_multicast_list(struct net_device *ndev); |
69 | static void fec_enet_itr_coal_init(struct net_device *ndev); | 76 | static void fec_enet_itr_coal_init(struct net_device *ndev); |
70 | 77 | ||
@@ -2628,7 +2635,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev) | |||
2628 | struct fec_enet_priv_tx_q *txq; | 2635 | struct fec_enet_priv_tx_q *txq; |
2629 | 2636 | ||
2630 | for (i = 0; i < fep->num_tx_queues; i++) { | 2637 | for (i = 0; i < fep->num_tx_queues; i++) { |
2631 | txq = kzalloc(sizeof(*txq), GFP_KERNEL); | 2638 | txq = kzalloc(sizeof(*txq), GFP_KERNEL|FEC_FLAG); |
2632 | if (!txq) { | 2639 | if (!txq) { |
2633 | ret = -ENOMEM; | 2640 | ret = -ENOMEM; |
2634 | goto alloc_failed; | 2641 | goto alloc_failed; |
@@ -2645,7 +2652,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev) | |||
2645 | txq->tso_hdrs = dma_alloc_coherent(NULL, | 2652 | txq->tso_hdrs = dma_alloc_coherent(NULL, |
2646 | txq->tx_ring_size * TSO_HEADER_SIZE, | 2653 | txq->tx_ring_size * TSO_HEADER_SIZE, |
2647 | &txq->tso_hdrs_dma, | 2654 | &txq->tso_hdrs_dma, |
2648 | GFP_KERNEL); | 2655 | GFP_KERNEL|FEC_FLAG); |
2649 | if (!txq->tso_hdrs) { | 2656 | if (!txq->tso_hdrs) { |
2650 | ret = -ENOMEM; | 2657 | ret = -ENOMEM; |
2651 | goto alloc_failed; | 2658 | goto alloc_failed; |
@@ -2654,7 +2661,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev) | |||
2654 | 2661 | ||
2655 | for (i = 0; i < fep->num_rx_queues; i++) { | 2662 | for (i = 0; i < fep->num_rx_queues; i++) { |
2656 | fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), | 2663 | fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), |
2657 | GFP_KERNEL); | 2664 | GFP_KERNEL|FEC_FLAG); |
2658 | if (!fep->rx_queue[i]) { | 2665 | if (!fep->rx_queue[i]) { |
2659 | ret = -ENOMEM; | 2666 | ret = -ENOMEM; |
2660 | goto alloc_failed; | 2667 | goto alloc_failed; |
@@ -2723,7 +2730,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) | |||
2723 | txq = fep->tx_queue[queue]; | 2730 | txq = fep->tx_queue[queue]; |
2724 | bdp = txq->tx_bd_base; | 2731 | bdp = txq->tx_bd_base; |
2725 | for (i = 0; i < txq->tx_ring_size; i++) { | 2732 | for (i = 0; i < txq->tx_ring_size; i++) { |
2726 | txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); | 2733 | txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL|FEC_FLAG); |
2727 | if (!txq->tx_bounce[i]) | 2734 | if (!txq->tx_bounce[i]) |
2728 | goto err_alloc; | 2735 | goto err_alloc; |
2729 | 2736 | ||
@@ -3037,7 +3044,7 @@ static int fec_enet_init(struct net_device *ndev) | |||
3037 | 3044 | ||
3038 | /* Allocate memory for buffer descriptors. */ | 3045 | /* Allocate memory for buffer descriptors. */ |
3039 | cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, | 3046 | cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, |
3040 | GFP_KERNEL); | 3047 | GFP_KERNEL|FEC_FLAG); |
3041 | if (!cbd_base) { | 3048 | if (!cbd_base) { |
3042 | return -ENOMEM; | 3049 | return -ENOMEM; |
3043 | } | 3050 | } |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 10a4601c558b..69bdfb464e5c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -36,6 +36,7 @@ struct vm_area_struct; | |||
36 | #define ___GFP_OTHER_NODE 0x800000u | 36 | #define ___GFP_OTHER_NODE 0x800000u |
37 | #define ___GFP_WRITE 0x1000000u | 37 | #define ___GFP_WRITE 0x1000000u |
38 | #define ___GFP_COLOR 0x2000000u | 38 | #define ___GFP_COLOR 0x2000000u |
39 | #define ___GFP_CPU1 0x4000000u | ||
39 | /* If the above are modified, __GFP_BITS_SHIFT may need updating */ | 40 | /* If the above are modified, __GFP_BITS_SHIFT may need updating */ |
40 | 41 | ||
41 | /* | 42 | /* |
@@ -96,6 +97,7 @@ struct vm_area_struct; | |||
96 | #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ | 97 | #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ |
97 | #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ | 98 | #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ |
98 | #define __GFP_COLOR ((__force gfp_t)___GFP_COLOR) /* Colored page request */ | 99 | #define __GFP_COLOR ((__force gfp_t)___GFP_COLOR) /* Colored page request */ |
100 | #define __GFP_CPU1 ((__force gfp_t)___GFP_CPU1) /* use cpu1 bank */ | ||
99 | 101 | ||
100 | /* | 102 | /* |
101 | * This may seem redundant, but it's a way of annotating false positives vs. | 103 | * This may seem redundant, but it's a way of annotating false positives vs. |
@@ -103,7 +105,7 @@ struct vm_area_struct; | |||
103 | */ | 105 | */ |
104 | #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) | 106 | #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) |
105 | 107 | ||
106 | #define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */ | 108 | #define __GFP_BITS_SHIFT 27 /* Room for N __GFP_FOO bits */ |
107 | #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) | 109 | #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) |
108 | 110 | ||
109 | /* This equals 0, but use constants in case they ever change */ | 111 | /* This equals 0, but use constants in case they ever change */ |
@@ -129,7 +131,7 @@ struct vm_area_struct; | |||
129 | /* Control page allocator reclaim behavior */ | 131 | /* Control page allocator reclaim behavior */ |
130 | #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ | 132 | #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ |
131 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ | 133 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ |
132 | __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) | 134 | __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|__GFP_COLOR|__GFP_CPU1) |
133 | 135 | ||
134 | /* Control slab gfp mask during early boot */ | 136 | /* Control slab gfp mask during early boot */ |
135 | #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) | 137 | #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) |
@@ -151,6 +153,8 @@ struct vm_area_struct; | |||
151 | /* Colored page requests */ | 153 | /* Colored page requests */ |
152 | #define GFP_COLOR __GFP_COLOR | 154 | #define GFP_COLOR __GFP_COLOR |
153 | 155 | ||
156 | #define GFP_CPU1 __GFP_CPU1 | ||
157 | |||
154 | /* Convert GFP flags to their corresponding migrate type */ | 158 | /* Convert GFP flags to their corresponding migrate type */ |
155 | static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) | 159 | static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) |
156 | { | 160 | { |
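For reference, a minimal sketch (not part of the patch) of how a caller combines the new flags, assuming the semantics documented above: GFP_COLOR requests a color-partitioned page and GFP_CPU1 additionally restricts the allocation to the CPU1 DRAM bank partition.

#include <linux/gfp.h>

/* Hypothetical helper, not in the patch: allocate one order-0 page that is
 * both color-partitioned and placed in the CPU1 bank partition. */
static struct page *alloc_colored_cpu1_page(void)
{
	return alloc_pages(GFP_KERNEL | GFP_COLOR | GFP_CPU1, 0);
}

litmus/fakedev0.c below uses the same flag combination (with GFP_USER) in its fault handler.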
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0755b9fd03a7..6008a33bfeac 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -172,6 +172,12 @@ extern unsigned int kobjsize(const void *objp); | |||
172 | # define VM_MPX VM_ARCH_2 | 172 | # define VM_MPX VM_ARCH_2 |
173 | #endif | 173 | #endif |
174 | 174 | ||
175 | #if defined(CONFIG_ARM) | ||
176 | /* marks LITMUS^RT fakedev0 mappings so sys_set_page_color() treats them separately */ | ||
177 | # define VM_DONOTMOVE VM_ARCH_2 | ||
178 | #endif | ||
179 | |||
180 | |||
175 | #ifndef VM_GROWSUP | 181 | #ifndef VM_GROWSUP |
176 | # define VM_GROWSUP VM_NONE | 182 | # define VM_GROWSUP VM_NONE |
177 | #endif | 183 | #endif |
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index c82054890d77..2323fbe02aa9 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -70,6 +70,8 @@ extern void *vmalloc(unsigned long size); | |||
70 | extern void *vmalloc_color(unsigned long size); | 70 | extern void *vmalloc_color(unsigned long size); |
71 | extern void *vzalloc(unsigned long size); | 71 | extern void *vzalloc(unsigned long size); |
72 | extern void *vmalloc_user(unsigned long size); | 72 | extern void *vmalloc_user(unsigned long size); |
73 | extern void *vmalloc_color_user(unsigned long size); | ||
74 | extern void *vmalloc_color_user_cpu1(unsigned long size); | ||
73 | extern void *vmalloc_node(unsigned long size, int node); | 75 | extern void *vmalloc_node(unsigned long size, int node); |
74 | extern void *vzalloc_node(unsigned long size, int node); | 76 | extern void *vzalloc_node(unsigned long size, int node); |
75 | extern void *vmalloc_exec(unsigned long size); | 77 | extern void *vmalloc_exec(unsigned long size); |
diff --git a/include/litmus/page_dev.h b/include/litmus/page_dev.h
index f1791469cba1..37953928e6f3 100644
--- a/include/litmus/page_dev.h
+++ b/include/litmus/page_dev.h
@@ -1,7 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * page_dev.h - Implementation of the page coloring for cache and bank partition. | 2 | * page_dev.h - Implementation of the page coloring for cache and bank partition. |
3 | * The file will keep a pool of colored pages. MMU can allocate pages with | ||
4 | * specific color or bank number. | ||
5 | * Author: Namhoon Kim (namhoonk@cs.unc.edu) | 3 | * Author: Namhoon Kim (namhoonk@cs.unc.edu) |
6 | */ | 4 | */ |
7 | 5 | ||
@@ -16,17 +14,15 @@ | |||
16 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
17 | #include <linux/io.h> | 15 | #include <linux/io.h> |
18 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
19 | #include <linux/mm.h> | ||
20 | #include <linux/random.h> | ||
21 | #include <linux/mmzone.h> | ||
22 | 17 | ||
23 | #include <litmus/litmus_proc.h> | ||
24 | #include <litmus/sched_trace.h> | 18 | #include <litmus/sched_trace.h> |
25 | #include <litmus/litmus.h> | 19 | #include <litmus/litmus.h> |
26 | 20 | ||
27 | int llc_partition_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); | 21 | int llc_partition_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); |
28 | int dram_partition_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); | 22 | int dram_partition_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); |
29 | int bank_to_partition(unsigned int bank); | 23 | int bank_to_partition(unsigned int bank); |
24 | int get_area_index(int cpu); | ||
25 | int is_in_correct_bank(struct page* page, int cpu); | ||
30 | int is_in_llc_partition(struct page* page, int cpu); | 26 | int is_in_llc_partition(struct page* page, int cpu); |
31 | 27 | ||
32 | #endif /* _LITMUS_PAGE_DEV_H */ \ No newline at end of file | 28 | #endif /* _LITMUS_PAGE_DEV_H */ \ No newline at end of file |
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index 24ca412e1184..7d36a119d045 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -143,6 +143,16 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w | |||
143 | #define TS_ISR_START CPU_TIMESTAMP_CUR(192) | 143 | #define TS_ISR_START CPU_TIMESTAMP_CUR(192) |
144 | #define TS_ISR_END CPU_TIMESTAMP_CUR(193) | 144 | #define TS_ISR_END CPU_TIMESTAMP_CUR(193) |
145 | 145 | ||
146 | /* For RTAS2018 */ | ||
147 | #define TS_NET_RX_HARDIRQ_START CPU_TIMESTAMP_CUR(194) | ||
148 | #define TS_NET_RX_HARDIRQ_END CPU_TIMESTAMP_CUR(195) | ||
149 | |||
150 | #define TS_NET_RX_SOFTIRQ_START CPU_TIMESTAMP_CUR(196) | ||
151 | #define TS_NET_RX_SOFTIRQ_END CPU_TIMESTAMP_CUR(197) | ||
152 | |||
153 | #define TS_UVC_IRQ_START CPU_TIMESTAMP_CUR(198) | ||
154 | #define TS_UVC_IRQ_END CPU_TIMESTAMP_CUR(199) | ||
155 | |||
146 | #define TS_RELEASE_LATENCY(when) CPU_LTIMESTAMP(208, &(when)) | 156 | #define TS_RELEASE_LATENCY(when) CPU_LTIMESTAMP(208, &(when)) |
147 | #define TS_RELEASE_LATENCY_A(when) CPU_LTIMESTAMP(209, &(when)) | 157 | #define TS_RELEASE_LATENCY_A(when) CPU_LTIMESTAMP(209, &(when)) |
148 | #define TS_RELEASE_LATENCY_B(when) CPU_LTIMESTAMP(210, &(when)) | 158 | #define TS_RELEASE_LATENCY_B(when) CPU_LTIMESTAMP(210, &(when)) |
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 635480270858..8da6ba6a3ae6 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -18,6 +18,9 @@ | |||
18 | 18 | ||
19 | #include <trace/events/irq.h> | 19 | #include <trace/events/irq.h> |
20 | 20 | ||
21 | #include <litmus/litmus.h> | ||
22 | #include <litmus/trace.h> | ||
23 | |||
21 | #include "internals.h" | 24 | #include "internals.h" |
22 | 25 | ||
23 | /** | 26 | /** |
@@ -138,11 +141,23 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) | |||
138 | 141 | ||
139 | do { | 142 | do { |
140 | irqreturn_t res; | 143 | irqreturn_t res; |
141 | 144 | /* | |
145 | if (irq == 282) | ||
146 | TS_UVC_IRQ_START; | ||
147 | if (irq == 284) { | ||
148 | TS_NET_RX_HARDIRQ_START; | ||
149 | } | ||
150 | */ | ||
142 | trace_irq_handler_entry(irq, action); | 151 | trace_irq_handler_entry(irq, action); |
143 | res = action->handler(irq, action->dev_id); | 152 | res = action->handler(irq, action->dev_id); |
144 | trace_irq_handler_exit(irq, action, res); | 153 | trace_irq_handler_exit(irq, action, res); |
145 | 154 | /* | |
155 | if (irq == 282) | ||
156 | TS_UVC_IRQ_END; | ||
157 | if (irq == 284) { | ||
158 | TS_NET_RX_HARDIRQ_END; | ||
159 | } | ||
160 | */ | ||
146 | if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n", | 161 | if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n", |
147 | irq, action->handler)) | 162 | irq, action->handler)) |
148 | local_irq_disable(); | 163 | local_irq_disable(); |
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 99fe8b877e53..5c694353763b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -27,6 +27,9 @@ | |||
27 | #include <linux/tick.h> | 27 | #include <linux/tick.h> |
28 | #include <linux/irq.h> | 28 | #include <linux/irq.h> |
29 | 29 | ||
30 | /* for measuring NET_RX bottom half */ | ||
31 | #include <litmus/trace.h> | ||
32 | |||
30 | #define CREATE_TRACE_POINTS | 33 | #define CREATE_TRACE_POINTS |
31 | #include <trace/events/irq.h> | 34 | #include <trace/events/irq.h> |
32 | 35 | ||
@@ -273,7 +276,12 @@ restart: | |||
273 | kstat_incr_softirqs_this_cpu(vec_nr); | 276 | kstat_incr_softirqs_this_cpu(vec_nr); |
274 | 277 | ||
275 | trace_softirq_entry(vec_nr); | 278 | trace_softirq_entry(vec_nr); |
279 | // if (vec_nr == 3) | ||
280 | // TS_NET_RX_SOFTIRQ_START; | ||
281 | // net_rx_action() is called here | ||
276 | h->action(h); | 282 | h->action(h); |
283 | // if (vec_nr == 3) | ||
284 | // TS_NET_RX_SOFTIRQ_END; | ||
277 | trace_softirq_exit(vec_nr); | 285 | trace_softirq_exit(vec_nr); |
278 | if (unlikely(prev_count != preempt_count())) { | 286 | if (unlikely(prev_count != preempt_count())) { |
279 | pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", | 287 | pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", |
diff --git a/litmus/Makefile b/litmus/Makefile
index 29ae4b04f046..ccd532d81b78 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -30,7 +30,8 @@ obj-y = sched_plugin.o litmus.o \ | |||
30 | color_shm.o \ | 30 | color_shm.o \ |
31 | replicate_lib.o \ | 31 | replicate_lib.o \ |
32 | cache_proc.o \ | 32 | cache_proc.o \ |
33 | page_dev.o | 33 | page_dev.o \ |
34 | fakedev0.o | ||
34 | 35 | ||
35 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o | 36 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o |
36 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o | 37 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o |
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index 2d90454ad5cc..102feaf5c9e6 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -10,7 +10,10 @@ | |||
10 | #include <linux/mutex.h> | 10 | #include <linux/mutex.h> |
11 | #include <linux/time.h> | 11 | #include <linux/time.h> |
12 | #include <linux/random.h> | 12 | #include <linux/random.h> |
13 | #include <linux/sched.h> | ||
13 | 14 | ||
15 | #include <litmus/rt_param.h> | ||
16 | #include <litmus/litmus.h> | ||
14 | #include <litmus/litmus_proc.h> | 17 | #include <litmus/litmus_proc.h> |
15 | #include <litmus/sched_trace.h> | 18 | #include <litmus/sched_trace.h> |
16 | #include <litmus/cache_proc.h> | 19 | #include <litmus/cache_proc.h> |
@@ -19,7 +22,6 @@ | |||
19 | #include <asm/hardware/cache-l2x0.h> | 22 | #include <asm/hardware/cache-l2x0.h> |
20 | #include <asm/cacheflush.h> | 23 | #include <asm/cacheflush.h> |
21 | 24 | ||
22 | |||
23 | #define UNLOCK_ALL 0x00000000 /* allocation in any way */ | 25 | #define UNLOCK_ALL 0x00000000 /* allocation in any way */ |
24 | #define LOCK_ALL (~UNLOCK_ALL) | 26 | #define LOCK_ALL (~UNLOCK_ALL) |
25 | #define MAX_NR_WAYS 16 | 27 | #define MAX_NR_WAYS 16 |
diff --git a/litmus/fakedev0.c b/litmus/fakedev0.c
new file mode 100644
index 000000000000..0b8909e77777
--- /dev/null
+++ b/litmus/fakedev0.c
@@ -0,0 +1,123 @@ | |||
1 | #include <linux/sched.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/mm.h> | ||
4 | #include <linux/fs.h> | ||
5 | #include <linux/errno.h> | ||
6 | #include <linux/highmem.h> | ||
7 | #include <asm/page.h> | ||
8 | #include <linux/miscdevice.h> | ||
9 | #include <linux/module.h> | ||
10 | |||
11 | #include <litmus/litmus.h> | ||
12 | |||
13 | /* device for allocating colored pages from the CPU1 DRAM bank partition */ | ||
14 | |||
15 | #define FAKEDEV0_NAME "litmus/fakedev0" | ||
16 | |||
17 | #define NUM_BANKS 8 | ||
18 | #define BANK_MASK 0x38000000 | ||
19 | #define BANK_SHIFT 27 | ||
20 | |||
21 | #define NUM_COLORS 16 | ||
22 | #define CACHE_MASK 0x0000f000 | ||
23 | #define CACHE_SHIFT 12 | ||
24 | |||
25 | /* Decoding page color, 0~15 */ | ||
26 | static inline unsigned int page_color(struct page *page) | ||
27 | { | ||
28 | return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT); | ||
29 | } | ||
30 | |||
31 | /* Decoding page bank number, 0~7 */ | ||
32 | static inline unsigned int page_bank(struct page *page) | ||
33 | { | ||
34 | return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT); | ||
35 | } | ||
36 | |||
37 | void litmus_fakedev0_vm_open(struct vm_area_struct *vma) | ||
38 | { | ||
39 | } | ||
40 | |||
41 | void litmus_fakedev0_vm_close(struct vm_area_struct *vma) | ||
42 | { | ||
43 | } | ||
44 | |||
45 | int litmus_fakedev0_vm_fault(struct vm_area_struct* vma, | ||
46 | struct vm_fault* vmf) | ||
47 | { | ||
48 | /* modeled after SG DMA video4linux, but without DMA. */ | ||
49 | /* (see drivers/media/video/videobuf-dma-sg.c) */ | ||
50 | struct page *page; | ||
51 | |||
52 | page = alloc_page(GFP_USER|GFP_COLOR|GFP_CPU1); | ||
53 | if (!page) | ||
54 | return VM_FAULT_OOM; | ||
55 | |||
56 | clear_user_highpage(page, (unsigned long)vmf->virtual_address); | ||
57 | vmf->page = page; | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | static struct vm_operations_struct litmus_fakedev0_vm_ops = { | ||
63 | .open = litmus_fakedev0_vm_open, | ||
64 | .close = litmus_fakedev0_vm_close, | ||
65 | .fault = litmus_fakedev0_vm_fault, | ||
66 | }; | ||
67 | |||
68 | static int litmus_fakedev0_mmap(struct file* filp, struct vm_area_struct* vma) | ||
69 | { | ||
70 | /* first make sure mapper knows what he's doing */ | ||
71 | |||
72 | /* you can only map the "first" page */ | ||
73 | if (vma->vm_pgoff != 0) | ||
74 | return -EINVAL; | ||
75 | |||
76 | /* you can't share it with anyone */ | ||
77 | if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) | ||
78 | return -EINVAL; | ||
79 | |||
80 | /* cannot be expanded, and is not a "normal" page. */ | ||
81 | vma->vm_flags |= (VM_DONTEXPAND|VM_DONOTMOVE); | ||
82 | |||
83 | /* noncached pages are not explicitly locked in memory (for now). */ | ||
84 | //vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
85 | |||
86 | vma->vm_ops = &litmus_fakedev0_vm_ops; | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static struct file_operations litmus_fakedev0_fops = { | ||
92 | .owner = THIS_MODULE, | ||
93 | .mmap = litmus_fakedev0_mmap, | ||
94 | }; | ||
95 | |||
96 | static struct miscdevice litmus_fakedev0_dev = { | ||
97 | .name = FAKEDEV0_NAME, | ||
98 | .minor = MISC_DYNAMIC_MINOR, | ||
99 | .fops = &litmus_fakedev0_fops, | ||
100 | /* pages are not locked, so there is no reason why | ||
101 | anyone cannot allocate fakedev0 pages */ | ||
102 | .mode = (S_IRUGO | S_IWUGO), | ||
103 | }; | ||
104 | |||
105 | static int __init init_litmus_fakedev0_dev(void) | ||
106 | { | ||
107 | int err; | ||
108 | |||
109 | printk("Initializing LITMUS^RT fakedev0 device.\n"); | ||
110 | err = misc_register(&litmus_fakedev0_dev); | ||
111 | if (err) | ||
112 | printk("Could not allocate %s device (%d).\n", FAKEDEV0_NAME, err); | ||
113 | return err; | ||
114 | } | ||
115 | |||
116 | static void __exit exit_litmus_fakedev0_dev(void) | ||
117 | { | ||
118 | misc_deregister(&litmus_fakedev0_dev); | ||
119 | } | ||
120 | |||
121 | module_init(init_litmus_fakedev0_dev); | ||
122 | module_exit(exit_litmus_fakedev0_dev); | ||
123 | |||
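A minimal userspace sketch of how this device would be used, under the assumption that the misc device node appears as /dev/litmus/fakedev0 and that each first touch of a mapped page goes through litmus_fakedev0_vm_fault() above:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/litmus/fakedev0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The mapping must start at offset 0 and must be private;
	 * shared mappings are rejected by litmus_fakedev0_mmap(). */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	p[0] = 42;	/* first touch faults in a GFP_COLOR|GFP_CPU1 page */

	munmap(p, 4096);
	close(fd);
	return 0;
}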
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 1105408e405a..ec9379979e1a 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -28,6 +28,7 @@ | |||
28 | #include <litmus/cache_proc.h> | 28 | #include <litmus/cache_proc.h> |
29 | #include <litmus/mc2_common.h> | 29 | #include <litmus/mc2_common.h> |
30 | #include <litmus/replicate_lib.h> | 30 | #include <litmus/replicate_lib.h> |
31 | #include <litmus/page_dev.h> | ||
31 | 32 | ||
32 | #ifdef CONFIG_SCHED_CPU_AFFINITY | 33 | #ifdef CONFIG_SCHED_CPU_AFFINITY |
33 | #include <litmus/affinity.h> | 34 | #include <litmus/affinity.h> |
@@ -350,8 +351,15 @@ extern struct page *new_alloc_page(struct page *page, unsigned long node, int ** | |||
350 | static struct page *alloc_colored_page(struct page *page, unsigned long node, int **result) | 351 | static struct page *alloc_colored_page(struct page *page, unsigned long node, int **result) |
351 | { | 352 | { |
352 | struct page *newpage; | 353 | struct page *newpage; |
354 | gfp_t gfp_mask; | ||
353 | 355 | ||
354 | newpage = alloc_pages(GFP_HIGHUSER_MOVABLE|GFP_COLOR, 0); | 356 | gfp_mask = GFP_HIGHUSER_MOVABLE; |
357 | if (node != 8) | ||
358 | gfp_mask |= GFP_COLOR; | ||
359 | if (node == 9) | ||
360 | gfp_mask |= GFP_CPU1; | ||
361 | |||
362 | newpage = alloc_pages(gfp_mask, 0); | ||
355 | 363 | ||
356 | return newpage; | 364 | return newpage; |
357 | } | 365 | } |
@@ -378,6 +386,7 @@ asmlinkage long sys_set_page_color(int cpu) | |||
378 | 386 | ||
379 | LIST_HEAD(pagelist); | 387 | LIST_HEAD(pagelist); |
380 | LIST_HEAD(task_shared_pagelist); | 388 | LIST_HEAD(task_shared_pagelist); |
389 | LIST_HEAD(fakedev_pagelist); | ||
381 | 390 | ||
382 | migrate_prep(); | 391 | migrate_prep(); |
383 | 392 | ||
@@ -396,7 +405,11 @@ asmlinkage long sys_set_page_color(int cpu) | |||
396 | unsigned int num_pages = 0, i; | 405 | unsigned int num_pages = 0, i; |
397 | struct page *old_page = NULL; | 406 | struct page *old_page = NULL; |
398 | int pages_in_vma = 0; | 407 | int pages_in_vma = 0; |
408 | int fakedev_pages = 0; | ||
399 | 409 | ||
410 | if (vma_itr->vm_flags & VM_DONOTMOVE) { | ||
411 | fakedev_pages = 1; | ||
412 | } | ||
400 | num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE; | 413 | num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE; |
401 | /* Traverse all pages in vm_area_struct */ | 414 | /* Traverse all pages in vm_area_struct */ |
402 | for (i = 0; i < num_pages; i++) { | 415 | for (i = 0; i < num_pages; i++) { |
@@ -412,7 +425,13 @@ asmlinkage long sys_set_page_color(int cpu) | |||
412 | put_page(old_page); | 425 | put_page(old_page); |
413 | continue; | 426 | continue; |
414 | } | 427 | } |
415 | 428 | /* | |
429 | if (PageDirty(old_page)) { | ||
430 | TRACE("Dirty Page!\n"); | ||
431 | put_page(old_page); | ||
432 | continue; | ||
433 | } | ||
434 | */ | ||
416 | TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-"); | 435 | TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-"); |
417 | pages_in_vma++; | 436 | pages_in_vma++; |
418 | 437 | ||
@@ -460,12 +479,18 @@ asmlinkage long sys_set_page_color(int cpu) | |||
460 | else { | 479 | else { |
461 | ret = isolate_lru_page(old_page); | 480 | ret = isolate_lru_page(old_page); |
462 | if (!ret) { | 481 | if (!ret) { |
463 | list_add_tail(&old_page->lru, &pagelist); | 482 | if (fakedev_pages == 0) |
483 | list_add_tail(&old_page->lru, &pagelist); | ||
484 | else | ||
485 | list_add_tail(&old_page->lru, &fakedev_pagelist); | ||
486 | |||
464 | inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page)); | 487 | inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page)); |
465 | nr_pages++; | 488 | nr_pages++; |
466 | } else { | 489 | } else if (!is_in_correct_bank(old_page, cpu)) { |
467 | TRACE_TASK(current, "isolate_lru_page for a private page failed\n"); | 490 | TRACE_TASK(current, "isolate_lru_page for a private page failed\n"); |
468 | nr_failed++; | 491 | nr_failed++; |
492 | } else { | ||
493 | TRACE_TASK(current, "page is already in the correct bank\n"); | ||
469 | } | 494 | } |
470 | put_page(old_page); | 495 | put_page(old_page); |
471 | } | 496 | } |
@@ -491,6 +516,16 @@ asmlinkage long sys_set_page_color(int cpu) | |||
491 | } | 516 | } |
492 | } | 517 | } |
493 | 518 | ||
519 | /* Migrate fakedev pages */ | ||
520 | if (!list_empty(&fakedev_pagelist)) { | ||
521 | ret = migrate_pages(&fakedev_pagelist, alloc_colored_page, NULL, 9, MIGRATE_SYNC, MR_SYSCALL); | ||
522 | TRACE_TASK(current, "%ld pages not migrated.\n", ret); | ||
523 | nr_not_migrated = ret; | ||
524 | if (ret) { | ||
525 | putback_movable_pages(&fakedev_pagelist); | ||
526 | } | ||
527 | } | ||
528 | |||
494 | /* Replicate shared pages */ | 529 | /* Replicate shared pages */ |
495 | if (!list_empty(&task_shared_pagelist)) { | 530 | if (!list_empty(&task_shared_pagelist)) { |
496 | ret = replicate_pages(&task_shared_pagelist, alloc_colored_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL); | 531 | ret = replicate_pages(&task_shared_pagelist, alloc_colored_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL); |
@@ -569,6 +604,7 @@ asmlinkage long sys_test_call(unsigned int param) | |||
569 | } | 604 | } |
570 | 605 | ||
571 | TRACE_TASK(current, "addr: %08x, phy: %08x, color: %d, bank: %d, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s mapping: %p\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_phys(old_page), page_color(old_page), page_bank(old_page), page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-", &(old_page->mapping)); | 606 | TRACE_TASK(current, "addr: %08x, phy: %08x, color: %d, bank: %d, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s mapping: %p\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_phys(old_page), page_color(old_page), page_bank(old_page), page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-", &(old_page->mapping)); |
607 | printk(KERN_INFO "addr: %08x, phy: %08x, color: %d, bank: %d, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s mapping: %p\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_phys(old_page), page_color(old_page), page_bank(old_page), page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-", &(old_page->mapping)); | ||
572 | put_page(old_page); | 608 | put_page(old_page); |
573 | } | 609 | } |
574 | vma_itr = vma_itr->vm_next; | 610 | vma_itr = vma_itr->vm_next; |
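The "node" argument that sys_set_page_color() passes to migrate_pages() doubles as a flag selector for alloc_colored_page(). A sketch of that convention as it appears in the hunks above (8 and 9 are the sentinel values used by this patch; other values come from the existing node computation not shown here):

/* Sketch only: mirrors the flag selection in alloc_colored_page(). */
static gfp_t color_node_to_gfp(unsigned long node)
{
	gfp_t gfp_mask = GFP_HIGHUSER_MOVABLE;

	if (node != 8)		/* 8: movable page without coloring */
		gfp_mask |= GFP_COLOR;
	if (node == 9)		/* 9: fakedev0 (VM_DONOTMOVE) pages -> CPU1 bank */
		gfp_mask |= GFP_CPU1;
	return gfp_mask;
}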
diff --git a/litmus/page_dev.c b/litmus/page_dev.c
index ea5d5f5cb36d..cde8e52d9111 100644
--- a/litmus/page_dev.c
+++ b/litmus/page_dev.c
@@ -44,15 +44,6 @@ unsigned int dram_partition[NR_DRAM_PARTITIONS] = { | |||
44 | 0x00000080, | 44 | 0x00000080, |
45 | 0x0000000f, | 45 | 0x0000000f, |
46 | }; | 46 | }; |
47 | /* | ||
48 | unsigned int dram_partition[NR_DRAM_PARTITIONS] = { | ||
49 | 0x00000001, | ||
50 | 0x00000002, | ||
51 | 0x00000004, | ||
52 | 0x00000008, | ||
53 | 0x000000f0, | ||
54 | }; | ||
55 | */ | ||
56 | 47 | ||
57 | /* Decoding page color, 0~15 */ | 48 | /* Decoding page color, 0~15 */ |
58 | static inline unsigned int page_color(struct page *page) | 49 | static inline unsigned int page_color(struct page *page) |
@@ -79,6 +70,35 @@ int bank_to_partition(unsigned int bank) | |||
79 | return -EINVAL; | 70 | return -EINVAL; |
80 | } | 71 | } |
81 | 72 | ||
73 | int get_area_index(int cpu) | ||
74 | { | ||
75 | int index = 0x10, area_index = 0; | ||
76 | |||
77 | while (index < 0x100) { | ||
78 | if (dram_partition[cpu]&index) | ||
79 | break; | ||
80 | index = index << 1; | ||
81 | area_index++; | ||
82 | } | ||
83 | |||
84 | return area_index; | ||
85 | } | ||
86 | |||
87 | /* use this function ONLY for Lv.A/B pages */ | ||
88 | int is_in_correct_bank(struct page* page, int cpu) | ||
89 | { | ||
90 | int bank; | ||
91 | unsigned int page_bank_bit; | ||
92 | |||
93 | bank = page_bank(page); | ||
94 | page_bank_bit = 1 << bank; | ||
95 | |||
96 | if (cpu == -1 || cpu == NR_CPUS) | ||
97 | return (page_bank_bit & dram_partition[NR_CPUS]); | ||
98 | else | ||
99 | return (page_bank_bit & dram_partition[cpu]); | ||
100 | } | ||
101 | |||
82 | int is_in_llc_partition(struct page* page, int cpu) | 102 | int is_in_llc_partition(struct page* page, int cpu) |
83 | { | 103 | { |
84 | int color; | 104 | int color; |
@@ -87,8 +107,8 @@ int is_in_llc_partition(struct page* page, int cpu) | |||
87 | color = page_color(page); | 107 | color = page_color(page); |
88 | page_color_bit = 1 << color; | 108 | page_color_bit = 1 << color; |
89 | 109 | ||
90 | if (cpu == NR_CPUS) | 110 | if (cpu == -1 || cpu == NR_CPUS) |
91 | return (page_color_bit&llc_partition[cpu*2]); | 111 | return (page_color_bit & llc_partition[8]); |
92 | else | 112 | else |
93 | return (page_color_bit & (llc_partition[cpu*2] | llc_partition[cpu*2+1])); | 113 | return (page_color_bit & (llc_partition[cpu*2] | llc_partition[cpu*2+1])); |
94 | } | 114 | } |
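As a worked example of the address decoding these helpers rely on (mask and shift values taken from litmus/fakedev0.c in this patch; page_dev.c is assumed to use the same layout):

#include <stdio.h>

int main(void)
{
	unsigned long phys  = 0x3a804000UL;			/* example physical address */
	unsigned int  bank  = (phys & 0x38000000UL) >> 27;	/* = 7 */
	unsigned int  color = (phys & 0x0000f000UL) >> 12;	/* = 4 */

	/* is_in_correct_bank() tests (1 << bank) against dram_partition[cpu];
	 * is_in_llc_partition() tests (1 << color) against llc_partition[cpu*2]
	 * and llc_partition[cpu*2+1]. */
	printf("bank %u, color %u\n", bank, color);
	return 0;
}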
@@ -117,12 +137,14 @@ int slabtest_handler(struct ctl_table *table, int write, void __user *buffer, si | |||
117 | int idx; | 137 | int idx; |
118 | int n_data = buf_size/sizeof(int); | 138 | int n_data = buf_size/sizeof(int); |
119 | 139 | ||
120 | testbuffer = kmalloc(sizeof(int*)*buf_num, GFP_KERNEL|GFP_COLOR); | 140 | printk(KERN_INFO "-------SLABTEST on CPU%d with %d buffer size\n", raw_smp_processor_id(), buf_size); |
141 | |||
142 | testbuffer = kmalloc(sizeof(int*)*buf_num, GFP_KERNEL|GFP_COLOR|GFP_CPU1); | ||
121 | 143 | ||
122 | for (idx=0; idx<buf_num; idx++) | 144 | for (idx=0; idx<buf_num; idx++) |
123 | { | 145 | { |
124 | printk(KERN_INFO "kmalloc size %d, n_data %d\n", buf_size, n_data); | 146 | printk(KERN_INFO "kmalloc size %d, n_data %d\n", buf_size, n_data); |
125 | testbuffer[idx] = kmalloc(buf_size, GFP_KERNEL|GFP_COLOR); | 147 | testbuffer[idx] = kmalloc(buf_size, GFP_KERNEL|GFP_COLOR|GFP_CPU1); |
126 | 148 | ||
127 | if (!testbuffer[idx]) { | 149 | if (!testbuffer[idx]) { |
128 | printk(KERN_ERR "kmalloc failed size = %d\n", buf_size); | 150 | printk(KERN_ERR "kmalloc failed size = %d\n", buf_size); |
@@ -151,6 +173,7 @@ int slabtest_handler(struct ctl_table *table, int write, void __user *buffer, si | |||
151 | kfree(testbuffer[idx]); | 173 | kfree(testbuffer[idx]); |
152 | 174 | ||
153 | kfree(testbuffer); | 175 | kfree(testbuffer); |
176 | printk(KERN_INFO "-------SLABTEST FINISHED on CPU%d\n", raw_smp_processor_id()); | ||
154 | } | 177 | } |
155 | out: | 178 | out: |
156 | mutex_unlock(&dev_mutex); | 179 | mutex_unlock(&dev_mutex); |
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index b0300abf18e4..b4b159be77d2 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -160,7 +160,7 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk) | |||
160 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | 160 | struct mc2_task_state* tinfo = get_mc2_state(tsk); |
161 | struct reservation* res; | 161 | struct reservation* res; |
162 | struct reservation_client *client; | 162 | struct reservation_client *client; |
163 | enum crit_level lv = get_task_crit_level(tsk); | 163 | //enum crit_level lv = get_task_crit_level(tsk); |
164 | 164 | ||
165 | res = tinfo->res_info.client.reservation; | 165 | res = tinfo->res_info.client.reservation; |
166 | client = &tinfo->res_info.client; | 166 | client = &tinfo->res_info.client; |
@@ -169,14 +169,15 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk) | |||
169 | 169 | ||
170 | res->ops->client_arrives(res, client); | 170 | res->ops->client_arrives(res, client); |
171 | TRACE_TASK(tsk, "Client arrives at %llu\n", litmus_clock()); | 171 | TRACE_TASK(tsk, "Client arrives at %llu\n", litmus_clock()); |
172 | 172 | /* | |
173 | if (lv != NUM_CRIT_LEVELS) { | 173 | if (lv != NUM_CRIT_LEVELS) { |
174 | struct crit_entry *ce; | 174 | struct crit_entry *ce; |
175 | ce = &state->crit_entries[lv]; | 175 | ce = &state->crit_entries[lv]; |
176 | /* if the current task is a ghost job, remove it */ | 176 | // if the current task is a ghost job, remove it |
177 | if (ce->running == tsk) | 177 | if (ce->running == tsk) |
178 | ce->running = NULL; | 178 | ce->running = NULL; |
179 | } | 179 | } |
180 | */ | ||
180 | } | 181 | } |
181 | 182 | ||
182 | /* get_lowest_prio_cpu - return the lowest priority cpu | 183 | /* get_lowest_prio_cpu - return the lowest priority cpu |
@@ -190,6 +191,9 @@ static int get_lowest_prio_cpu(lt_t priority) | |||
190 | int cpu, ret = NO_CPU; | 191 | int cpu, ret = NO_CPU; |
191 | lt_t latest_deadline = 0; | 192 | lt_t latest_deadline = 0; |
192 | 193 | ||
194 | if (priority == LITMUS_NO_PRIORITY) | ||
195 | return ret; | ||
196 | |||
193 | ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu]; | 197 | ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu]; |
194 | if (!ce->will_schedule && !ce->scheduled) { | 198 | if (!ce->will_schedule && !ce->scheduled) { |
195 | TRACE("CPU %d (local) is the lowest!\n", ce->cpu); | 199 | TRACE("CPU %d (local) is the lowest!\n", ce->cpu); |
@@ -202,10 +206,12 @@ static int get_lowest_prio_cpu(lt_t priority) | |||
202 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; | 206 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; |
203 | /* If a CPU will call schedule() in the near future, we don't | 207 | /* If a CPU will call schedule() in the near future, we don't |
204 | return that CPU. */ | 208 | return that CPU. */ |
209 | /* | ||
205 | TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule, | 210 | TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule, |
206 | ce->scheduled ? (ce->scheduled)->comm : "null", | 211 | ce->scheduled ? (ce->scheduled)->comm : "null", |
207 | ce->scheduled ? (ce->scheduled)->pid : 0, | 212 | ce->scheduled ? (ce->scheduled)->pid : 0, |
208 | ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0); | 213 | ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0); |
214 | */ | ||
209 | if (!ce->will_schedule) { | 215 | if (!ce->will_schedule) { |
210 | if (!ce->scheduled) { | 216 | if (!ce->scheduled) { |
211 | /* Idle cpu, return this. */ | 217 | /* Idle cpu, return this. */ |
@@ -242,6 +248,9 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
242 | lt_t update, now; | 248 | lt_t update, now; |
243 | struct next_timer_event *event, *next; | 249 | struct next_timer_event *event, *next; |
244 | int reschedule[NR_CPUS]; | 250 | int reschedule[NR_CPUS]; |
251 | unsigned long flags; | ||
252 | |||
253 | local_irq_save(flags); | ||
245 | 254 | ||
246 | for (cpus = 0; cpus<NR_CPUS; cpus++) | 255 | for (cpus = 0; cpus<NR_CPUS; cpus++) |
247 | reschedule[cpus] = 0; | 256 | reschedule[cpus] = 0; |
@@ -268,15 +277,12 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
268 | if (event->timer_armed_on == NO_CPU) { | 277 | if (event->timer_armed_on == NO_CPU) { |
269 | struct reservation *res = gmp_find_by_id(&_global_env, event->id); | 278 | struct reservation *res = gmp_find_by_id(&_global_env, event->id); |
270 | int cpu = get_lowest_prio_cpu(res?res->priority:0); | 279 | int cpu = get_lowest_prio_cpu(res?res->priority:0); |
271 | TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu); | 280 | //TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu); |
272 | list_del(&event->list); | 281 | list_del(&event->list); |
273 | kfree(event); | 282 | kfree(event); |
274 | if (cpu != NO_CPU) { | 283 | if (cpu != NO_CPU) { |
275 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | 284 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; |
276 | if (cpu == local_cpu_state()->cpu) | 285 | reschedule[cpu] = 1; |
277 | litmus_reschedule_local(); | ||
278 | else | ||
279 | reschedule[cpu] = 1; | ||
280 | } | 286 | } |
281 | } | 287 | } |
282 | } else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) { | 288 | } else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) { |
@@ -289,6 +295,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
289 | /* Must drop state lock before calling into hrtimer_start(), which | 295 | /* Must drop state lock before calling into hrtimer_start(), which |
290 | * may raise a softirq, which in turn may wake ksoftirqd. */ | 296 | * may raise a softirq, which in turn may wake ksoftirqd. */ |
291 | raw_spin_unlock(&_global_env.lock); | 297 | raw_spin_unlock(&_global_env.lock); |
298 | local_irq_restore(flags); | ||
292 | raw_spin_unlock(&state->lock); | 299 | raw_spin_unlock(&state->lock); |
293 | 300 | ||
294 | if (update <= now || reschedule[state->cpu]) { | 301 | if (update <= now || reschedule[state->cpu]) { |
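The two hunks above disable interrupts for the whole timer-update pass and release the locks innermost-first before calling into hrtimer_start(). The resulting order, shown in isolation (a sketch, not the literal function body; state->lock is acquired by the caller, and _global_env.lock is taken around the event scan in code outside these hunks):

local_irq_save(flags);              /* added at the top of mc2_update_timer_and_unlock() */
/* ... walk _global_env.next_events and decide which CPUs to poke ... */
raw_spin_unlock(&_global_env.lock); /* drop the global scheduling state first */
local_irq_restore(flags);
raw_spin_unlock(&state->lock);      /* this is the "NOTE: drops state->lock" for the caller */
/* only now call hrtimer_start()/litmus_reschedule(), which may wake ksoftirqd */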
@@ -325,9 +332,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
325 | */ | 332 | */ |
326 | TRACE("mc2_update_timer for remote CPU %d (update=%llu, " | 333 | TRACE("mc2_update_timer for remote CPU %d (update=%llu, " |
327 | "active:%d, set:%llu)\n", | 334 | "active:%d, set:%llu)\n", |
328 | state->cpu, | 335 | state->cpu, update, hrtimer_active(&state->timer), |
329 | update, | ||
330 | hrtimer_active(&state->timer), | ||
331 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | 336 | ktime_to_ns(hrtimer_get_expires(&state->timer))); |
332 | if (!hrtimer_active(&state->timer) || | 337 | if (!hrtimer_active(&state->timer) || |
333 | ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) { | 338 | ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) { |
@@ -336,17 +341,19 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
336 | state->cpu, | 341 | state->cpu, |
337 | hrtimer_active(&state->timer), | 342 | hrtimer_active(&state->timer), |
338 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | 343 | ktime_to_ns(hrtimer_get_expires(&state->timer))); |
339 | raw_spin_lock(&state->lock); | 344 | //raw_spin_lock(&state->lock); |
340 | preempt_if_preemptable(state->scheduled, state->cpu); | 345 | //preempt_if_preemptable(state->scheduled, state->cpu); |
341 | raw_spin_unlock(&state->lock); | 346 | //raw_spin_unlock(&state->lock); |
342 | reschedule[state->cpu] = 0; | 347 | //reschedule[state->cpu] = 0; |
343 | } | 348 | } |
344 | } | 349 | } |
350 | |||
345 | for (cpus = 0; cpus<NR_CPUS; cpus++) { | 351 | for (cpus = 0; cpus<NR_CPUS; cpus++) { |
346 | if (reschedule[cpus]) { | 352 | if (reschedule[cpus]) { |
347 | litmus_reschedule(cpus); | 353 | litmus_reschedule(cpus); |
348 | } | 354 | } |
349 | } | 355 | } |
356 | |||
350 | } | 357 | } |
351 | 358 | ||
352 | /* update_cpu_prio - Update cpu's priority | 359 | /* update_cpu_prio - Update cpu's priority |
@@ -428,15 +435,13 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | |||
428 | int cpu = get_lowest_prio_cpu(0); | 435 | int cpu = get_lowest_prio_cpu(0); |
429 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { | 436 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { |
430 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | 437 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; |
431 | TRACE("LOWEST CPU = P%d\n", cpu); | ||
432 | if (cpu == state->cpu && update > now) | 438 | if (cpu == state->cpu && update > now) |
433 | litmus_reschedule_local(); | 439 | ; //litmus_reschedule_local(); |
434 | else | 440 | else |
435 | reschedule[cpu] = 1; | 441 | reschedule[cpu] = 1; |
436 | } | 442 | } |
437 | } | 443 | } |
438 | raw_spin_unlock(&_global_env.lock); | 444 | raw_spin_unlock(&_global_env.lock); |
439 | |||
440 | raw_spin_unlock_irqrestore(&state->lock, flags); | 445 | raw_spin_unlock_irqrestore(&state->lock, flags); |
441 | 446 | ||
442 | TS_ISR_END; | 447 | TS_ISR_END; |
@@ -447,7 +452,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | |||
447 | } | 452 | } |
448 | } | 453 | } |
449 | 454 | ||
450 | |||
451 | return restart; | 455 | return restart; |
452 | } | 456 | } |
453 | 457 | ||
@@ -470,7 +474,7 @@ static long mc2_complete_job(void) | |||
470 | unsigned long flags; | 474 | unsigned long flags; |
471 | enum crit_level lv; | 475 | enum crit_level lv; |
472 | 476 | ||
473 | preempt_disable(); | 477 | //preempt_disable(); |
474 | local_irq_save(flags); | 478 | local_irq_save(flags); |
475 | 479 | ||
476 | tinfo = get_mc2_state(current); | 480 | tinfo = get_mc2_state(current); |
@@ -504,7 +508,7 @@ static long mc2_complete_job(void) | |||
504 | 508 | ||
505 | raw_spin_unlock(&state->lock); | 509 | raw_spin_unlock(&state->lock); |
506 | local_irq_restore(flags); | 510 | local_irq_restore(flags); |
507 | preempt_enable(); | 511 | //preempt_enable(); |
508 | } | 512 | } |
509 | 513 | ||
510 | sched_trace_task_completion(current, 0); | 514 | sched_trace_task_completion(current, 0); |
@@ -568,7 +572,6 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state) | |||
568 | { | 572 | { |
569 | struct reservation *res, *next; | 573 | struct reservation *res, *next; |
570 | struct task_struct *tsk = NULL; | 574 | struct task_struct *tsk = NULL; |
571 | |||
572 | enum crit_level lv; | 575 | enum crit_level lv; |
573 | lt_t time_slice; | 576 | lt_t time_slice; |
574 | 577 | ||
@@ -578,31 +581,20 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state) | |||
578 | tsk = res->ops->dispatch_client(res, &time_slice); | 581 | tsk = res->ops->dispatch_client(res, &time_slice); |
579 | if (likely(tsk)) { | 582 | if (likely(tsk)) { |
580 | lv = get_task_crit_level(tsk); | 583 | lv = get_task_crit_level(tsk); |
581 | if (lv == NUM_CRIT_LEVELS) { | 584 | if (lv != CRIT_LEVEL_C) |
585 | BUG(); | ||
582 | #if BUDGET_ENFORCEMENT_AT_C | 586 | #if BUDGET_ENFORCEMENT_AT_C |
583 | gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); | 587 | gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); |
584 | #endif | ||
585 | res->event_added = 1; | ||
586 | res->blocked_by_ghost = 0; | ||
587 | res->is_ghost = NO_CPU; | ||
588 | res->scheduled_on = state->cpu; | ||
589 | return tsk; | ||
590 | } else if (lv == CRIT_LEVEL_C) { | ||
591 | #if BUDGET_ENFORCEMENT_AT_C | ||
592 | gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); | ||
593 | #endif | 588 | #endif |
594 | res->event_added = 1; | 589 | res->event_added = 1; |
595 | res->blocked_by_ghost = 0; | 590 | res->blocked_by_ghost = 0; |
596 | res->is_ghost = NO_CPU; | 591 | res->is_ghost = NO_CPU; |
597 | res->scheduled_on = state->cpu; | 592 | res->scheduled_on = state->cpu; |
598 | return tsk; | 593 | return tsk; |
599 | } else { | ||
600 | BUG(); | ||
601 | } | ||
602 | } | 594 | } |
603 | } | 595 | } |
604 | } | 596 | } |
605 | 597 | ||
606 | return NULL; | 598 | return NULL; |
607 | } | 599 | } |
608 | 600 | ||
@@ -621,7 +613,7 @@ static inline void post_schedule(struct task_struct *next, int cpu) | |||
621 | { | 613 | { |
622 | enum crit_level lev; | 614 | enum crit_level lev; |
623 | if ((!next) || !is_realtime(next)) { | 615 | if ((!next) || !is_realtime(next)) { |
624 | do_partition(NUM_CRIT_LEVELS, -1); | 616 | //do_partition(NUM_CRIT_LEVELS, -1); |
625 | return; | 617 | return; |
626 | } | 618 | } |
627 | 619 | ||
@@ -646,15 +638,15 @@ static inline void post_schedule(struct task_struct *next, int cpu) | |||
646 | */ | 638 | */ |
647 | static struct task_struct* mc2_schedule(struct task_struct * prev) | 639 | static struct task_struct* mc2_schedule(struct task_struct * prev) |
648 | { | 640 | { |
649 | int np, blocks, exists, preempt, to_schedule; | 641 | int np, blocks, exists, cpu; //preempt, to_schedule; |
650 | /* next == NULL means "schedule background work". */ | 642 | /* next == NULL means "schedule background work". */ |
651 | lt_t now; | 643 | lt_t now = litmus_clock(); |
652 | struct mc2_cpu_state *state = local_cpu_state(); | 644 | struct mc2_cpu_state *state = local_cpu_state(); |
653 | 645 | ||
654 | pre_schedule(prev, state->cpu); | ||
655 | |||
656 | raw_spin_lock(&state->lock); | 646 | raw_spin_lock(&state->lock); |
657 | 647 | ||
648 | pre_schedule(prev, state->cpu); | ||
649 | |||
658 | if (state->scheduled && state->scheduled != prev) | 650 | if (state->scheduled && state->scheduled != prev) |
659 | printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null"); | 651 | printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null"); |
660 | if (state->scheduled && !is_realtime(prev)) | 652 | if (state->scheduled && !is_realtime(prev)) |
@@ -664,16 +656,9 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
664 | exists = state->scheduled != NULL; | 656 | exists = state->scheduled != NULL; |
665 | blocks = exists && !is_current_running(); | 657 | blocks = exists && !is_current_running(); |
666 | np = exists && is_np(state->scheduled); | 658 | np = exists && is_np(state->scheduled); |
667 | |||
668 | raw_spin_lock(&_global_env.lock); | ||
669 | preempt = resched_cpu[state->cpu]; | ||
670 | resched_cpu[state->cpu] = 0; | ||
671 | raw_spin_unlock(&_global_env.lock); | ||
672 | 659 | ||
673 | /* update time */ | 660 | /* update time */ |
674 | state->sup_env.will_schedule = true; | 661 | state->sup_env.will_schedule = true; |
675 | |||
676 | now = litmus_clock(); | ||
677 | sup_update_time(&state->sup_env, now); | 662 | sup_update_time(&state->sup_env, now); |
678 | 663 | ||
679 | if (is_realtime(current) && blocks) { | 664 | if (is_realtime(current) && blocks) { |
@@ -690,7 +675,8 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
690 | 675 | ||
691 | if (!state->scheduled) { | 676 | if (!state->scheduled) { |
692 | raw_spin_lock(&_global_env.lock); | 677 | raw_spin_lock(&_global_env.lock); |
693 | to_schedule = gmp_update_time(&_global_env, now); | 678 | if (is_realtime(prev)) |
679 | gmp_update_time(&_global_env, now); | ||
694 | state->scheduled = mc2_global_dispatch(state); | 680 | state->scheduled = mc2_global_dispatch(state); |
695 | _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | 681 | _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; |
696 | update_cpu_prio(state); | 682 | update_cpu_prio(state); |
@@ -711,18 +697,18 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
711 | /* NOTE: drops state->lock */ | 697 | /* NOTE: drops state->lock */ |
712 | mc2_update_timer_and_unlock(state); | 698 | mc2_update_timer_and_unlock(state); |
713 | 699 | ||
700 | raw_spin_lock(&state->lock); | ||
714 | if (prev != state->scheduled && is_realtime(prev)) { | 701 | if (prev != state->scheduled && is_realtime(prev)) { |
715 | struct mc2_task_state* tinfo = get_mc2_state(prev); | 702 | struct mc2_task_state* tinfo = get_mc2_state(prev); |
716 | struct reservation* res = tinfo->res_info.client.reservation; | 703 | struct reservation* res = tinfo->res_info.client.reservation; |
717 | TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on); | ||
718 | res->scheduled_on = NO_CPU; | 704 | res->scheduled_on = NO_CPU; |
719 | TRACE_TASK(prev, "descheduled.\n"); | 705 | TRACE_TASK(prev, "descheduled at %llu.\n", litmus_clock()); |
720 | /* if prev is preempted and a global task, find the lowest cpu and reschedule */ | 706 | /* if prev is preempted and a global task, find the lowest cpu and reschedule */ |
721 | if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) { | 707 | if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) { |
722 | int cpu; | 708 | int cpu; |
723 | raw_spin_lock(&_global_env.lock); | 709 | raw_spin_lock(&_global_env.lock); |
724 | cpu = get_lowest_prio_cpu(res?res->priority:0); | 710 | cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY); |
725 | TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu); | 711 | //TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu); |
726 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { | 712 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { |
727 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | 713 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; |
728 | resched_cpu[cpu] = 1; | 714 | resched_cpu[cpu] = 1; |
@@ -730,7 +716,8 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
730 | raw_spin_unlock(&_global_env.lock); | 716 | raw_spin_unlock(&_global_env.lock); |
731 | } | 717 | } |
732 | } | 718 | } |
733 | 719 | ||
720 | /* | ||
734 | if (to_schedule != 0) { | 721 | if (to_schedule != 0) { |
735 | raw_spin_lock(&_global_env.lock); | 722 | raw_spin_lock(&_global_env.lock); |
736 | while (to_schedule--) { | 723 | while (to_schedule--) { |
@@ -742,13 +729,15 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
742 | } | 729 | } |
743 | raw_spin_unlock(&_global_env.lock); | 730 | raw_spin_unlock(&_global_env.lock); |
744 | } | 731 | } |
732 | */ | ||
745 | 733 | ||
734 | post_schedule(state->scheduled, state->cpu); | ||
735 | |||
736 | raw_spin_unlock(&state->lock); | ||
746 | if (state->scheduled) { | 737 | if (state->scheduled) { |
747 | TRACE_TASK(state->scheduled, "scheduled.\n"); | 738 | TRACE_TASK(state->scheduled, "scheduled.\n"); |
748 | } | 739 | } |
749 | 740 | ||
750 | post_schedule(state->scheduled, state->cpu); | ||
751 | |||
752 | return state->scheduled; | 741 | return state->scheduled; |
753 | } | 742 | } |
754 | 743 | ||
@@ -758,38 +747,40 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
758 | static void mc2_task_resume(struct task_struct *tsk) | 747 | static void mc2_task_resume(struct task_struct *tsk) |
759 | { | 748 | { |
760 | unsigned long flags; | 749 | unsigned long flags; |
761 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | 750 | struct mc2_task_state* tinfo; |
762 | struct mc2_cpu_state *state; | 751 | struct mc2_cpu_state *state; |
763 | 752 | ||
764 | TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); | 753 | TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); |
765 | 754 | ||
766 | local_irq_save(flags); | 755 | preempt_disable(); |
756 | tinfo = get_mc2_state(tsk); | ||
767 | if (tinfo->cpu != -1) | 757 | if (tinfo->cpu != -1) |
768 | state = cpu_state_for(tinfo->cpu); | 758 | state = cpu_state_for(tinfo->cpu); |
769 | else | 759 | else |
770 | state = local_cpu_state(); | 760 | state = local_cpu_state(); |
761 | preempt_enable(); | ||
771 | 762 | ||
772 | /* Requeue only if self-suspension was already processed. */ | 763 | /* Requeue only if self-suspension was already processed. */ |
773 | if (tinfo->has_departed) | 764 | if (tinfo->has_departed) |
774 | { | 765 | { |
775 | /* We don't want to consider jobs before synchronous releases */ | 766 | #ifdef CONFIG_SCHED_OVERHEAD_TRACE |
776 | if (tsk_rt(tsk)->job_params.job_no > 5) { | 767 | switch(get_task_crit_level(tsk)) { |
777 | switch(get_task_crit_level(tsk)) { | 768 | case CRIT_LEVEL_A: |
778 | case CRIT_LEVEL_A: | 769 | TS_RELEASE_LATENCY_A(get_release(tsk)); |
779 | TS_RELEASE_LATENCY_A(get_release(tsk)); | 770 | break; |
780 | break; | 771 | case CRIT_LEVEL_B: |
781 | case CRIT_LEVEL_B: | 772 | TS_RELEASE_LATENCY_B(get_release(tsk)); |
782 | TS_RELEASE_LATENCY_B(get_release(tsk)); | 773 | break; |
783 | break; | 774 | case CRIT_LEVEL_C: |
784 | case CRIT_LEVEL_C: | 775 | TS_RELEASE_LATENCY_C(get_release(tsk)); |
785 | TS_RELEASE_LATENCY_C(get_release(tsk)); | 776 | break; |
786 | break; | 777 | default: |
787 | default: | 778 | break; |
788 | break; | ||
789 | } | ||
790 | } | 779 | } |
780 | #endif | ||
781 | |||
782 | raw_spin_lock_irqsave(&state->lock, flags); | ||
791 | 783 | ||
792 | raw_spin_lock(&state->lock); | ||
793 | /* Assumption: litmus_clock() is synchronized across cores, | 784 | /* Assumption: litmus_clock() is synchronized across cores, |
794 | * since we might not actually be executing on tinfo->cpu | 785 | * since we might not actually be executing on tinfo->cpu |
795 | * at the moment. */ | 786 | * at the moment. */ |
@@ -805,12 +796,14 @@ static void mc2_task_resume(struct task_struct *tsk) | |||
805 | 796 | ||
806 | /* NOTE: drops state->lock */ | 797 | /* NOTE: drops state->lock */ |
807 | TRACE_TASK(tsk, "mc2_resume()\n"); | 798 | TRACE_TASK(tsk, "mc2_resume()\n"); |
799 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
800 | |||
801 | raw_spin_lock(&state->lock); | ||
808 | mc2_update_timer_and_unlock(state); | 802 | mc2_update_timer_and_unlock(state); |
809 | } else { | 803 | } else { |
810 | TRACE_TASK(tsk, "resume event ignored, still scheduled\n"); | 804 | TRACE_TASK(tsk, "resume event ignored, still scheduled\n"); |
811 | } | 805 | } |
812 | 806 | ||
813 | local_irq_restore(flags); | ||
814 | } | 807 | } |
815 | 808 | ||
816 | 809 | ||
@@ -818,7 +811,7 @@ static void mc2_task_resume(struct task_struct *tsk) | |||
818 | */ | 811 | */ |
819 | static long mc2_admit_task(struct task_struct *tsk) | 812 | static long mc2_admit_task(struct task_struct *tsk) |
820 | { | 813 | { |
821 | long err = -ESRCH; | 814 | long err = 0; |
822 | unsigned long flags; | 815 | unsigned long flags; |
823 | struct reservation *res; | 816 | struct reservation *res; |
824 | struct mc2_cpu_state *state; | 817 | struct mc2_cpu_state *state; |
@@ -831,11 +824,10 @@ static long mc2_admit_task(struct task_struct *tsk) | |||
831 | 824 | ||
832 | if (!mp) { | 825 | if (!mp) { |
833 | printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n"); | 826 | printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n"); |
834 | return err; | 827 | return -ESRCH; |
835 | } | 828 | } |
836 | 829 | ||
837 | lv = mp->crit; | 830 | lv = mp->crit; |
838 | preempt_disable(); | ||
839 | 831 | ||
840 | if (lv < CRIT_LEVEL_C) { | 832 | if (lv < CRIT_LEVEL_C) { |
841 | state = cpu_state_for(task_cpu(tsk)); | 833 | state = cpu_state_for(task_cpu(tsk)); |
@@ -858,6 +850,9 @@ static long mc2_admit_task(struct task_struct *tsk) | |||
858 | /* disable LITMUS^RT's per-thread budget enforcement */ | 850 | /* disable LITMUS^RT's per-thread budget enforcement */ |
859 | tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; | 851 | tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; |
860 | } | 852 | } |
853 | else { | ||
854 | err = -ESRCH; | ||
855 | } | ||
861 | 856 | ||
862 | raw_spin_unlock_irqrestore(&state->lock, flags); | 857 | raw_spin_unlock_irqrestore(&state->lock, flags); |
863 | } else if (lv == CRIT_LEVEL_C) { | 858 | } else if (lv == CRIT_LEVEL_C) { |
@@ -882,12 +877,13 @@ static long mc2_admit_task(struct task_struct *tsk) | |||
882 | /* disable LITMUS^RT's per-thread budget enforcement */ | 877 | /* disable LITMUS^RT's per-thread budget enforcement */ |
883 | tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; | 878 | tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; |
884 | } | 879 | } |
880 | else { | ||
881 | err = -ESRCH; | ||
882 | } | ||
885 | 883 | ||
886 | raw_spin_unlock(&_global_env.lock); | 884 | raw_spin_unlock(&_global_env.lock); |
887 | raw_spin_unlock_irqrestore(&state->lock, flags); | 885 | raw_spin_unlock_irqrestore(&state->lock, flags); |
888 | } | 886 | } |
889 | |||
890 | preempt_enable(); | ||
891 | 887 | ||
892 | if (err) | 888 | if (err) |
893 | kfree(tinfo); | 889 | kfree(tinfo); |
@@ -908,6 +904,8 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, | |||
908 | enum crit_level lv = get_task_crit_level(tsk); | 904 | enum crit_level lv = get_task_crit_level(tsk); |
909 | lt_t release = 0; | 905 | lt_t release = 0; |
910 | 906 | ||
907 | BUG_ON(lv < CRIT_LEVEL_A || lv > CRIT_LEVEL_C); | ||
908 | |||
911 | TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n", | 909 | TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n", |
912 | litmus_clock(), on_runqueue, is_running); | 910 | litmus_clock(), on_runqueue, is_running); |
913 | 911 | ||
@@ -934,8 +932,7 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, | |||
934 | else { | 932 | else { |
935 | res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id); | 933 | res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id); |
936 | } | 934 | } |
937 | release = res->next_replenishment; | 935 | |
938 | |||
939 | if (on_runqueue || is_running) { | 936 | if (on_runqueue || is_running) { |
940 | /* Assumption: litmus_clock() is synchronized across cores | 937 | /* Assumption: litmus_clock() is synchronized across cores |
941 | * [see comment in pres_task_resume()] */ | 938 | * [see comment in pres_task_resume()] */ |
@@ -944,22 +941,29 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, | |||
944 | } | 941 | } |
945 | else | 942 | else |
946 | sup_update_time(&state->sup_env, litmus_clock()); | 943 | sup_update_time(&state->sup_env, litmus_clock()); |
944 | |||
947 | task_arrives(state, tsk); | 945 | task_arrives(state, tsk); |
948 | if (lv == CRIT_LEVEL_C) | 946 | if (lv == CRIT_LEVEL_C) |
949 | raw_spin_unlock(&_global_env.lock); | 947 | raw_spin_unlock(&_global_env.lock); |
950 | /* NOTE: drops state->lock */ | 948 | /* NOTE: drops state->lock */ |
949 | raw_spin_unlock(&state->lock); | ||
950 | local_irq_restore(flags); | ||
951 | |||
951 | TRACE("mc2_new()\n"); | 952 | TRACE("mc2_new()\n"); |
952 | 953 | ||
954 | raw_spin_lock(&state->lock); | ||
953 | mc2_update_timer_and_unlock(state); | 955 | mc2_update_timer_and_unlock(state); |
954 | } else { | 956 | } else { |
955 | if (lv == CRIT_LEVEL_C) | 957 | if (lv == CRIT_LEVEL_C) |
956 | raw_spin_unlock(&_global_env.lock); | 958 | raw_spin_unlock(&_global_env.lock); |
957 | raw_spin_unlock(&state->lock); | 959 | raw_spin_unlock(&state->lock); |
960 | local_irq_restore(flags); | ||
958 | } | 961 | } |
959 | local_irq_restore(flags); | 962 | release = res->next_replenishment; |
960 | 963 | ||
961 | if (!release) { | 964 | if (!release) { |
962 | TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release); | 965 | TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release); |
966 | BUG(); | ||
963 | } | 967 | } |
964 | else | 968 | else |
965 | TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n"); | 969 | TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n"); |
@@ -977,7 +981,10 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | |||
977 | unsigned long flags; | 981 | unsigned long flags; |
978 | 982 | ||
979 | if (cpu == -1) { | 983 | if (cpu == -1) { |
984 | struct next_timer_event *event, *e_next; | ||
985 | |||
980 | /* if the reservation is global reservation */ | 986 | /* if the reservation is global reservation */ |
987 | |||
981 | local_irq_save(flags); | 988 | local_irq_save(flags); |
982 | raw_spin_lock(&_global_env.lock); | 989 | raw_spin_lock(&_global_env.lock); |
983 | 990 | ||
@@ -1009,6 +1016,13 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | |||
1009 | } | 1016 | } |
1010 | } | 1017 | } |
1011 | } | 1018 | } |
1019 | /* delete corresponding events */ | ||
1020 | list_for_each_entry_safe(event, e_next, &_global_env.next_events, list) { | ||
1021 | if (event->id == reservation_id) { | ||
1022 | list_del(&event->list); | ||
1023 | kfree(event); | ||
1024 | } | ||
1025 | } | ||
1012 | 1026 | ||
1013 | raw_spin_unlock(&_global_env.lock); | 1027 | raw_spin_unlock(&_global_env.lock); |
1014 | local_irq_restore(flags); | 1028 | local_irq_restore(flags); |
@@ -1105,7 +1119,6 @@ static void mc2_task_exit(struct task_struct *tsk) | |||
1105 | mc2_update_timer_and_unlock(state); | 1119 | mc2_update_timer_and_unlock(state); |
1106 | } else { | 1120 | } else { |
1107 | raw_spin_unlock(&state->lock); | 1121 | raw_spin_unlock(&state->lock); |
1108 | |||
1109 | } | 1122 | } |
1110 | 1123 | ||
1111 | if (lv == CRIT_LEVEL_C) { | 1124 | if (lv == CRIT_LEVEL_C) { |
@@ -1412,7 +1425,7 @@ static long mc2_activate_plugin(void) | |||
1412 | struct cpu_entry *ce; | 1425 | struct cpu_entry *ce; |
1413 | 1426 | ||
1414 | gmp_init(&_global_env); | 1427 | gmp_init(&_global_env); |
1415 | raw_spin_lock_init(&_lowest_prio_cpu.lock); | 1428 | //raw_spin_lock_init(&_lowest_prio_cpu.lock); |
1416 | 1429 | ||
1417 | for_each_online_cpu(cpu) { | 1430 | for_each_online_cpu(cpu) { |
1418 | TRACE("Initializing CPU%d...\n", cpu); | 1431 | TRACE("Initializing CPU%d...\n", cpu); |
@@ -1456,7 +1469,8 @@ static void mc2_finish_switch(struct task_struct *prev) | |||
1456 | state->scheduled = is_realtime(current) ? current : NULL; | 1469 | state->scheduled = is_realtime(current) ? current : NULL; |
1457 | if (lv == CRIT_LEVEL_C) { | 1470 | if (lv == CRIT_LEVEL_C) { |
1458 | for (cpus = 0; cpus<NR_CPUS; cpus++) { | 1471 | for (cpus = 0; cpus<NR_CPUS; cpus++) { |
1459 | if (resched_cpu[cpus]) { | 1472 | if (resched_cpu[cpus] && state->cpu != cpus) { |
1473 | resched_cpu[cpus] = 0; | ||
1460 | litmus_reschedule(cpus); | 1474 | litmus_reschedule(cpus); |
1461 | } | 1475 | } |
1462 | } | 1476 | } |
diff --git a/litmus/uncachedev.c b/litmus/uncachedev.c index 06a6a7c17983..86875816c6ef 100644 --- a/litmus/uncachedev.c +++ b/litmus/uncachedev.c | |||
@@ -28,8 +28,8 @@ int litmus_uncache_vm_fault(struct vm_area_struct* vma, | |||
28 | /* modeled after SG DMA video4linux, but without DMA. */ | 28 | /* modeled after SG DMA video4linux, but without DMA. */ |
29 | /* (see drivers/media/video/videobuf-dma-sg.c) */ | 29 | /* (see drivers/media/video/videobuf-dma-sg.c) */ |
30 | struct page *page; | 30 | struct page *page; |
31 | 31 | ||
32 | page = alloc_page(GFP_USER); | 32 | page = alloc_page(GFP_USER|GFP_COLOR); |
33 | if (!page) | 33 | if (!page) |
34 | return VM_FAULT_OOM; | 34 | return VM_FAULT_OOM; |
35 | 35 | ||
diff --git a/mm/dmapool.c b/mm/dmapool.c index fd5fe4342e93..b69dc139bcc4 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c | |||
@@ -333,7 +333,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, | |||
333 | 333 | ||
334 | /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ | 334 | /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ |
335 | spin_unlock_irqrestore(&pool->lock, flags); | 335 | spin_unlock_irqrestore(&pool->lock, flags); |
336 | 336 | printk(KERN_INFO "dma_pool_alloc(): called with %x flags\n", mem_flags); | |
337 | page = pool_alloc_page(pool, mem_flags); | 337 | page = pool_alloc_page(pool, mem_flags); |
338 | if (!page) | 338 | if (!page) |
339 | return NULL; | 339 | return NULL; |
diff --git a/mm/migrate.c b/mm/migrate.c index 8dd685be20d8..29b69cd39d13 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -638,7 +638,18 @@ void replicate_page_copy(struct page *newpage, struct page *page) | |||
638 | SetPageMappedToDisk(newpage); | 638 | SetPageMappedToDisk(newpage); |
639 | 639 | ||
640 | if (PageDirty(page)) { | 640 | if (PageDirty(page)) { |
641 | BUG(); | 641 | clear_page_dirty_for_io(page); |
642 | /* | ||
643 | * Want to mark the page and the radix tree as dirty, and | ||
644 | * redo the accounting that clear_page_dirty_for_io undid, | ||
645 | * but we can't use set_page_dirty because that function | ||
646 | * is actually a signal that all of the page has become dirty. | ||
647 | * Whereas only part of our page may be dirty. | ||
648 | */ | ||
649 | if (PageSwapBacked(page)) | ||
650 | SetPageDirty(newpage); | ||
651 | else | ||
652 | __set_page_dirty_nobuffers(newpage); | ||
642 | } | 653 | } |
643 | 654 | ||
644 | /* | 655 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 90cf3ea441e0..8c22d10b0c23 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1032,7 +1032,7 @@ static inline void expand(struct zone *zone, struct page *page, | |||
1032 | } | 1032 | } |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | static inline void expand_middle(struct zone *zone, struct page *page, | 1035 | static inline int expand_middle(struct zone *zone, struct page *page, |
1036 | int offset, int low, int high, struct free_area *area, | 1036 | int offset, int low, int high, struct free_area *area, |
1037 | int migratetype) | 1037 | int migratetype) |
1038 | { | 1038 | { |
@@ -1056,8 +1056,10 @@ static inline void expand_middle(struct zone *zone, struct page *page, | |||
1056 | area->nr_free++; | 1056 | area->nr_free++; |
1057 | set_page_order(&page[0], high); | 1057 | set_page_order(&page[0], high); |
1058 | 1058 | ||
1059 | if (offset == size) | 1059 | if (offset == size) { |
1060 | return; | 1060 | //printk(KERN_INFO "offset == size %d high = %d\n", offset, high); |
1061 | return high; | ||
1062 | } | ||
1061 | 1063 | ||
1062 | area--; | 1064 | area--; |
1063 | high--; | 1065 | high--; |
@@ -1065,6 +1067,8 @@ static inline void expand_middle(struct zone *zone, struct page *page, | |||
1065 | list_add(&page[size].lru, &area->free_list[migratetype]); | 1067 | list_add(&page[size].lru, &area->free_list[migratetype]); |
1066 | area->nr_free++; | 1068 | area->nr_free++; |
1067 | set_page_order(&page[size], high); | 1069 | set_page_order(&page[size], high); |
1070 | |||
1071 | return high; | ||
1068 | } | 1072 | } |
1069 | 1073 | ||
1070 | /* | 1074 | /* |
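expand_middle() now reports the order of the sub-block that still contains the requested offset, so the colored allocation paths can keep splitting from the page that matched the caller's color/bank constraint rather than from the head of the block. The caller pattern introduced in the two functions that follow, consolidated here as a sketch (frac is the returned order):

/* Sketch of the caller side (see __rmqueue_smallest()/__rmqueue_fallback()):
 * split off everything in front of 'offset', then split the order-'frac'
 * sub-block that starts at the matched page down to the requested 'order'.
 */
if (offset == 0) {
	expand(zone, page, order, current_order, area, migratetype);
} else {
	int frac = expand_middle(zone, page, offset, order, current_order,
				 area, migratetype);
	page = &page[offset];
	area = &zone->free_area_d[area_index][frac];
	expand(zone, page, order, frac, area, migratetype);
}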
@@ -1224,52 +1228,78 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, | |||
1224 | 1228 | ||
1225 | // if (order <= 2 && color_req == 1) { | 1229 | // if (order <= 2 && color_req == 1) { |
1226 | /* The max. order of color_req is <= 2 */ | 1230 | /* The max. order of color_req is <= 2 */ |
1227 | if (color_req == 1) { | 1231 | if (color_req != 0) { |
1228 | int found = 0; | 1232 | int found = 0; |
1233 | int area_index; | ||
1229 | unsigned long s_pfn = zone->zone_start_pfn; | 1234 | unsigned long s_pfn = zone->zone_start_pfn; |
1230 | unsigned long e_pfn = zone_end_pfn(zone); | 1235 | unsigned long e_pfn = zone_end_pfn(zone); |
1231 | TRACE("COLOR PAGE requested on CPU%d with order = %d migratetype = %d\n", cpu, order, migratetype); | 1236 | |
1237 | if (color_req == 2) | ||
1238 | area_index = get_area_index(1); | ||
1239 | else | ||
1240 | area_index = get_area_index(cpu); | ||
1241 | |||
1242 | //printk(KERN_INFO "CPU%d color_request %d, area_index %d\n", cpu, color_req, area_index); | ||
1243 | //printk(KERN_INFO "COLOR PAGE requested on CPU%d with order = %d migratetype = %d\n", cpu, order, migratetype); | ||
1232 | /* Find a page of the appropriate size in the preferred list */ | 1244 | /* Find a page of the appropriate size in the preferred list */ |
1233 | for (current_order = order; current_order < MAX_PARTITIONED_ORDER; ++current_order) { | 1245 | for (current_order = order; current_order < MAX_PARTITIONED_ORDER; ++current_order) { |
1234 | int offset = 0; | 1246 | int offset = 0; |
1235 | area = &(zone->free_area_d[cpu][current_order]); | 1247 | area = &(zone->free_area_d[area_index][current_order]); |
1236 | if (list_empty(&area->free_list[migratetype])) { | 1248 | if (list_empty(&area->free_list[migratetype])) { |
1237 | TRACE("order %d list empty\n", current_order); | 1249 | //printk(KERN_INFO "P%d order %d list empty\n", cpu, current_order); |
1238 | continue; | 1250 | continue; |
1239 | } | 1251 | } |
1240 | 1252 | ||
1241 | list_for_each_entry(page, &area->free_list[migratetype], lru) { | 1253 | if (order >= MAX_CONTIG_ORDER) { // requested order >= 3 , must be uncacheable. |
1242 | TRACE("__rmqueue_smallest list entry %p color %d pfn:%05lx\n", page, page_color(page), page_to_pfn(page)); | 1254 | page = list_entry(area->free_list[migratetype].next, struct page, lru); |
1243 | if (current_order < MAX_CONTIG_ORDER) { | 1255 | found = 1; |
1244 | if (is_in_llc_partition(page, cpu) && (page_to_pfn(page) >= s_pfn && page_to_pfn(page) < e_pfn)) { | 1256 | } else { |
1245 | found = 1; | 1257 | list_for_each_entry(page, &area->free_list[migratetype], lru) { |
1246 | offset = 0; | 1258 | //printk(KERN_INFO "P%d __rmqueue_smallest order [%d] list entry %p color %d pfn:%05lx\n", cpu, current_order, page, page_color(page), page_to_pfn(page)); |
1247 | break; | 1259 | if (current_order < MAX_CONTIG_ORDER) { |
1248 | } | 1260 | if (is_in_llc_partition(page, cpu) && (page_to_pfn(page) >= s_pfn && page_to_pfn(page) < e_pfn)) { |
1249 | } else { // order >= 3 , must be uncacheable. | 1261 | offset = 0; |
1250 | int size = 1 << current_order; | ||
1251 | for (offset = 0; offset < size; offset += 4) { | ||
1252 | if (is_in_llc_partition(&page[offset], cpu) && (page_to_pfn(&page[offset]) >= s_pfn && page_to_pfn(&page[offset]) < e_pfn)) { | ||
1253 | found = 1; | 1262 | found = 1; |
1254 | break; | 1263 | break; |
1255 | } | 1264 | } |
1265 | } else { | ||
1266 | int size = 1 << current_order; | ||
1267 | for (offset = 0; offset < size; offset++) { | ||
1268 | if (is_in_llc_partition(&page[offset], cpu) && (page_to_pfn(&page[offset]) >= s_pfn && page_to_pfn(&page[offset]) < e_pfn)) { | ||
1269 | found = 1; | ||
1270 | break; | ||
1271 | } | ||
1272 | } | ||
1273 | if (found) | ||
1274 | break; | ||
1256 | } | 1275 | } |
1257 | if (found) | ||
1258 | break; | ||
1259 | } | 1276 | } |
1260 | } | 1277 | } |
1261 | 1278 | ||
1262 | TRACE("__rmqueue_smallest LAST list entry %p\n", page); | 1279 | //printk(KERN_INFO "P%d __rmqueue_smallest LAST list entry %p\n", cpu, page); |
1263 | 1280 | ||
1264 | if (!found) | 1281 | if (!found) |
1265 | return NULL; | 1282 | continue; |
1283 | //printk(KERN_INFO "P%d __rmqueue_smallest LAST list entry %p, order %d current_order %d offset %d\n", cpu, page, order, current_order, offset); | ||
1266 | 1284 | ||
1267 | list_del(&page->lru); | 1285 | list_del(&page->lru); |
1268 | rmv_page_order(page); | 1286 | rmv_page_order(page); |
1269 | area->nr_free--; | 1287 | area->nr_free--; |
1270 | expand(zone, page, order, current_order, area, migratetype); | 1288 | |
1289 | if (offset == 0) { | ||
1290 | expand(zone, page, order, current_order, area, migratetype); | ||
1291 | } else { | ||
1292 | int frac = expand_middle(zone, page, offset, order, current_order, area, migratetype); | ||
1293 | page = &page[offset]; | ||
1294 | //list_del(&page->lru); | ||
1295 | //rmv_page_order(page); | ||
1296 | area = &(zone->free_area_d[area_index][frac]); | ||
1297 | //area->nr_free--; | ||
1298 | expand(zone, page, order, frac, area, migratetype); | ||
1299 | } | ||
1300 | |||
1271 | set_freepage_migratetype(page, migratetype); | 1301 | set_freepage_migratetype(page, migratetype); |
1272 | TRACE("COLOR %d page return %p\n", page_color(page), page); | 1302 | //printk(KERN_INFO "__rmqueue_smallest(): CPU%d COLOR %d BANK %d page return %p pfn:%05lx\n", cpu, page_color(page), page_bank(page), page, page_to_pfn(page)); |
1273 | return page; | 1303 | return page; |
1274 | } | 1304 | } |
1275 | } else { | 1305 | } else { |
@@ -1511,47 +1541,59 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype, | |||
1511 | int fallback_mt; | 1541 | int fallback_mt; |
1512 | bool can_steal; | 1542 | bool can_steal; |
1513 | 1543 | ||
1514 | if (color_req == 1) { | 1544 | if (color_req != 0) { |
1515 | int cpu = raw_smp_processor_id(); | 1545 | int cpu = raw_smp_processor_id(); |
1516 | int found = 0; | 1546 | int found = 0; |
1547 | int area_index; | ||
1517 | unsigned long s_pfn = zone->zone_start_pfn; | 1548 | unsigned long s_pfn = zone->zone_start_pfn; |
1518 | unsigned long e_pfn = zone_end_pfn(zone); | 1549 | unsigned long e_pfn = zone_end_pfn(zone); |
1519 | 1550 | ||
1551 | if (color_req == 2) | ||
1552 | area_index = get_area_index(1); | ||
1553 | else | ||
1554 | area_index = get_area_index(cpu); | ||
1555 | |||
1520 | /* Find the largest possible block of pages in the other list */ | 1556 | /* Find the largest possible block of pages in the other list */ |
1521 | for (current_order = MAX_PARTITIONED_ORDER-1; | 1557 | for (current_order = MAX_PARTITIONED_ORDER-1; |
1522 | current_order >= order && current_order <= MAX_PARTITIONED_ORDER-1; | 1558 | current_order >= order && current_order <= MAX_PARTITIONED_ORDER-1; |
1523 | --current_order) { | 1559 | --current_order) { |
1524 | int offset = 0; | 1560 | int offset = 0; |
1525 | area = &(zone->free_area_d[cpu][current_order]); | 1561 | area = &(zone->free_area_d[area_index][current_order]); |
1526 | fallback_mt = find_suitable_fallback(area, current_order, | 1562 | fallback_mt = find_suitable_fallback(area, current_order, |
1527 | start_migratetype, false, &can_steal); | 1563 | start_migratetype, false, &can_steal); |
1528 | if (fallback_mt == -1) | 1564 | if (fallback_mt == -1) |
1529 | continue; | 1565 | continue; |
1530 | 1566 | ||
1531 | list_for_each_entry(page, &area->free_list[fallback_mt], lru) { | 1567 | if (order >= MAX_CONTIG_ORDER) { |
1532 | TRACE("__rmqueue_fallback list entry %p color %d pfn:%05lx\n", page, page_color(page), page_to_pfn(page)); | 1568 | page = list_entry(area->free_list[fallback_mt].next, struct page, lru); |
1533 | if (current_order < MAX_CONTIG_ORDER) { | 1569 | found = 1; |
1534 | if (is_in_llc_partition(page, cpu) && (page_to_pfn(page) >= s_pfn && page_to_pfn(page) < e_pfn)) { | 1570 | //printk(KERN_INFO "__rmqueue_fallback order >= MAX_CONTIG_ORDER page = %p color %d pfn:%05lx\n", page, page_color(page), page_to_pfn(page)); |
1535 | found = 1; | 1571 | } else { |
1536 | offset = 0; | 1572 | list_for_each_entry(page, &area->free_list[fallback_mt], lru) { |
1537 | break; | 1573 | //printk(KERN_INFO "__rmqueue_fallback list entry %p color %d pfn:%05lx\n", page, page_color(page), page_to_pfn(page)); |
1538 | } | 1574 | if (current_order < MAX_CONTIG_ORDER) { |
1539 | } else { // order >= 3 , must be uncacheable. | 1575 | if (is_in_llc_partition(page, cpu) && (page_to_pfn(page) >= s_pfn && page_to_pfn(page) < e_pfn)) { |
1540 | int size = 1 << current_order; | ||
1541 | for (offset = 0; offset < size; offset += 4) { | ||
1542 | if (is_in_llc_partition(&page[offset], cpu) && (page_to_pfn(&page[offset]) >= s_pfn && page_to_pfn(&page[offset]) < e_pfn)) { | ||
1543 | found = 1; | 1576 | found = 1; |
1577 | offset = 0; | ||
1544 | break; | 1578 | break; |
1545 | } | 1579 | } |
1580 | } else { | ||
1581 | int size = 1 << current_order; | ||
1582 | for (offset = 0; offset < size; offset++) { | ||
1583 | if (is_in_llc_partition(&page[offset], cpu) && (page_to_pfn(&page[offset]) >= s_pfn && page_to_pfn(&page[offset]) < e_pfn)) { | ||
1584 | found = 1; | ||
1585 | break; | ||
1586 | } | ||
1587 | } | ||
1588 | if (found) | ||
1589 | break; | ||
1546 | } | 1590 | } |
1547 | if (found) | ||
1548 | break; | ||
1549 | } | 1591 | } |
1550 | } | 1592 | } |
1551 | TRACE("__rmqueue_fallback LAST list entry %p\n", page); | 1593 | //printk(KERN_INFO "__rmqueue_fallback LAST list entry %p\n", page); |
1552 | 1594 | ||
1553 | if (!found) | 1595 | if (!found) |
1554 | return NULL; | 1596 | continue; |
1555 | 1597 | ||
1556 | if (can_steal) | 1598 | if (can_steal) |
1557 | steal_suitable_fallback(zone, page, start_migratetype); | 1599 | steal_suitable_fallback(zone, page, start_migratetype); |
@@ -1561,7 +1603,19 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype, | |||
1561 | list_del(&page->lru); | 1603 | list_del(&page->lru); |
1562 | rmv_page_order(page); | 1604 | rmv_page_order(page); |
1563 | 1605 | ||
1564 | expand(zone, page, order, current_order, area, start_migratetype); | 1606 | if (offset == 0) |
1607 | expand(zone, page, order, current_order, area, start_migratetype); | ||
1608 | else { | ||
1609 | int frac = expand_middle(zone, page, offset, order, current_order, area, start_migratetype); | ||
1610 | page = &page[offset]; | ||
1611 | //list_del(&page->lru); | ||
1612 | //rmv_page_order(page); | ||
1613 | area = &(zone->free_area_d[area_index][frac]); | ||
1614 | //area->nr_free--; | ||
1615 | expand(zone, page, order, frac, area, start_migratetype); | ||
1616 | |||
1617 | } | ||
1618 | |||
1565 | 1619 | ||
1566 | /* | 1620 | /* |
1567 | * The freepage_migratetype may differ from pageblock's | 1621 | * The freepage_migratetype may differ from pageblock's |
@@ -1576,7 +1630,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype, | |||
1576 | trace_mm_page_alloc_extfrag(page, order, current_order, | 1630 | trace_mm_page_alloc_extfrag(page, order, current_order, |
1577 | start_migratetype, fallback_mt); | 1631 | start_migratetype, fallback_mt); |
1578 | 1632 | ||
1579 | TRACE("__rmqueue_fallback(): CPU%d COLOR %d page return %p pfn:%05lx\n", cpu, page_color(page), page, page_to_pfn(page)); | 1633 | //printk(KERN_INFO "__rmqueue_fallback(): CPU%d COLOR %d BANK %d page return %p pfn:%05lx\n", cpu, page_color(page), page_bank(page), page, page_to_pfn(page)); |
1580 | return page; | 1634 | return page; |
1581 | } | 1635 | } |
1582 | } else { | 1636 | } else { |
@@ -1640,9 +1694,10 @@ retry_reserve: | |||
1640 | 1694 | ||
1641 | if (!page) { | 1695 | if (!page) { |
1642 | page = __rmqueue_fallback(zone, order, migratetype, color_req); | 1696 | page = __rmqueue_fallback(zone, order, migratetype, color_req); |
1643 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 1697 | #if 0 |
1698 | //#ifdef CONFIG_SCHED_DEBUG_TRACE | ||
1644 | if (color_req) | 1699 | if (color_req) |
1645 | TRACE("page received from __rmqueue_fallback()"); | 1700 | printk(KERN_INFO "page received from __rmqueue_fallback()"); |
1646 | #endif | 1701 | #endif |
1647 | } | 1702 | } |
1648 | 1703 | ||
@@ -1651,7 +1706,7 @@ retry_reserve: | |||
1651 | * is used because __rmqueue_smallest is an inline function | 1706 | * is used because __rmqueue_smallest is an inline function |
1652 | * and we want just one call site | 1707 | * and we want just one call site |
1653 | */ | 1708 | */ |
1654 | if (!page) { | 1709 | if (!page && !color_req) { |
1655 | migratetype = MIGRATE_RESERVE; | 1710 | migratetype = MIGRATE_RESERVE; |
1656 | goto retry_reserve; | 1711 | goto retry_reserve; |
1657 | } | 1712 | } |
@@ -1897,9 +1952,11 @@ void free_hot_cold_page(struct page *page, bool cold) | |||
1897 | 1952 | ||
1898 | if (bank_to_partition(page_bank(page)) == NR_CPUS) | 1953 | if (bank_to_partition(page_bank(page)) == NR_CPUS) |
1899 | __count_vm_event(PGFREE); | 1954 | __count_vm_event(PGFREE); |
1900 | else if (bank_to_partition(page_bank(page)) < NR_CPUS) | 1955 | else if (bank_to_partition(page_bank(page)) < NR_CPUS) { |
1901 | //__count_vm_event(PGFREE_HC); | 1956 | __count_vm_event(PGFREE_HC); |
1902 | BUG(); | 1957 | free_one_page(zone, page, pfn, 0, migratetype); |
1958 | goto out; | ||
1959 | } | ||
1903 | 1960 | ||
1904 | /* | 1961 | /* |
1905 | * We only track unmovable, reclaimable and movable on pcp lists. | 1962 | * We only track unmovable, reclaimable and movable on pcp lists. |
@@ -1954,8 +2011,12 @@ void free_hot_cold_page_list(struct list_head *list, bool cold) | |||
1954 | struct page *page, *next; | 2011 | struct page *page, *next; |
1955 | 2012 | ||
1956 | list_for_each_entry_safe(page, next, list, lru) { | 2013 | list_for_each_entry_safe(page, next, list, lru) { |
2014 | int parti_no = bank_to_partition(page_bank(page)); | ||
1957 | trace_mm_page_free_batched(page, cold); | 2015 | trace_mm_page_free_batched(page, cold); |
1958 | free_hot_cold_page(page, cold); | 2016 | if (parti_no == NR_CPUS) |
2017 | free_hot_cold_page(page, cold); | ||
2018 | else | ||
2019 | __free_pages_ok(page, 0); | ||
1959 | } | 2020 | } |
1960 | } | 2021 | } |
1961 | 2022 | ||
@@ -2069,11 +2130,15 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, | |||
2069 | unsigned long flags; | 2130 | unsigned long flags; |
2070 | struct page *page; | 2131 | struct page *page; |
2071 | bool cold = ((gfp_flags & __GFP_COLD) != 0); | 2132 | bool cold = ((gfp_flags & __GFP_COLD) != 0); |
2072 | bool colored_req = ((gfp_flags & __GFP_COLOR) != 0); | 2133 | int colored_req = ((gfp_flags & __GFP_COLOR) != 0); |
2073 | 2134 | ||
2074 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 2135 | if ((gfp_flags & __GFP_CPU1) != 0) |
2136 | colored_req = 2; | ||
2137 | |||
2138 | //#ifdef CONFIG_SCHED_DEBUG_TRACE | ||
2139 | #if 0 | ||
2075 | if (colored_req) | 2140 | if (colored_req) |
2076 | TRACE("buffered_rmqueue(): colored_req received\n"); | 2141 | printk(KERN_INFO "buffered_rmqueue(): colored_req %d received\n", colored_req); |
2077 | #endif | 2142 | #endif |
2078 | 2143 | ||
2079 | if (likely(order == 0) && !colored_req) { | 2144 | if (likely(order == 0) && !colored_req) { |
@@ -3226,9 +3291,10 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, | |||
3226 | .migratetype = gfpflags_to_migratetype(gfp_mask), | 3291 | .migratetype = gfpflags_to_migratetype(gfp_mask), |
3227 | }; | 3292 | }; |
3228 | 3293 | ||
3229 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 3294 | //#ifdef CONFIG_SCHED_DEBUG_TRACE |
3295 | #if 0 | ||
3230 | if (gfp_mask&GFP_COLOR) | 3296 | if (gfp_mask&GFP_COLOR) |
3231 | TRACE("__alloc_pages_nodemask(): called gfp %08x gfp_allowed_mask %08x mt = %d\n", gfp_mask, gfp_allowed_mask, ac.migratetype); | 3297 | printk(KERN_INFO "__alloc_pages_nodemask(): called gfp %08x gfp_allowed_mask %08x mt = %d\n", gfp_mask, gfp_allowed_mask, ac.migratetype); |
3232 | #endif | 3298 | #endif |
3233 | 3299 | ||
3234 | gfp_mask &= gfp_allowed_mask; | 3300 | gfp_mask &= gfp_allowed_mask; |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -115,6 +115,26 @@ | |||
115 | * the fast path and disables lockless freelists. | 115 | * the fast path and disables lockless freelists. |
116 | */ | 116 | */ |
117 | 117 | ||
118 | // This address decoding is used on the i.MX6 SABRE SD platform | ||
119 | #define BANK_MASK 0x38000000 | ||
120 | #define BANK_SHIFT 27 | ||
121 | |||
122 | #define CACHE_MASK 0x0000f000 | ||
123 | #define CACHE_SHIFT 12 | ||
124 | #define MAX_COLOR_NODE 128 | ||
125 | |||
126 | /* Decoding page color, 0~15 */ | ||
127 | static inline unsigned int page_color(struct page *page) | ||
128 | { | ||
129 | return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT); | ||
130 | } | ||
131 | |||
132 | /* Decoding page bank number, 0~7 */ | ||
133 | static inline unsigned int page_bank(struct page *page) | ||
134 | { | ||
135 | return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT); | ||
136 | } | ||
137 | |||
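These helpers decode the DRAM bank and LLC color directly from a page's physical address, using the i.MX6 layout named in the comment above: bank in bits 29:27, color in bits 15:12. A worked example with a hypothetical physical address:

/* phys = 0x2A1B5000 (hypothetical):
 *   bank  = (phys & BANK_MASK)  >> BANK_SHIFT  = 0x28000000 >> 27 = 5
 *   color = (phys & CACHE_MASK) >> CACHE_SHIFT = 0x00005000 >> 12 = 5
 * i.e. the page lives in DRAM bank 5 and maps to LLC color 5 (of 0..15).
 */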
118 | static inline int kmem_cache_debug(struct kmem_cache *s) | 138 | static inline int kmem_cache_debug(struct kmem_cache *s) |
119 | { | 139 | { |
120 | #ifdef CONFIG_SLUB_DEBUG | 140 | #ifdef CONFIG_SLUB_DEBUG |
@@ -1322,8 +1342,8 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s, | |||
1322 | 1342 | ||
1323 | if (node == NUMA_NO_NODE) { | 1343 | if (node == NUMA_NO_NODE) { |
1324 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 1344 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
1325 | if (flags&GFP_COLOR) | 1345 | // if (flags&GFP_COLOR) |
1326 | printk(KERN_INFO "alloc_pages calls with GFP_COLOR order = %d\n", order); | 1346 | // printk(KERN_INFO "alloc_pages calls with GFP_COLOR order = %d\n", order); |
1327 | #endif | 1347 | #endif |
1328 | page = alloc_pages(flags, order); | 1348 | page = alloc_pages(flags, order); |
1329 | } | 1349 | } |
@@ -1343,8 +1363,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) | |||
1343 | gfp_t alloc_gfp; | 1363 | gfp_t alloc_gfp; |
1344 | 1364 | ||
1345 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 1365 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
1346 | if (flags&GFP_COLOR) | 1366 | // if (flags&GFP_COLOR) |
1347 | printk(KERN_INFO "gfp_allowed_mask = %08x\n", gfp_allowed_mask); | 1367 | // printk(KERN_INFO "gfp_allowed_mask = %08x\n", gfp_allowed_mask); |
1348 | #endif | 1368 | #endif |
1349 | 1369 | ||
1350 | flags &= gfp_allowed_mask; | 1370 | flags &= gfp_allowed_mask; |
@@ -1361,8 +1381,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) | |||
1361 | alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; | 1381 | alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; |
1362 | 1382 | ||
1363 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 1383 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
1364 | if (flags&__GFP_COLOR) | 1384 | // if (flags&__GFP_COLOR) |
1365 | printk(KERN_INFO "allocate_slab with GFP_COLOR alloc_gfp = %08x\n", alloc_gfp); | 1385 | // printk(KERN_INFO "allocate_slab with GFP_COLOR alloc_gfp = %08x\n", alloc_gfp); |
1366 | #endif | 1386 | #endif |
1367 | 1387 | ||
1368 | page = alloc_slab_page(s, alloc_gfp, node, oo); | 1388 | page = alloc_slab_page(s, alloc_gfp, node, oo); |
@@ -2240,8 +2260,8 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, | |||
2240 | page = new_slab(s, flags, node); | 2260 | page = new_slab(s, flags, node); |
2241 | 2261 | ||
2242 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 2262 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
2243 | if (flags&GFP_COLOR) | 2263 | // if (flags&GFP_COLOR) |
2244 | printk(KERN_INFO "new_slab_objects(): gets page %p\n", page); | 2264 | // printk(KERN_INFO "new_slab_objects(): gets page %p color %d, bank %d\n", page, page_color(page), page_bank(page)); |
2245 | #endif | 2265 | #endif |
2246 | 2266 | ||
2247 | if (page) { | 2267 | if (page) { |
@@ -2331,8 +2351,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, | |||
2331 | unsigned long flags; | 2351 | unsigned long flags; |
2332 | 2352 | ||
2333 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 2353 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
2334 | if (gfpflags&GFP_COLOR) | 2354 | // if (gfpflags&GFP_COLOR) |
2335 | printk(KERN_INFO "__slab_alloc slow_path\n"); | 2355 | // printk(KERN_INFO "__slab_alloc slow_path\n"); |
2336 | #endif | 2356 | #endif |
2337 | 2357 | ||
2338 | local_irq_save(flags); | 2358 | local_irq_save(flags); |
@@ -2346,8 +2366,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, | |||
2346 | #endif | 2366 | #endif |
2347 | 2367 | ||
2348 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 2368 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
2349 | if (gfpflags&GFP_COLOR) | 2369 | // if (gfpflags&GFP_COLOR) |
2350 | printk(KERN_INFO "__slab_alloc : page %p, partial %p\n", c->page, c->partial); | 2370 | // printk(KERN_INFO "__slab_alloc : page %p, partial %p\n", c->page, c->partial); |
2351 | #endif | 2371 | #endif |
2352 | 2372 | ||
2353 | page = c->page; | 2373 | page = c->page; |
@@ -3340,8 +3360,8 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
3340 | void *ret; | 3360 | void *ret; |
3341 | 3361 | ||
3342 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 3362 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
3343 | if (flags & GFP_COLOR) | 3363 | // if (flags & GFP_COLOR) |
3344 | printk(KERN_INFO "kmalloc size %d\n", size); | 3364 | // printk(KERN_INFO "kmalloc size %d\n", size); |
3345 | #endif | 3365 | #endif |
3346 | 3366 | ||
3347 | if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) | 3367 | if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) |
@@ -3350,16 +3370,16 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
3350 | s = kmalloc_slab(size, flags); | 3370 | s = kmalloc_slab(size, flags); |
3351 | 3371 | ||
3352 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 3372 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
3353 | if (flags & GFP_COLOR) | 3373 | // if (flags & GFP_COLOR) |
3354 | printk(KERN_INFO "kmalloc_slab %p\n", s); | 3374 | // printk(KERN_INFO "kmalloc_slab %p\n", s); |
3355 | #endif | 3375 | #endif |
3356 | 3376 | ||
3357 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3377 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3358 | return s; | 3378 | return s; |
3359 | 3379 | ||
3360 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 3380 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
3361 | if (flags & GFP_COLOR) | 3381 | // if (flags & GFP_COLOR) |
3362 | printk(KERN_INFO "slab_alloc calls!!\n"); | 3382 | // printk(KERN_INFO "slab_alloc calls!!\n"); |
3363 | #endif | 3383 | #endif |
3364 | 3384 | ||
3365 | ret = slab_alloc(s, flags, _RET_IP_); | 3385 | ret = slab_alloc(s, flags, _RET_IP_); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 27600f419126..7a6d7de8fff8 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1803,6 +1803,47 @@ void *vmalloc_user(unsigned long size) | |||
1803 | EXPORT_SYMBOL(vmalloc_user); | 1803 | EXPORT_SYMBOL(vmalloc_user); |
1804 | 1804 | ||
1805 | /** | 1805 | /** |
1806 | * vmalloc_color_user - allocate zeroed virtually contiguous memory for userspace | ||
1807 | * @size: allocation size | ||
1808 | * | ||
1809 | * The resulting memory area is zeroed so it can be mapped to userspace | ||
1810 | * without leaking data. | ||
1811 | */ | ||
1812 | void *vmalloc_color_user(unsigned long size) | ||
1813 | { | ||
1814 | struct vm_struct *area; | ||
1815 | void *ret; | ||
1816 | |||
1817 | ret = __vmalloc_node(size, SHMLBA, | ||
1818 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | GFP_COLOR, | ||
1819 | PAGE_KERNEL, NUMA_NO_NODE, | ||
1820 | __builtin_return_address(0)); | ||
1821 | if (ret) { | ||
1822 | area = find_vm_area(ret); | ||
1823 | area->flags |= VM_USERMAP; | ||
1824 | } | ||
1825 | return ret; | ||
1826 | } | ||
1827 | EXPORT_SYMBOL(vmalloc_color_user); | ||
1828 | |||
1829 | void *vmalloc_color_user_cpu1(unsigned long size) | ||
1830 | { | ||
1831 | struct vm_struct *area; | ||
1832 | void *ret; | ||
1833 | |||
1834 | ret = __vmalloc_node(size, SHMLBA, | ||
1835 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | GFP_COLOR | GFP_CPU1, | ||
1836 | PAGE_KERNEL, NUMA_NO_NODE, | ||
1837 | __builtin_return_address(0)); | ||
1838 | if (ret) { | ||
1839 | area = find_vm_area(ret); | ||
1840 | area->flags |= VM_USERMAP; | ||
1841 | } | ||
1842 | return ret; | ||
1843 | } | ||
1844 | EXPORT_SYMBOL(vmalloc_color_user_cpu1); | ||
1845 | |||
1846 | /** | ||
1806 | * vmalloc_node - allocate memory on a specific node | 1847 | * vmalloc_node - allocate memory on a specific node |
1807 | * @size: allocation size | 1848 | * @size: allocation size |
1808 | * @node: numa node | 1849 | * @node: numa node |
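
vmalloc_color_user_cpu1() above is a byte-for-byte copy of vmalloc_color_user() apart from the extra GFP_CPU1 bit. A possible consolidation, sketched under the assumption that both stay in mm/vmalloc.c (the __vmalloc_color_user() helper name is illustrative, not part of the patch); passing the caller address through keeps vmalloc's caller tracking identical to the open-coded versions:

static void *__vmalloc_color_user(unsigned long size, gfp_t extra_gfp,
                                  const void *caller)
{
        struct vm_struct *area;
        void *ret;

        ret = __vmalloc_node(size, SHMLBA,
                             GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
                             GFP_COLOR | extra_gfp,
                             PAGE_KERNEL, NUMA_NO_NODE, caller);
        if (ret) {
                area = find_vm_area(ret);
                area->flags |= VM_USERMAP;
        }
        return ret;
}

void *vmalloc_color_user(unsigned long size)
{
        return __vmalloc_color_user(size, 0, __builtin_return_address(0));
}

void *vmalloc_color_user_cpu1(unsigned long size)
{
        return __vmalloc_color_user(size, GFP_CPU1,
                                    __builtin_return_address(0));
}
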
diff --git a/net/core/dev.c b/net/core/dev.c index aa82f9ab6a36..f02d0c582e84 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -4629,6 +4629,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) | |||
4629 | */ | 4629 | */ |
4630 | work = 0; | 4630 | work = 0; |
4631 | if (test_bit(NAPI_STATE_SCHED, &n->state)) { | 4631 | if (test_bit(NAPI_STATE_SCHED, &n->state)) { |
4632 | // fec_enet_rx_napi() is called | ||
4632 | work = n->poll(n, weight); | 4633 | work = n->poll(n, weight); |
4633 | trace_napi_poll(n); | 4634 | trace_napi_poll(n); |
4634 | } | 4635 | } |
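
The added comment records that, on this board, n->poll resolves to fec_enet_rx_napi(). That binding is made by the FEC driver at probe time; the call below is a sketch of the registration side (field and variable names follow the fec driver from memory, not from this patch):

/* In the FEC probe path: register the NAPI poll callback that the
 * generic napi_poll() loop above ends up invoking via n->poll().
 */
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
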
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 41ec02242ea7..92f091ce1d47 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -77,6 +77,13 @@ | |||
77 | #include <linux/capability.h> | 77 | #include <linux/capability.h> |
78 | #include <linux/user_namespace.h> | 78 | #include <linux/user_namespace.h> |
79 | 79 | ||
80 | #define ENABLE_WORST_CASE 1 | ||
81 | #ifdef ENABLE_WORST_CASE | ||
82 | #define SKB_FLAG (GFP_COLOR|GFP_CPU1) | ||
83 | #else | ||
84 | #define SKB_FLAG (0) | ||
85 | #endif | ||
86 | |||
80 | struct kmem_cache *skbuff_head_cache __read_mostly; | 87 | struct kmem_cache *skbuff_head_cache __read_mostly; |
81 | static struct kmem_cache *skbuff_fclone_cache __read_mostly; | 88 | static struct kmem_cache *skbuff_fclone_cache __read_mostly; |
82 | 89 | ||
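
Note that the guard is #ifdef, so it only tests whether ENABLE_WORST_CASE is defined; changing the 1 to 0 would not clear SKB_FLAG, only deleting the define would. If a value-controlled switch is wanted, a sketch of the #if form (same flags, only the preprocessor test changes):

#define ENABLE_WORST_CASE 1

#if ENABLE_WORST_CASE
#define SKB_FLAG (GFP_COLOR | GFP_CPU1)
#else
#define SKB_FLAG (0)
#endif
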
@@ -133,14 +140,14 @@ static void *__kmalloc_reserve(size_t size, gfp_t flags, int node, | |||
133 | * to the reserves, fail. | 140 | * to the reserves, fail. |
134 | */ | 141 | */ |
135 | obj = kmalloc_node_track_caller(size, | 142 | obj = kmalloc_node_track_caller(size, |
136 | flags | __GFP_NOMEMALLOC | __GFP_NOWARN, | 143 | flags | __GFP_NOMEMALLOC | __GFP_NOWARN | SKB_FLAG, |
137 | node); | 144 | node); |
138 | if (obj || !(gfp_pfmemalloc_allowed(flags))) | 145 | if (obj || !(gfp_pfmemalloc_allowed(flags))) |
139 | goto out; | 146 | goto out; |
140 | 147 | ||
141 | /* Try again but now we are using pfmemalloc reserves */ | 148 | /* Try again but now we are using pfmemalloc reserves */ |
142 | ret_pfmemalloc = true; | 149 | ret_pfmemalloc = true; |
143 | obj = kmalloc_node_track_caller(size, flags, node); | 150 | obj = kmalloc_node_track_caller(size, flags | SKB_FLAG, node); |
144 | 151 | ||
145 | out: | 152 | out: |
146 | if (pfmemalloc) | 153 | if (pfmemalloc) |
@@ -161,7 +168,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) | |||
161 | 168 | ||
162 | /* Get the HEAD */ | 169 | /* Get the HEAD */ |
163 | skb = kmem_cache_alloc_node(skbuff_head_cache, | 170 | skb = kmem_cache_alloc_node(skbuff_head_cache, |
164 | gfp_mask & ~__GFP_DMA, node); | 171 | (gfp_mask & ~__GFP_DMA) | SKB_FLAG, node); |
165 | if (!skb) | 172 | if (!skb) |
166 | goto out; | 173 | goto out; |
167 | 174 | ||
@@ -213,7 +220,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, | |||
213 | gfp_mask |= __GFP_MEMALLOC; | 220 | gfp_mask |= __GFP_MEMALLOC; |
214 | 221 | ||
215 | /* Get the HEAD */ | 222 | /* Get the HEAD */ |
216 | skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); | 223 | skb = kmem_cache_alloc_node(cache, (gfp_mask & ~__GFP_DMA) | SKB_FLAG, node); |
217 | if (!skb) | 224 | if (!skb) |
218 | goto out; | 225 | goto out; |
219 | prefetchw(skb); | 226 | prefetchw(skb); |
@@ -225,7 +232,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, | |||
225 | */ | 232 | */ |
226 | size = SKB_DATA_ALIGN(size); | 233 | size = SKB_DATA_ALIGN(size); |
227 | size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | 234 | size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
228 | data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc); | 235 | data = kmalloc_reserve(size, gfp_mask | SKB_FLAG, node, &pfmemalloc); |
229 | if (!data) | 236 | if (!data) |
230 | goto nodata; | 237 | goto nodata; |
231 | /* kmalloc(size) might give us more room than requested. | 238 | /* kmalloc(size) might give us more room than requested. |
@@ -304,7 +311,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size) | |||
304 | struct sk_buff *skb; | 311 | struct sk_buff *skb; |
305 | unsigned int size = frag_size ? : ksize(data); | 312 | unsigned int size = frag_size ? : ksize(data); |
306 | 313 | ||
307 | skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); | 314 | skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC | SKB_FLAG); |
308 | if (!skb) | 315 | if (!skb) |
309 | return NULL; | 316 | return NULL; |
310 | 317 | ||
@@ -367,12 +374,12 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc, | |||
367 | if (order) { | 374 | if (order) { |
368 | gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | | 375 | gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | |
369 | __GFP_NOMEMALLOC; | 376 | __GFP_NOMEMALLOC; |
370 | page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); | 377 | page = alloc_pages_node(NUMA_NO_NODE, gfp_mask | SKB_FLAG, order); |
371 | nc->frag.size = PAGE_SIZE << (page ? order : 0); | 378 | nc->frag.size = PAGE_SIZE << (page ? order : 0); |
372 | } | 379 | } |
373 | 380 | ||
374 | if (unlikely(!page)) | 381 | if (unlikely(!page)) |
375 | page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); | 382 | page = alloc_pages_node(NUMA_NO_NODE, gfp | SKB_FLAG, 0); |
376 | 383 | ||
377 | nc->frag.page = page; | 384 | nc->frag.page = page; |
378 | 385 | ||
@@ -389,7 +396,7 @@ static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache, | |||
389 | 396 | ||
390 | if (unlikely(!page)) { | 397 | if (unlikely(!page)) { |
391 | refill: | 398 | refill: |
392 | page = __page_frag_refill(nc, gfp_mask); | 399 | page = __page_frag_refill(nc, gfp_mask | SKB_FLAG); |
393 | if (!page) | 400 | if (!page) |
394 | return NULL; | 401 | return NULL; |
395 | 402 | ||
@@ -434,7 +441,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) | |||
434 | void *data; | 441 | void *data; |
435 | 442 | ||
436 | local_irq_save(flags); | 443 | local_irq_save(flags); |
437 | data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask); | 444 | data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask | SKB_FLAG); |
438 | local_irq_restore(flags); | 445 | local_irq_restore(flags); |
439 | return data; | 446 | return data; |
440 | } | 447 | } |
@@ -448,18 +455,18 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) | |||
448 | */ | 455 | */ |
449 | void *netdev_alloc_frag(unsigned int fragsz) | 456 | void *netdev_alloc_frag(unsigned int fragsz) |
450 | { | 457 | { |
451 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); | 458 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD | SKB_FLAG); |
452 | } | 459 | } |
453 | EXPORT_SYMBOL(netdev_alloc_frag); | 460 | EXPORT_SYMBOL(netdev_alloc_frag); |
454 | 461 | ||
455 | static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) | 462 | static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) |
456 | { | 463 | { |
457 | return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask); | 464 | return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask | SKB_FLAG); |
458 | } | 465 | } |
459 | 466 | ||
460 | void *napi_alloc_frag(unsigned int fragsz) | 467 | void *napi_alloc_frag(unsigned int fragsz) |
461 | { | 468 | { |
462 | return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); | 469 | return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD | SKB_FLAG); |
463 | } | 470 | } |
464 | EXPORT_SYMBOL(napi_alloc_frag); | 471 | EXPORT_SYMBOL(napi_alloc_frag); |
465 | 472 | ||
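
SKB_FLAG is OR'd in at every level of the frag path (netdev_alloc_frag() and napi_alloc_frag(), their __ helpers, __alloc_page_frag(), and __page_frag_refill()), so the bits reach the page allocator no matter which entry point a driver uses. Because gfp_t is a plain bitmask, repeating the OR is harmless; a toy illustration:

gfp_t gfp = GFP_ATOMIC | __GFP_COLD;

gfp |= SKB_FLAG;        /* netdev_alloc_frag()    */
gfp |= SKB_FLAG;        /* __netdev_alloc_frag()  */
gfp |= SKB_FLAG;        /* __alloc_page_frag()    */
gfp |= SKB_FLAG;        /* __page_frag_refill()   */
/* gfp is still GFP_ATOMIC | __GFP_COLD | GFP_COLOR | GFP_CPU1 */
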
@@ -493,8 +500,8 @@ static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask, | |||
493 | gfp_mask |= __GFP_MEMALLOC; | 500 | gfp_mask |= __GFP_MEMALLOC; |
494 | 501 | ||
495 | data = (flags & SKB_ALLOC_NAPI) ? | 502 | data = (flags & SKB_ALLOC_NAPI) ? |
496 | __napi_alloc_frag(fragsz, gfp_mask) : | 503 | __napi_alloc_frag(fragsz, gfp_mask | SKB_FLAG) : |
497 | __netdev_alloc_frag(fragsz, gfp_mask); | 504 | __netdev_alloc_frag(fragsz, gfp_mask | SKB_FLAG); |
498 | 505 | ||
499 | if (likely(data)) { | 506 | if (likely(data)) { |
500 | skb = build_skb(data, fragsz); | 507 | skb = build_skb(data, fragsz); |
@@ -502,7 +509,7 @@ static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask, | |||
502 | put_page(virt_to_head_page(data)); | 509 | put_page(virt_to_head_page(data)); |
503 | } | 510 | } |
504 | } else { | 511 | } else { |
505 | skb = __alloc_skb(length, gfp_mask, | 512 | skb = __alloc_skb(length, gfp_mask | SKB_FLAG, |
506 | SKB_ALLOC_RX, NUMA_NO_NODE); | 513 | SKB_ALLOC_RX, NUMA_NO_NODE); |
507 | } | 514 | } |
508 | return skb; | 515 | return skb; |
@@ -527,7 +534,8 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, | |||
527 | struct sk_buff *skb; | 534 | struct sk_buff *skb; |
528 | 535 | ||
529 | length += NET_SKB_PAD; | 536 | length += NET_SKB_PAD; |
530 | skb = __alloc_rx_skb(length, gfp_mask, 0); | 537 | |
538 | skb = __alloc_rx_skb(length, gfp_mask | SKB_FLAG, 0); | ||
531 | 539 | ||
532 | if (likely(skb)) { | 540 | if (likely(skb)) { |
533 | skb_reserve(skb, NET_SKB_PAD); | 541 | skb_reserve(skb, NET_SKB_PAD); |
@@ -557,7 +565,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, | |||
557 | struct sk_buff *skb; | 565 | struct sk_buff *skb; |
558 | 566 | ||
559 | length += NET_SKB_PAD + NET_IP_ALIGN; | 567 | length += NET_SKB_PAD + NET_IP_ALIGN; |
560 | skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI); | 568 | skb = __alloc_rx_skb(length, gfp_mask | SKB_FLAG, SKB_ALLOC_NAPI); |
561 | 569 | ||
562 | if (likely(skb)) { | 570 | if (likely(skb)) { |
563 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); | 571 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); |
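
With __netdev_alloc_skb() and __napi_alloc_skb() tagged, a driver's RX refill picks up colored pages without any change on the driver side. A sketch of the caller's view (the napi and length variables are placeholders, not taken from this patch):

struct sk_buff *skb;

/* The data behind this skb now comes from GFP_COLOR | GFP_CPU1 pages. */
skb = napi_alloc_skb(napi, length);
if (!skb)
        return -ENOMEM;
skb_put(skb, length);
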
@@ -932,7 +940,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) | |||
932 | u8 *vaddr; | 940 | u8 *vaddr; |
933 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; | 941 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
934 | 942 | ||
935 | page = alloc_page(gfp_mask); | 943 | page = alloc_page(gfp_mask | SKB_FLAG); |
936 | if (!page) { | 944 | if (!page) { |
937 | while (head) { | 945 | while (head) { |
938 | struct page *next = (struct page *)page_private(head); | 946 | struct page *next = (struct page *)page_private(head); |
@@ -988,7 +996,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) | |||
988 | skb1); | 996 | skb1); |
989 | struct sk_buff *n; | 997 | struct sk_buff *n; |
990 | 998 | ||
991 | if (skb_orphan_frags(skb, gfp_mask)) | 999 | if (skb_orphan_frags(skb, gfp_mask | SKB_FLAG)) |
992 | return NULL; | 1000 | return NULL; |
993 | 1001 | ||
994 | if (skb->fclone == SKB_FCLONE_ORIG && | 1002 | if (skb->fclone == SKB_FCLONE_ORIG && |
@@ -999,7 +1007,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) | |||
999 | if (skb_pfmemalloc(skb)) | 1007 | if (skb_pfmemalloc(skb)) |
1000 | gfp_mask |= __GFP_MEMALLOC; | 1008 | gfp_mask |= __GFP_MEMALLOC; |
1001 | 1009 | ||
1002 | n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); | 1010 | n = kmem_cache_alloc(skbuff_head_cache, gfp_mask | SKB_FLAG); |
1003 | if (!n) | 1011 | if (!n) |
1004 | return NULL; | 1012 | return NULL; |
1005 | 1013 | ||
@@ -1063,7 +1071,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) | |||
1063 | { | 1071 | { |
1064 | int headerlen = skb_headroom(skb); | 1072 | int headerlen = skb_headroom(skb); |
1065 | unsigned int size = skb_end_offset(skb) + skb->data_len; | 1073 | unsigned int size = skb_end_offset(skb) + skb->data_len; |
1066 | struct sk_buff *n = __alloc_skb(size, gfp_mask, | 1074 | struct sk_buff *n = __alloc_skb(size, gfp_mask | SKB_FLAG, |
1067 | skb_alloc_rx_flag(skb), NUMA_NO_NODE); | 1075 | skb_alloc_rx_flag(skb), NUMA_NO_NODE); |
1068 | 1076 | ||
1069 | if (!n) | 1077 | if (!n) |
@@ -1104,7 +1112,7 @@ struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, | |||
1104 | { | 1112 | { |
1105 | unsigned int size = skb_headlen(skb) + headroom; | 1113 | unsigned int size = skb_headlen(skb) + headroom; |
1106 | int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); | 1114 | int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); |
1107 | struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); | 1115 | struct sk_buff *n = __alloc_skb(size, gfp_mask | SKB_FLAG, flags, NUMA_NO_NODE); |
1108 | 1116 | ||
1109 | if (!n) | 1117 | if (!n) |
1110 | goto out; | 1118 | goto out; |
@@ -1123,7 +1131,7 @@ struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, | |||
1123 | if (skb_shinfo(skb)->nr_frags) { | 1131 | if (skb_shinfo(skb)->nr_frags) { |
1124 | int i; | 1132 | int i; |
1125 | 1133 | ||
1126 | if (skb_orphan_frags(skb, gfp_mask)) { | 1134 | if (skb_orphan_frags(skb, gfp_mask | SKB_FLAG)) { |
1127 | kfree_skb(n); | 1135 | kfree_skb(n); |
1128 | n = NULL; | 1136 | n = NULL; |
1129 | goto out; | 1137 | goto out; |
@@ -1180,7 +1188,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | |||
1180 | if (skb_pfmemalloc(skb)) | 1188 | if (skb_pfmemalloc(skb)) |
1181 | gfp_mask |= __GFP_MEMALLOC; | 1189 | gfp_mask |= __GFP_MEMALLOC; |
1182 | data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), | 1190 | data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), |
1183 | gfp_mask, NUMA_NO_NODE, NULL); | 1191 | gfp_mask | SKB_FLAG, NUMA_NO_NODE, NULL); |
1184 | if (!data) | 1192 | if (!data) |
1185 | goto nodata; | 1193 | goto nodata; |
1186 | size = SKB_WITH_OVERHEAD(ksize(data)); | 1194 | size = SKB_WITH_OVERHEAD(ksize(data)); |
@@ -1201,7 +1209,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | |||
1201 | */ | 1209 | */ |
1202 | if (skb_cloned(skb)) { | 1210 | if (skb_cloned(skb)) { |
1203 | /* copy this zero copy skb frags */ | 1211 | /* copy this zero copy skb frags */ |
1204 | if (skb_orphan_frags(skb, gfp_mask)) | 1212 | if (skb_orphan_frags(skb, gfp_mask | SKB_FLAG)) |
1205 | goto nofrags; | 1213 | goto nofrags; |
1206 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | 1214 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
1207 | skb_frag_ref(skb, i); | 1215 | skb_frag_ref(skb, i); |
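
Tagging pskb_expand_head() means that even an skb whose head was originally allocated elsewhere migrates into colored memory as soon as its headroom or tailroom has to grow. A sketch of a typical caller (extra_hdr_len is a placeholder):

/* Before pushing an extra header: if headroom is short, the reallocated
 * head now also comes from SKB_FLAG-tagged memory.
 */
if (skb_headroom(skb) < extra_hdr_len &&
    pskb_expand_head(skb, extra_hdr_len - skb_headroom(skb), 0, GFP_ATOMIC))
        goto drop;
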
@@ -1286,7 +1294,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, | |||
1286 | * Allocate the copy buffer | 1294 | * Allocate the copy buffer |
1287 | */ | 1295 | */ |
1288 | struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, | 1296 | struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, |
1289 | gfp_mask, skb_alloc_rx_flag(skb), | 1297 | gfp_mask | SKB_FLAG, skb_alloc_rx_flag(skb), |
1290 | NUMA_NO_NODE); | 1298 | NUMA_NO_NODE); |
1291 | int oldheadroom = skb_headroom(skb); | 1299 | int oldheadroom = skb_headroom(skb); |
1292 | int head_copy_len, head_copy_off; | 1300 | int head_copy_len, head_copy_off; |
@@ -4387,7 +4395,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | |||
4387 | gfp_head |= __GFP_REPEAT; | 4395 | gfp_head |= __GFP_REPEAT; |
4388 | 4396 | ||
4389 | *errcode = -ENOBUFS; | 4397 | *errcode = -ENOBUFS; |
4390 | skb = alloc_skb(header_len, gfp_head); | 4398 | skb = alloc_skb(header_len, gfp_head | SKB_FLAG); |
4391 | if (!skb) | 4399 | if (!skb) |
4392 | return NULL; | 4400 | return NULL; |
4393 | 4401 | ||
@@ -4401,7 +4409,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | |||
4401 | page = alloc_pages((gfp_mask & ~__GFP_WAIT) | | 4409 | page = alloc_pages((gfp_mask & ~__GFP_WAIT) | |
4402 | __GFP_COMP | | 4410 | __GFP_COMP | |
4403 | __GFP_NOWARN | | 4411 | __GFP_NOWARN | |
4404 | __GFP_NORETRY, | 4412 | __GFP_NORETRY | SKB_FLAG, |
4405 | order); | 4413 | order); |
4406 | if (page) | 4414 | if (page) |
4407 | goto fill_page; | 4415 | goto fill_page; |
@@ -4411,7 +4419,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | |||
4411 | } | 4419 | } |
4412 | order--; | 4420 | order--; |
4413 | } | 4421 | } |
4414 | page = alloc_page(gfp_mask); | 4422 | page = alloc_page(gfp_mask | SKB_FLAG); |
4415 | if (!page) | 4423 | if (!page) |
4416 | goto failure; | 4424 | goto failure; |
4417 | fill_page: | 4425 | fill_page: |