author      Linus Torvalds <torvalds@linux-foundation.org>   2015-10-01 22:20:11 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2015-10-01 22:20:11 -0400
commit      bde17b90dd9712cb61a7ab0c1ccd0f7f6aa57957 (patch)
tree        d4597bfde4f5d4a3a5729e5534c44993c6216352
parent      1bca1000fa71a1092947b4a51928abe80a3316d2 (diff)
parent      676bd99178cd962ed24ffdad222b7069d330a969 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "12 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  dmapool: fix overflow condition in pool_find_page()
  thermal: avoid division by zero in power allocator
  memcg: remove pcp_counter_lock
  kprobes: use _do_fork() in samples to make them work again
  drivers/input/joystick/Kconfig: zhenhua.c needs BITREVERSE
  memcg: make mem_cgroup_read_stat() unsigned
  memcg: fix dirty page migration
  dax: fix NULL pointer in __dax_pmd_fault()
  mm: hugetlbfs: skip shared VMAs when unmapping private pages to satisfy a fault
  mm/slab: fix unexpected index mapping result of kmalloc_size(INDEX_NODE+1)
  userfaultfd: remove kernel header include from uapi header
  arch/x86/include/asm/efi.h: fix build failure
-rw-r--r--  arch/x86/include/asm/efi.h           |  2
-rw-r--r--  drivers/input/joystick/Kconfig       |  1
-rw-r--r--  drivers/thermal/power_allocator.c    | 10
-rw-r--r--  fs/dax.c                             | 13
-rw-r--r--  include/linux/memcontrol.h           |  1
-rw-r--r--  include/linux/mm.h                   | 21
-rw-r--r--  include/uapi/linux/userfaultfd.h     |  2
-rw-r--r--  mm/dmapool.c                         |  2
-rw-r--r--  mm/hugetlb.c                         |  8
-rw-r--r--  mm/memcontrol.c                      | 31
-rw-r--r--  mm/migrate.c                         | 12
-rw-r--r--  mm/slab.c                            | 13
-rw-r--r--  samples/kprobes/jprobe_example.c     | 14
-rw-r--r--  samples/kprobes/kprobe_example.c     |  6
-rw-r--r--  samples/kprobes/kretprobe_example.c  |  4
15 files changed, 106 insertions, 34 deletions
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index ab5f1d447ef9..ae68be92f755 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -86,6 +86,7 @@ extern u64 asmlinkage efi_call(void *fp, ...);
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
                                        u32 type, u64 attribute);

+#ifdef CONFIG_KASAN
 /*
  * CONFIG_KASAN may redefine memset to __memset. __memset function is present
  * only in kernel binary. Since the EFI stub linked into a separate binary it
@@ -95,6 +96,7 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
 #undef memcpy
 #undef memset
 #undef memmove
+#endif

 #endif /* CONFIG_X86_32 */

diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 56eb471b5576..4215b5382092 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
 config JOYSTICK_ZHENHUA
        tristate "5-byte Zhenhua RC transmitter"
        select SERIO
+       select BITREVERSE
        help
          Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
          supplied with a ready to fly micro electric indoor helicopters
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 7ff96270c933..e570ff084add 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -144,6 +144,16 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
        switch_on_temp = 0;

        temperature_threshold = control_temp - switch_on_temp;
+       /*
+        * estimate_pid_constants() tries to find appropriate default
+        * values for thermal zones that don't provide them. If a
+        * system integrator has configured a thermal zone with two
+        * passive trip points at the same temperature, that person
+        * hasn't put any effort to set up the thermal zone properly
+        * so just give up.
+        */
+       if (!temperature_threshold)
+               return;

        if (!tz->tzp->k_po || force)
                tz->tzp->k_po = int_to_frac(sustainable_power) /
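
The early return matters because temperature_threshold is the divisor in the gain estimates immediately below, and an integer division by zero faults in the kernel. A minimal user-space sketch of the same guard (the fixed-point scale and values are illustrative, not the driver's):

    #include <stdio.h>

    /* Stand-in for the kernel's fixed-point helper; scale is made up. */
    #define FRAC_BITS 10
    static long int_to_frac(long x) { return x << FRAC_BITS; }

    /* Estimate a proportional gain; bail out when the passive trip
     * points coincide, which would otherwise divide by zero. */
    static long estimate_k_po(long sustainable_power, long control_temp,
                              long switch_on_temp)
    {
            long threshold = control_temp - switch_on_temp;

            if (!threshold)         /* misconfigured zone: give up */
                    return 0;
            return int_to_frac(sustainable_power) / threshold;
    }

    int main(void)
    {
            printf("%ld\n", estimate_k_po(3500, 85000, 75000)); /* 358 */
            printf("%ld\n", estimate_k_po(3500, 85000, 85000)); /* guarded: 0 */
            return 0;
    }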
diff --git a/fs/dax.c b/fs/dax.c
index 7ae6df7ea1d2..bcfb14bfc1e4 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -569,8 +569,20 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
                goto fallback;

+       sector = bh.b_blocknr << (blkbits - 9);
+
        if (buffer_unwritten(&bh) || buffer_new(&bh)) {
                int i;
+
+               length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
+                                               bh.b_size);
+               if (length < 0) {
+                       result = VM_FAULT_SIGBUS;
+                       goto out;
+               }
+               if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
+                       goto fallback;
+
                for (i = 0; i < PTRS_PER_PMD; i++)
                        clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
                wmb_pmem();
@@ -623,7 +635,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                result = VM_FAULT_NOPAGE;
                spin_unlock(ptl);
        } else {
-               sector = bh.b_blocknr << (blkbits - 9);
                length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
                                                bh.b_size);
                if (length < 0) {
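
Both branches now reuse the sector computed up front, and, more importantly, the unwritten/new-buffer path calls bdev_direct_access() and checks its result before touching kaddr, which was previously still unset at that point. A reduced user-space sketch of the bug class (all names hypothetical):

    #include <stdio.h>
    #include <string.h>

    #define CHUNK 4096

    /* Stand-in for bdev_direct_access(): maps a range, yields an address. */
    static long direct_access(char **kaddr)
    {
            static char backing[CHUNK];

            *kaddr = backing;
            return CHUNK;           /* bytes mapped, or negative on error */
    }

    int main(void)
    {
            char *kaddr = NULL;
            long length;

            /* The buggy ordering zeroed through kaddr here, before any
             * call had assigned it: a guaranteed NULL dereference. The
             * fix obtains and validates the mapping first. */
            length = direct_access(&kaddr);
            if (length < CHUNK)
                    return 1;       /* kernel: SIGBUS or fall back */
            memset(kaddr, 0, CHUNK);
            puts("zeroed after the mapping was validated");
            return 0;
    }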
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ad800e62cb7a..6452ff4c463f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -242,7 +242,6 @@ struct mem_cgroup {
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu __percpu *stat;
-       spinlock_t pcp_counter_lock;

 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
        struct cg_proto tcp_mem;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 91c08f6f0dc9..80001de019ba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 #endif
 }

+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+       return page->mem_cgroup;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+       page->mem_cgroup = memcg;
+}
+#else
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+       return NULL;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+}
+#endif
+
 /*
  * Some inline functions in vmstat.h depend on page_zone()
  */
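
page_memcg()/set_page_memcg() follow a standard kernel idiom: the real static inlines live under the config option, with empty stubs in the #else branch so call sites (such as mm/migrate.c below) compile unchanged either way. A standalone sketch of the idiom (the widget/HAVE_OWNERS names are hypothetical):

    #include <stdio.h>

    struct widget { int owner; };

    /* Build with -DHAVE_OWNERS to compile the feature in. */
    #ifdef HAVE_OWNERS
    static inline int widget_owner(const struct widget *w)
    {
            return w->owner;
    }
    static inline void set_widget_owner(struct widget *w, int owner)
    {
            w->owner = owner;
    }
    #else
    /* Empty stubs keep every caller free of #ifdef clutter. */
    static inline int widget_owner(const struct widget *w)
    {
            (void)w;
            return 0;
    }
    static inline void set_widget_owner(struct widget *w, int owner)
    {
            (void)w;
            (void)owner;
    }
    #endif

    int main(void)
    {
            struct widget w = { 0 };

            set_widget_owner(&w, 42);       /* no-op when compiled out */
            printf("owner=%d\n", widget_owner(&w));
            return 0;
    }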
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index df0e09bb7dd5..9057d7af3ae1 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -11,8 +11,6 @@

 #include <linux/types.h>

-#include <linux/compiler.h>
-
 #define UFFD_API ((__u64)0xAA)
 /*
  * After implementing the respective features it will become:
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 71a8998cd03a..312a716fa14c 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -394,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
        list_for_each_entry(page, &pool->page_list, page_list) {
                if (dma < page->dma)
                        continue;
-               if (dma < (page->dma + pool->allocation))
+               if ((dma - page->dma) < pool->allocation)
                        return page;
        }
        return NULL;
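
The rewritten test sidesteps unsigned overflow: page->dma + pool->allocation can wrap past the top of dma_addr_t when a page sits near the end of the address space, whereas dma - page->dma cannot wrap, because the preceding test already guarantees dma >= page->dma. A user-space sketch of the two forms (types and addresses are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t base = 0xfffff000u;  /* page->dma near the top of a 32-bit space */
            uint32_t size = 0x2000u;      /* pool->allocation */
            uint32_t dma  = 0xfffff800u;  /* address that really is inside the page */

            /* Old form: base + size wraps to 0x1000, so the in-range
             * address looks out of range and the lookup fails. */
            printf("add form: %s\n",
                   dma < base + size ? "match" : "miss (overflow!)");

            /* New form: dma >= base is already established, so the
             * subtraction is exact and cannot wrap. */
            printf("sub form: %s\n",
                   (dma - base) < size ? "match" : "miss");
            return 0;
    }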
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 999fb0aef8f1..9cc773483624 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3202,6 +3202,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                        continue;

                /*
+                * Shared VMAs have their own reserves and do not affect
+                * MAP_PRIVATE accounting but it is possible that a shared
+                * VMA is using the same page so check and skip such VMAs.
+                */
+               if (iter_vma->vm_flags & VM_MAYSHARE)
+                       continue;
+
+               /*
                 * Unmap the page from other VMAs without their own reserves.
                 * They get marked to be SIGKILLed if they fault in these
                 * areas. This is because a future no-page fault on this VMA
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6ddaeba34e09..1fedbde68f59 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -644,12 +644,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 }

 /*
+ * Return page count for single (non recursive) @memcg.
+ *
  * Implementation Note: reading percpu statistics for memcg.
  *
  * Both of vmstat[] and percpu_counter has threshold and do periodic
  * synchronization to implement "quick" read. There are trade-off between
  * reading cost and precision of value. Then, we may have a chance to implement
- * a periodic synchronizion of counter in memcg's counter.
+ * a periodic synchronization of counter in memcg's counter.
  *
  * But this _read() function is used for user interface now. The user accounts
  * memory usage by memory cgroup and he _always_ requires exact value because
@@ -659,17 +661,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  *
  * If there are kernel internal actions which can make use of some not-exact
  * value, and reading all cpu value can be performance bottleneck in some
- * common workload, threashold and synchonization as vmstat[] should be
+ * common workload, threshold and synchronization as vmstat[] should be
  * implemented.
  */
-static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-                                enum mem_cgroup_stat_index idx)
+static unsigned long
+mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
 {
        long val = 0;
        int cpu;

+       /* Per-cpu values can be negative, use a signed accumulator */
        for_each_possible_cpu(cpu)
                val += per_cpu(memcg->stat->count[idx], cpu);
+       /*
+        * Summing races with updates, so val may be negative.  Avoid exposing
+        * transient negative values.
+        */
+       if (val < 0)
+               val = 0;
        return val;
 }

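
mem_cgroup_read_stat() keeps its accumulator signed even though the return type is now unsigned: per-cpu deltas can be negative while an update is in flight, and summing them races with writers, so a transient negative total must be clamped rather than wrapped into a huge unsigned value. A user-space sketch of the pattern (counter values are made up):

    #include <stdio.h>

    #define NR_CPUS 4

    /* Per-cpu deltas; some are momentarily negative mid-update. */
    static long percpu_count[NR_CPUS] = { 7, -2, 5, -12 };

    static unsigned long read_stat(void)
    {
            long val = 0;   /* signed: partial sums may dip below zero */
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    val += percpu_count[cpu];

            /* Never expose a transient negative total through an
             * unsigned interface. */
            if (val < 0)
                    val = 0;
            return (unsigned long)val;
    }

    int main(void)
    {
            /* Prints 0 rather than 18446744073709551614. */
            printf("%lu\n", read_stat());
            return 0;
    }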
@@ -1254,7 +1263,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                        continue;
-               pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
+               pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
                        K(mem_cgroup_read_stat(iter, i)));
        }

@@ -2819,14 +2828,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg,
                           enum mem_cgroup_stat_index idx)
 {
        struct mem_cgroup *iter;
-       long val = 0;
+       unsigned long val = 0;

-       /* Per-cpu values can be negative, use a signed accumulator */
        for_each_mem_cgroup_tree(iter, memcg)
                val += mem_cgroup_read_stat(iter, idx);

-       if (val < 0) /* race ? */
-               val = 0;
        return val;
 }

@@ -3169,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                        continue;
-               seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
+               seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
                           mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
        }

@@ -3194,13 +3200,13 @@ static int memcg_stat_show(struct seq_file *m, void *v)
                           (u64)memsw * PAGE_SIZE);

        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-               long long val = 0;
+               unsigned long long val = 0;

                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                        continue;
                for_each_mem_cgroup_tree(mi, memcg)
                        val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-               seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+               seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
        }

        for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
@@ -4179,7 +4185,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        if (memcg_wb_domain_init(memcg, GFP_KERNEL))
                goto out_free_stat;

-       spin_lock_init(&memcg->pcp_counter_lock);
        return memcg;

 out_free_stat:
diff --git a/mm/migrate.c b/mm/migrate.c
index 7452a00bbb50..842ecd7aaf7f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -740,6 +740,15 @@ static int move_to_new_page(struct page *newpage, struct page *page,
        if (PageSwapBacked(page))
                SetPageSwapBacked(newpage);

+       /*
+        * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
+        * needs newpage's memcg set to transfer memcg dirty page accounting.
+        * So perform memcg migration in two steps:
+        * 1. set newpage->mem_cgroup (here)
+        * 2. clear page->mem_cgroup (below)
+        */
+       set_page_memcg(newpage, page_memcg(page));
+
        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page, mode);
@@ -756,9 +765,10 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                rc = fallback_migrate_page(mapping, newpage, page, mode);

        if (rc != MIGRATEPAGE_SUCCESS) {
+               set_page_memcg(newpage, NULL);
                newpage->mapping = NULL;
        } else {
-               mem_cgroup_migrate(page, newpage, false);
+               set_page_memcg(page, NULL);
                if (page_was_mapped)
                        remove_migration_ptes(page, newpage);
                page->mapping = NULL;
diff --git a/mm/slab.c b/mm/slab.c
index c77ebe6cc87c..4fcc5dd8d5a6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
                size += BYTES_PER_WORD;
        }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-       if (size >= kmalloc_size(INDEX_NODE + 1)
-           && cachep->object_size > cache_line_size()
-           && ALIGN(size, cachep->align) < PAGE_SIZE) {
+       /*
+        * To activate debug pagealloc, off-slab management is necessary
+        * requirement. In early phase of initialization, small sized slab
+        * doesn't get initialized so it would not be possible. So, we need
+        * to check size >= 256. It guarantees that all necessary small
+        * sized slab is initialized in current slab initialization sequence.
+        */
+       if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+           size >= 256 && cachep->object_size > cache_line_size() &&
+           ALIGN(size, cachep->align) < PAGE_SIZE) {
                cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
                size = PAGE_SIZE;
        }
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c
index 9119ac6a8270..c285a3b8a9f1 100644
--- a/samples/kprobes/jprobe_example.c
+++ b/samples/kprobes/jprobe_example.c
@@ -1,13 +1,13 @@
 /*
  * Here's a sample kernel module showing the use of jprobes to dump
- * the arguments of do_fork().
+ * the arguments of _do_fork().
  *
  * For more information on theory of operation of jprobes, see
  * Documentation/kprobes.txt
  *
  * Build and insert the kernel module as done in the kprobe example.
  * You will see the trace data in /var/log/messages and on the
- * console whenever do_fork() is invoked to create a new process.
+ * console whenever _do_fork() is invoked to create a new process.
  * (Some messages may be suppressed if syslogd is configured to
  * eliminate duplicate messages.)
  */
@@ -17,13 +17,13 @@
 #include <linux/kprobes.h>

 /*
- * Jumper probe for do_fork.
+ * Jumper probe for _do_fork.
  * Mirror principle enables access to arguments of the probed routine
  * from the probe handler.
  */

-/* Proxy routine having the same arguments as actual do_fork() routine */
-static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
+/* Proxy routine having the same arguments as actual _do_fork() routine */
+static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
              unsigned long stack_size, int __user *parent_tidptr,
              int __user *child_tidptr)
 {
@@ -36,9 +36,9 @@ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 }

 static struct jprobe my_jprobe = {
-       .entry                  = jdo_fork,
+       .entry                  = j_do_fork,
        .kp = {
-               .symbol_name    = "do_fork",
+               .symbol_name    = "_do_fork",
        },
 };

diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index 366db1a9fb65..727eb21c9c56 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -1,13 +1,13 @@
 /*
  * NOTE: This example is works on x86 and powerpc.
  * Here's a sample kernel module showing the use of kprobes to dump a
- * stack trace and selected registers when do_fork() is called.
+ * stack trace and selected registers when _do_fork() is called.
  *
  * For more information on theory of operation of kprobes, see
  * Documentation/kprobes.txt
  *
  * You will see the trace data in /var/log/messages and on the console
- * whenever do_fork() is invoked to create a new process.
+ * whenever _do_fork() is invoked to create a new process.
  */

 #include <linux/kernel.h>
@@ -16,7 +16,7 @@

 /* For each probe you need to allocate a kprobe structure */
 static struct kprobe kp = {
-       .symbol_name    = "do_fork",
+       .symbol_name    = "_do_fork",
 };

 /* kprobe pre_handler: called just before the probed instruction is executed */
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c
index 1041b6731598..ebb1d1aed547 100644
--- a/samples/kprobes/kretprobe_example.c
+++ b/samples/kprobes/kretprobe_example.c
@@ -7,7 +7,7 @@
  *
  * usage: insmod kretprobe_example.ko func=<func_name>
  *
- * If no func_name is specified, do_fork is instrumented
+ * If no func_name is specified, _do_fork is instrumented
  *
  * For more information on theory of operation of kretprobes, see
  * Documentation/kprobes.txt
@@ -25,7 +25,7 @@
 #include <linux/limits.h>
 #include <linux/sched.h>

-static char func_name[NAME_MAX] = "do_fork";
+static char func_name[NAME_MAX] = "_do_fork";
 module_param_string(func, func_name, NAME_MAX, S_IRUGO);
 MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the"
                 " function's execution time");