author	Li Bin <huawei.libin@huawei.com>	2015-10-30 04:31:04 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2015-11-03 10:50:29 -0500
commit	2ee8a74f2a5da913637f75a19a0da0e7a08c0f86
tree	30a1fe09feca6c1aca42c716aa0ef36ceb8fdce9
parent	c84da8b9ad3761eef43811181c7e896e9834b26b
recordmcount: arm64: Replace the ignored mcount call into nop
Currently, recordmcount only records functions that are in the following
sections: .text/.ref.text/.sched.text/.spinlock.text/.irqentry.text/
.kprobes.text/.text.unlikely

For functions outside these sections, the call to mcount is left in place
and is not replaced when the kernel boots up, which brings a performance
overhead, for example in do_mem_abort (in the .exception.text section).
This patch makes recordmcount turn such mcount calls into nops.

Link: http://lkml.kernel.org/r/1446019445-14421-1-git-send-email-huawei.libin@huawei.com
Link: http://lkml.kernel.org/r/1446193864-24593-4-git-send-email-huawei.libin@huawei.com

Cc: <lkp@intel.com>
Cc: <catalin.marinas@arm.com>
Cc: <takahiro.akashi@linaro.org>
Cc: <stable@vger.kernel.org> # 3.18+
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Li Bin <huawei.libin@huawei.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--	scripts/recordmcount.c	24
1 file changed, 23 insertions(+), 1 deletion(-)
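For context before the diff: on arm64, an unresolved "bl <_mcount>" call site is just the BL opcode with an empty offset field (the top six bits are 100101, so the raw word is 0x94000000 while imm26 is still zero, before the R_AARCH64_CALL26 relocation is applied), and the AArch64 nop instruction is 0xd503201f, stored little-endian as the bytes 1f 20 03 d5. The following is a minimal standalone sketch of that check-and-patch step, assuming a little-endian host and an in-memory copy of the section; the name patch_mcount_to_nop is illustrative only and is not part of the patch, which instead writes through recordmcount's fd_map file descriptor.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* "bl <_mcount>" before relocation: BL opcode (bits 100101) with imm26 == 0. */
#define AARCH64_BL_UNRELOCATED	0x94000000u
/* AArch64 "nop"; little-endian bytes 1f 20 03 d5. */
#define AARCH64_NOP		0xd503201fu

/*
 * Illustrative helper, not from the patch: nop out an unrelocated mcount
 * call at 'offset' inside 'buf' (assumes a little-endian host, as the
 * patched code does).  Returns 0 on success, -1 if the word there is not
 * the expected bl encoding.
 */
int patch_mcount_to_nop(unsigned char *buf, size_t offset)
{
	uint32_t insn;

	memcpy(&insn, buf + offset, sizeof(insn));
	if (insn != AARCH64_BL_UNRELOCATED)
		return -1;

	insn = AARCH64_NOP;
	memcpy(buf + offset, &insn, sizeof(insn));
	return 0;
}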
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 8cc020bbe859..698768bdc581 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -42,6 +42,7 @@
 
 #ifndef EM_AARCH64
 #define EM_AARCH64 183
+#define R_AARCH64_NONE 0
 #define R_AARCH64_ABS64 257
 #endif
 
@@ -160,6 +161,22 @@ static int make_nop_x86(void *map, size_t const offset)
 	return 0;
 }
 
+static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
+static int make_nop_arm64(void *map, size_t const offset)
+{
+	uint32_t *ptr;
+
+	ptr = map + offset;
+	/* bl <_mcount> is 0x94000000 before relocation */
+	if (*ptr != 0x94000000)
+		return -1;
+
+	/* Convert to nop */
+	ulseek(fd_map, offset, SEEK_SET);
+	uwrite(fd_map, ideal_nop, 4);
+	return 0;
+}
+
 /*
  * Get the whole file as a programming convenience in order to avoid
  * malloc+lseek+read+free of many pieces.  If successful, then mmap
@@ -354,7 +371,12 @@ do_file(char const *const fname)
 			altmcount = "__gnu_mcount_nc";
 			break;
 	case EM_AARCH64:
-			reltype = R_AARCH64_ABS64; gpfx = '_'; break;
+			reltype = R_AARCH64_ABS64;
+			make_nop = make_nop_arm64;
+			rel_type_nop = R_AARCH64_NONE;
+			ideal_nop = ideal_nop4_arm64;
+			gpfx = '_';
+			break;
 	case EM_IA_64:	reltype = R_IA64_IMM64;   gpfx = '_'; break;
 	case EM_METAG:	reltype = R_METAG_ADDR32;
 			altmcount = "_mcount_wrapper";
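A note on the hooks wired up in the EM_AARCH64 case above: make_nop and ideal_nop supply the patching routine and the replacement bytes, while rel_type_nop names the relocation type that makes the now-unneeded mcount relocation entry inert once the call has been nopped out. Below is a hedged sketch of what that neutralization amounts to, using the standard <elf.h> macros; the neutralize_reloc helper and the Elf64_Rela pointer are assumptions for illustration, not code from this diff.

#include <elf.h>

/*
 * Illustrative only: keep the relocation's symbol index but change its
 * type to R_AARCH64_NONE, so it no longer patches the call site that has
 * just been overwritten with a nop.
 */
void neutralize_reloc(Elf64_Rela *rela)
{
	rela->r_info = ELF64_R_INFO(ELF64_R_SYM(rela->r_info), R_AARCH64_NONE);
}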