author     Geliang Tang <geliangtang@163.com>    2015-09-27 11:25:50 -0400
committer  Ingo Molnar <mingo@kernel.org>        2015-09-28 02:09:52 -0400
commit     18ab2cd3ee9d52dc64c5ae984146a261a328c4e8
tree       bc9745194ff05c72d917126797c39004a8161010
parent     6afc0c269c3d20cde05515b00ede00e91fee0be5
perf/core, perf/x86: Change needlessly global functions and a variable to static
Fixes various sparse warnings.
Signed-off-by: Geliang Tang <geliangtang@163.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/70c14234da1bed6e3e67b9c419e2d5e376ab4f32.1443367286.git.geliangtang@163.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/x86/kernel/cpu/intel_cacheinfo.c | 8 ++++----
 kernel/events/core.c                  | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)
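As a quick illustration of the class of warning being silenced here (the file and symbol names below are made up for the example and are not taken from the patch): sparse, which the kernel build can run via "make C=1", warns whenever a symbol has external linkage but is never declared in a header, i.e. it could and should be file-local. Adding "static" gives the symbol internal linkage, which silences the warning and also lets the compiler optimize the symbol more aggressively.

/* example.c - hypothetical stand-alone example of the warning class.
 *
 * Without "static" on double_it(), sparse reports something like:
 *
 *   warning: symbol 'double_it' was not declared. Should it be static?
 *
 * because double_it() has external linkage yet no header declares it.
 */
#include <stdio.h>

static int double_it(int x)	/* before the fix: int double_it(int x) */
{
	return x * 2;
}

int main(void)
{
	printf("%d\n", double_it(21));
	return 0;
}

The patch below applies exactly this transformation to num_cache_leaves, amd_get_l3_disable_slot(), amd_set_l3_disable_slot(), update_perf_cpu_limits(), perf_cgroup_switch(), nop_txn_flags and perf_init_event(), all of which are, per the commit title, needlessly global.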
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index be4febc58b94..e38d338a6447 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -157,7 +157,7 @@ struct _cpuid4_info_regs {
 	struct amd_northbridge *nb;
 };
 
-unsigned short num_cache_leaves;
+static unsigned short num_cache_leaves;
 
 /* AMD doesn't have CPUID4. Emulate it here to report the same
    information to the user. This makes some assumptions about the machine:
@@ -326,7 +326,7 @@ static void amd_calc_l3_indices(struct amd_northbridge *nb)
  *
  * @returns: the disabled index if used or negative value if slot free.
  */
-int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
+static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
 {
 	unsigned int reg = 0;
 
@@ -403,8 +403,8 @@ static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
  *
  * @return: 0 on success, error status on failure
  */
-int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
-			    unsigned long index)
+static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
+				   unsigned slot, unsigned long index)
 {
 	int ret = 0;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f87b434c3c1e..ea02109aee77 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -196,7 +196,7 @@ static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
 static int perf_sample_allowed_ns __read_mostly =
 	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
 
-void update_perf_cpu_limits(void)
+static void update_perf_cpu_limits(void)
 {
 	u64 tmp = perf_sample_period_ns;
 
@@ -472,7 +472,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
  * mode SWOUT : schedule out everything
  * mode SWIN : schedule in based on cgroup for next
  */
-void perf_cgroup_switch(struct task_struct *task, int mode)
+static void perf_cgroup_switch(struct task_struct *task, int mode)
 {
 	struct perf_cpu_context *cpuctx;
 	struct pmu *pmu;
@@ -7390,7 +7390,7 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }
 
-DEFINE_PER_CPU(unsigned int, nop_txn_flags);
+static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
 {
@@ -7750,7 +7750,7 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 	return ret;
 }
 
-struct pmu *perf_init_event(struct perf_event *event)
+static struct pmu *perf_init_event(struct perf_event *event)
 {
 	struct pmu *pmu = NULL;
 	int idx;