author		Andi Kleen <ak@linux.intel.com>	2013-08-05 18:02:43 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-08-06 17:20:13 -0400
commit		277d5b40b7bf495d2d4193746181b17dd98441b2 (patch)
tree		105ebee6f6306e1a1cf1abfb3ba0da30d388447d /arch
parent		04bb591ca74fb8ea06d5ab7fadfb7bf5b11fb28e (diff)
x86, asmlinkage: Make several variables used from assembler/linker script visible
Plus one function, load_gs_index().

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1375740170-7446-10-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
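Background note, not part of the patch: a minimal C sketch of why these annotations matter, assuming the kernel's usual mapping of __visible to gcc's externally_visible attribute; the variable name below is hypothetical.

/*
 * Sketch only.  With -flto or section garbage collection, the compiler
 * sees no C references to a symbol that is used only from a .S file or
 * from a linker script, so it may drop or rename it.  __visible (which
 * the kernel defines as __attribute__((externally_visible)) for gcc 4.6+)
 * tells the optimizer that the symbol has users it cannot see.
 */
#ifndef __visible
#define __visible __attribute__((externally_visible))
#endif

/* Hypothetical variable: touched only from assembly entry code, never
 * from C.  Without __visible it can be optimized away under LTO and the
 * assembly reference then fails to resolve. */
__visible unsigned long example_asm_scratch;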
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/pgtable.h	3
-rw-r--r--	arch/x86/include/asm/processor.h	2
-rw-r--r--	arch/x86/include/asm/special_insns.h	2
-rw-r--r--	arch/x86/kernel/cpu/amd.c	4
-rw-r--r--	arch/x86/kernel/cpu/common.c	4
-rw-r--r--	arch/x86/kernel/process.c	2
-rw-r--r--	arch/x86/kernel/process_64.c	2
-rw-r--r--	arch/x86/kernel/setup.c	4
-rw-r--r--	arch/x86/kernel/signal.c	2
-rw-r--r--	arch/x86/lib/usercopy_64.c	2
10 files changed, 14 insertions, 13 deletions
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7dc305a46058..4e4765908af5 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -22,7 +22,8 @@
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+	__visible;
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 extern spinlock_t pgd_lock;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 24cf5aefb704..573c1ad4994e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -412,7 +412,7 @@ union irq_stack_union {
 	};
 };
 
-DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 2f4d924fe6c9..645cad2c95ff 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -101,7 +101,7 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-extern void native_load_gs_index(unsigned);
+extern asmlinkage void native_load_gs_index(unsigned);
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f654ecefea5b..466e3d15de12 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -66,8 +66,8 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
  * performance at the same time..
  */
 
-extern void vide(void);
-__asm__(".align 4\nvide: ret");
+extern __visible void vide(void);
+__asm__(".globl vide\n\t.align 4\nvide: ret");
 
 static void init_amd_k5(struct cpuinfo_x86 *c)
 {
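Aside, not from the changelog: this hunk also adds ".globl" because the body of vide() is supplied by a toplevel asm statement. A hedged sketch of the same pattern, with a hypothetical symbol name:

#define __visible __attribute__((externally_visible))	/* as in the sketch above */

/* Hypothetical analogue of the vide() pattern: the function body comes
 * from toplevel asm rather than from C.  ".globl" makes the label a
 * global symbol, so the extern declaration still resolves if LTO places
 * this asm blob and its callers in different partitions; __visible keeps
 * gcc from treating the declaration as unreferenced. */
extern __visible void ret_stub(void);
__asm__(".globl ret_stub\n\t.align 4\nret_stub: ret");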
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 25eb2747b063..2793d1f095a2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1076,7 +1076,7 @@ struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
 				    (unsigned long) debug_idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
-		     irq_stack_union) __aligned(PAGE_SIZE);
+		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
 
 /*
  * The following four percpu variables are hot. Align current_task to
@@ -1093,7 +1093,7 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
-DEFINE_PER_CPU(unsigned int, irq_count) = -1;
+DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 
 DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 83369e5a1d27..c83516be1052 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -36,7 +36,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6e8c1d02ab4b..bb1dc51bab05 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-DEFINE_PER_CPU(unsigned long, old_rsp);
+asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index f8ec57815c05..dfa55afccf5e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -206,9 +206,9 @@ EXPORT_SYMBOL(boot_cpu_data);
 
 
 #if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
-unsigned long mmu_cr4_features;
+__visible unsigned long mmu_cr4_features;
 #else
-unsigned long mmu_cr4_features = X86_CR4_PAE;
+__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
 #endif
 
 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 1dba45dd4938..6a9acc667d20 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -728,7 +728,7 @@ static void do_signal(struct pt_regs *regs)
  * notification of userspace execution resumption
  * - triggered by the TIF_WORK_MASK flags
  */
-void
+__visible void
 do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 {
 	user_exit();
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 906fea315791..c905e89e19fe 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
  * Since protection fault in copy_from/to_user is not a normal situation,
  * it is not necessary to optimize tail handling.
  */
-unsigned long
+__visible unsigned long
 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
 {
 	char c;