author		Zachary Amsden <zach@vmware.com>	2005-10-30 17:59:34 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 20:37:12 -0500
commit		251e6912df43df54570ed68aade703b329c6cd5b (patch)
tree		28695a0c9a468281613a6b8ef8c79596e2764001
parent		72e12b76fe48d99d1deb417f177b10a9d99b2e74 (diff)
[PATCH] x86: add an accessor function for getting the per-CPU gdt
Add an accessor function for getting the per-CPU gdt. The caller must already
hold the CPU (i.e. have called get_cpu()).

Signed-off-by: Zachary Amsden <zach@vmware.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
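A minimal usage sketch of the accessor, following the get_cpu()/put_cpu()
pattern the apm.c hunks below adopt (the wrapper name patch_bios_desc is
hypothetical and not part of this patch):

	#include <linux/smp.h>		/* get_cpu(), put_cpu() */
	#include <asm/desc.h>		/* get_cpu_gdt_table(), struct desc_struct */

	static void patch_bios_desc(struct desc_struct new_desc)
	{
		struct desc_struct *gdt;
		int cpu;

		cpu = get_cpu();		/* pin this CPU first ...      */
		gdt = get_cpu_gdt_table(cpu);	/* ... then take its GDT       */
		gdt[0x40 / 8] = new_desc;	/* e.g. the 0x40 BIOS descriptor */
		put_cpu();
	}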
-rw-r--r--	arch/i386/kernel/apm.c		40
-rw-r--r--	arch/i386/kernel/cpu/common.c	9
-rw-r--r--	arch/i386/mm/fault.c		2
-rw-r--r--	include/asm-i386/desc.h		8
4 files changed, 32 insertions(+), 27 deletions(-)
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index d7811c4e8b50..d2ef0c2aa93e 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -597,12 +597,14 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
 	cpumask_t		cpus;
 	int			cpu;
 	struct desc_struct	save_desc_40;
+	struct desc_struct	*gdt;
 
 	cpus = apm_save_cpus();
 
 	cpu = get_cpu();
-	save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8];
-	per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc;
+	gdt = get_cpu_gdt_table(cpu);
+	save_desc_40 = gdt[0x40 / 8];
+	gdt[0x40 / 8] = bad_bios_desc;
 
 	local_save_flags(flags);
 	APM_DO_CLI;
@@ -610,7 +612,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
 	apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
 	APM_DO_RESTORE_SEGS;
 	local_irq_restore(flags);
-	per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = save_desc_40;
+	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
 	apm_restore_cpus(cpus);
 
@@ -639,13 +641,14 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
 	cpumask_t		cpus;
 	int			cpu;
 	struct desc_struct	save_desc_40;
-
+	struct desc_struct	*gdt;
 
 	cpus = apm_save_cpus();
 
 	cpu = get_cpu();
-	save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8];
-	per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc;
+	gdt = get_cpu_gdt_table(cpu);
+	save_desc_40 = gdt[0x40 / 8];
+	gdt[0x40 / 8] = bad_bios_desc;
 
 	local_save_flags(flags);
 	APM_DO_CLI;
@@ -653,7 +656,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
 	error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
 	APM_DO_RESTORE_SEGS;
 	local_irq_restore(flags);
-	__get_cpu_var(cpu_gdt_table)[0x40 / 8] = save_desc_40;
+	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
 	apm_restore_cpus(cpus);
 	return error;
@@ -2295,35 +2298,36 @@ static int __init apm_init(void)
 	apm_bios_entry.segment = APM_CS;
 
 	for (i = 0; i < NR_CPUS; i++) {
-		set_base(per_cpu(cpu_gdt_table, i)[APM_CS >> 3],
+		struct desc_struct *gdt = get_cpu_gdt_table(i);
+		set_base(gdt[APM_CS >> 3],
 			 __va((unsigned long)apm_info.bios.cseg << 4));
-		set_base(per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3],
+		set_base(gdt[APM_CS_16 >> 3],
 			 __va((unsigned long)apm_info.bios.cseg_16 << 4));
-		set_base(per_cpu(cpu_gdt_table, i)[APM_DS >> 3],
+		set_base(gdt[APM_DS >> 3],
 			 __va((unsigned long)apm_info.bios.dseg << 4));
 #ifndef APM_RELAX_SEGMENTS
 		if (apm_info.bios.version == 0x100) {
 #endif
 			/* For ASUS motherboard, Award BIOS rev 110 (and others?) */
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 - 1);
+			_set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1);
 			/* For some unknown machine. */
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 64 * 1024 - 1);
+			_set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1);
 			/* For the DEC Hinote Ultra CT475 (and others?) */
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 64 * 1024 - 1);
+			_set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1);
 #ifndef APM_RELAX_SEGMENTS
 		} else {
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3],
+			_set_limit((char *)&gdt[APM_CS >> 3],
 				(apm_info.bios.cseg_len - 1) & 0xffff);
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3],
+			_set_limit((char *)&gdt[APM_CS_16 >> 3],
 				(apm_info.bios.cseg_16_len - 1) & 0xffff);
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3],
+			_set_limit((char *)&gdt[APM_DS >> 3],
 				(apm_info.bios.dseg_len - 1) & 0xffff);
 			/* workaround for broken BIOSes */
 			if (apm_info.bios.cseg_len <= apm_info.bios.offset)
-				_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 -1);
+				_set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1);
 			if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */
 				/* for the BIOS that assumes granularity = 1 */
-				per_cpu(cpu_gdt_table, i)[APM_DS >> 3].b |= 0x800000;
+				gdt[APM_DS >> 3].b |= 0x800000;
 				printk(KERN_NOTICE "apm: we set the granularity of dseg.\n");
 			}
 		}
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index a162c0326b4a..74145a33cb0f 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -573,6 +573,7 @@ void __devinit cpu_init(void)
 	int cpu = smp_processor_id();
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &current->thread;
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
 	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
 
 	if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -594,18 +595,16 @@ void __devinit cpu_init(void)
 	 * Initialize the per-CPU GDT with the boot GDT,
 	 * and set up the GDT descriptor:
 	 */
-	memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table,
-	       GDT_SIZE);
+	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
 
 	/* Set up GDT entry for 16bit stack */
-	*(__u64 *)&(per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_ESPFIX_SS]) |=
+	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
 		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
 		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
 		(CPU_16BIT_STACK_SIZE - 1);
 
 	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
-	cpu_gdt_descr[cpu].address =
-		(unsigned long)&per_cpu(cpu_gdt_table, cpu);
+	cpu_gdt_descr[cpu].address = (unsigned long)gdt;
 
 	load_gdt(&cpu_gdt_descr[cpu]);
 	load_idt(&idt_descr);
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 9edd4485b91e..cf572d9a3b6e 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -108,7 +108,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 		desc = (void *)desc + (seg & ~7);
 	} else {
 		/* Must disable preemption while reading the GDT. */
-		desc = (u32 *)&per_cpu(cpu_gdt_table, get_cpu());
+		desc = (u32 *)get_cpu_gdt_table(get_cpu());
 		desc = (void *)desc + (seg & ~7);
 	}
 
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 6df1a53c190e..29b851a18c6e 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -17,6 +17,8 @@
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
 
+#define get_cpu_gdt_table(_cpu) (per_cpu(cpu_gdt_table,_cpu))
+
 DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 
 struct Xgt_desc_struct {
@@ -60,7 +62,7 @@ __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
 
 static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
 {
-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr,
+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
 		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
 }
 
@@ -68,7 +70,7 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *ad
 
 static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
 {
-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
 }
 
 #define LDT_entry_a(info) \
@@ -109,7 +111,7 @@ static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 en
 
 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
-#define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
 	C(0); C(1); C(2);
 #undef C
 }