path: root/include/asm-i386
Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/a.out.h | 1
-rw-r--r--  include/asm-i386/alternative.h | 2
-rw-r--r--  include/asm-i386/boot.h | 6
-rw-r--r--  include/asm-i386/bootparam.h | 85
-rw-r--r--  include/asm-i386/cmpxchg.h | 16
-rw-r--r--  include/asm-i386/cpufeature.h | 26
-rw-r--r--  include/asm-i386/e820.h | 22
-rw-r--r--  include/asm-i386/fb.h | 17
-rw-r--r--  include/asm-i386/fixmap.h | 2
-rw-r--r--  include/asm-i386/geode.h | 159
-rw-r--r--  include/asm-i386/hpet.h | 126
-rw-r--r--  include/asm-i386/i8253.h | 16
-rw-r--r--  include/asm-i386/ide.h | 4
-rw-r--r--  include/asm-i386/io.h | 1
-rw-r--r--  include/asm-i386/irq.h | 1
-rw-r--r--  include/asm-i386/kprobes.h | 1
-rw-r--r--  include/asm-i386/mach-default/do_timer.h | 2
-rw-r--r--  include/asm-i386/mach-default/io_ports.h | 5
-rw-r--r--  include/asm-i386/mach-default/irq_vectors_limits.h | 2
-rw-r--r--  include/asm-i386/mach-default/mach_reboot.h | 25
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h | 2
-rw-r--r--  include/asm-i386/mc146818rtc.h | 5
-rw-r--r--  include/asm-i386/mce.h | 4
-rw-r--r--  include/asm-i386/mmu_context.h | 2
-rw-r--r--  include/asm-i386/nmi.h | 2
-rw-r--r--  include/asm-i386/page.h | 4
-rw-r--r--  include/asm-i386/paravirt.h | 22
-rw-r--r--  include/asm-i386/pci.h | 42
-rw-r--r--  include/asm-i386/percpu.h | 5
-rw-r--r--  include/asm-i386/pgalloc.h | 6
-rw-r--r--  include/asm-i386/pgtable-2level.h | 8
-rw-r--r--  include/asm-i386/pgtable-3level.h | 17
-rw-r--r--  include/asm-i386/pgtable.h | 40
-rw-r--r--  include/asm-i386/processor-cyrix.h | 30
-rw-r--r--  include/asm-i386/processor.h | 17
-rw-r--r--  include/asm-i386/required-features.h | 41
-rw-r--r--  include/asm-i386/resume-trace.h | 13
-rw-r--r--  include/asm-i386/setup.h | 14
-rw-r--r--  include/asm-i386/smp.h | 5
-rw-r--r--  include/asm-i386/string.h | 243
-rw-r--r--  include/asm-i386/system.h | 9
-rw-r--r--  include/asm-i386/thread_info.h | 5
-rw-r--r--  include/asm-i386/timer.h | 34
-rw-r--r--  include/asm-i386/tlbflush.h | 6
-rw-r--r--  include/asm-i386/topology.h | 2
-rw-r--r--  include/asm-i386/tsc.h | 1
-rw-r--r--  include/asm-i386/uaccess.h | 2
-rw-r--r--  include/asm-i386/unistd.h | 3
-rw-r--r--  include/asm-i386/vmi_time.h | 2
-rw-r--r--  include/asm-i386/xen/hypercall.h | 413
-rw-r--r--  include/asm-i386/xen/hypervisor.h | 73
-rw-r--r--  include/asm-i386/xen/interface.h | 188
52 files changed, 1268 insertions(+), 511 deletions(-)
diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h
index ab17bb8e5465..851a60f8258c 100644
--- a/include/asm-i386/a.out.h
+++ b/include/asm-i386/a.out.h
@@ -20,6 +20,7 @@ struct exec
 #ifdef __KERNEL__

 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP

 #endif

diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index eb7da5402bfa..bda6c810c0f4 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -149,4 +149,6 @@ apply_paravirt(struct paravirt_patch_site *start,
 #define __parainstructions_end	NULL
 #endif

+extern void text_poke(void *addr, unsigned char *opcode, int len);
+
 #endif /* _I386_ALTERNATIVE_H */
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
index bd024ab4fe53..ed8affbf96cb 100644
--- a/include/asm-i386/boot.h
+++ b/include/asm-i386/boot.h
@@ -1,5 +1,5 @@
-#ifndef _LINUX_BOOT_H
-#define _LINUX_BOOT_H
+#ifndef _ASM_BOOT_H
+#define _ASM_BOOT_H

 /* Don't touch these, unless you really know what you're doing. */
 #define DEF_INITSEG	0x9000
@@ -17,4 +17,4 @@
 		+ (CONFIG_PHYSICAL_ALIGN - 1)) \
 		& ~(CONFIG_PHYSICAL_ALIGN - 1))

-#endif /* _LINUX_BOOT_H */
+#endif /* _ASM_BOOT_H */
diff --git a/include/asm-i386/bootparam.h b/include/asm-i386/bootparam.h
new file mode 100644
index 000000000000..427d8652bfde
--- /dev/null
+++ b/include/asm-i386/bootparam.h
@@ -0,0 +1,85 @@
1#ifndef _ASM_BOOTPARAM_H
2#define _ASM_BOOTPARAM_H
3
4#include <linux/types.h>
5#include <linux/screen_info.h>
6#include <linux/apm_bios.h>
7#include <asm/e820.h>
8#include <linux/edd.h>
9#include <video/edid.h>
10
11struct setup_header {
12 u8 setup_sects;
13 u16 root_flags;
14 u32 syssize;
15 u16 ram_size;
16 u16 vid_mode;
17 u16 root_dev;
18 u16 boot_flag;
19 u16 jump;
20 u32 header;
21 u16 version;
22 u32 realmode_swtch;
23 u16 start_sys;
24 u16 kernel_version;
25 u8 type_of_loader;
26 u8 loadflags;
27#define LOADED_HIGH 0x01
28#define CAN_USE_HEAP 0x80
29 u16 setup_move_size;
30 u32 code32_start;
31 u32 ramdisk_image;
32 u32 ramdisk_size;
33 u32 bootsect_kludge;
34 u16 heap_end_ptr;
35 u16 _pad1;
36 u32 cmd_line_ptr;
37 u32 initrd_addr_max;
38 u32 kernel_alignment;
39 u8 relocatable_kernel;
40} __attribute__((packed));
41
42struct sys_desc_table {
43 u16 length;
44 u8 table[14];
45};
46
47struct efi_info {
48 u32 _pad1;
49 u32 efi_systab;
50 u32 efi_memdesc_size;
51 u32 efi_memdec_version;
52 u32 efi_memmap;
53 u32 fi_memmap_size;
54 u32 _pad2[2];
55};
56
57/* The so-called "zeropage" */
58struct boot_params {
59 struct screen_info screen_info; /* 0x000 */
60 struct apm_bios_info apm_bios_info; /* 0x040 */
61 u8 _pad2[12]; /* 0x054 */
62 u32 speedstep_info[4]; /* 0x060 */
63 u8 _pad3[16]; /* 0x070 */
64 u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
65 u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
66 struct sys_desc_table sys_desc_table; /* 0x0a0 */
67 u8 _pad4[144]; /* 0x0b0 */
68 struct edid_info edid_info; /* 0x140 */
69 struct efi_info efi_info; /* 0x1c0 */
70 u32 alt_mem_k; /* 0x1e0 */
71 u32 scratch; /* Scratch field! */ /* 0x1e4 */
72 u8 e820_entries; /* 0x1e8 */
73 u8 eddbuf_entries; /* 0x1e9 */
74 u8 edd_mbr_sig_buf_entries; /* 0x1ea */
75 u8 _pad6[6]; /* 0x1eb */
76 struct setup_header hdr; /* setup header */ /* 0x1f1 */
77 u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
78 u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
79 struct e820entry e820_map[E820MAX]; /* 0x2d0 */
80 u8 _pad8[48]; /* 0xcd0 */
81 struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */
82 u8 _pad9[276]; /* 0xeec */
83} __attribute__((packed));
84
85#endif /* _ASM_BOOTPARAM_H */
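For orientation, a hedged sketch (not part of the patch; the helper name is made up) of pulling a few fields out of the zeropage once setup.h exports struct boot_params later in this series:

/* Illustrative only: report where the boot loader put the command line
 * and the initial ramdisk, using the embedded setup_header. */
static void example_report_boot_params(const struct boot_params *bp)
{
	printk(KERN_INFO "boot: cmdline at 0x%08x, initrd at 0x%08x (%u bytes), %u e820 entries\n",
	       bp->hdr.cmd_line_ptr, bp->hdr.ramdisk_image,
	       bp->hdr.ramdisk_size, bp->e820_entries);
}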
diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h
index 7adcef0cd53b..f86ede28f6dc 100644
--- a/include/asm-i386/cmpxchg.h
+++ b/include/asm-i386/cmpxchg.h
@@ -3,14 +3,16 @@
3 3
4#include <linux/bitops.h> /* for LOCK_PREFIX */ 4#include <linux/bitops.h> /* for LOCK_PREFIX */
5 5
6/*
7 * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
8 * you need to test for the feature in boot_cpu_data.
9 */
10
6#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) 11#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
7 12
8struct __xchg_dummy { unsigned long a[100]; }; 13struct __xchg_dummy { unsigned long a[100]; };
9#define __xg(x) ((struct __xchg_dummy *)(x)) 14#define __xg(x) ((struct __xchg_dummy *)(x))
10 15
11
12#ifdef CONFIG_X86_CMPXCHG64
13
14/* 16/*
15 * The semantics of XCHGCMP8B are a bit strange, this is why 17 * The semantics of XCHGCMP8B are a bit strange, this is why
16 * there is a loop and the loading of %%eax and %%edx has to 18 * there is a loop and the loading of %%eax and %%edx has to
@@ -32,7 +34,7 @@ static inline void __set_64bit (unsigned long long * ptr,
32 "\n1:\t" 34 "\n1:\t"
33 "movl (%0), %%eax\n\t" 35 "movl (%0), %%eax\n\t"
34 "movl 4(%0), %%edx\n\t" 36 "movl 4(%0), %%edx\n\t"
35 "lock cmpxchg8b (%0)\n\t" 37 LOCK_PREFIX "cmpxchg8b (%0)\n\t"
36 "jnz 1b" 38 "jnz 1b"
37 : /* no outputs */ 39 : /* no outputs */
38 : "D"(ptr), 40 : "D"(ptr),
@@ -65,8 +67,6 @@ static inline void __set_64bit_var (unsigned long long *ptr,
65 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ 67 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
66 __set_64bit(ptr, ll_low(value), ll_high(value)) ) 68 __set_64bit(ptr, ll_low(value), ll_high(value)) )
67 69
68#endif
69
70/* 70/*
71 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway 71 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
72 * Note 2: xchg has side effect, so that attribute volatile is necessary, 72 * Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -252,8 +252,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
252}) 252})
253#endif 253#endif
254 254
255#ifdef CONFIG_X86_CMPXCHG64
256
257static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old, 255static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
258 unsigned long long new) 256 unsigned long long new)
259{ 257{
@@ -289,5 +287,3 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
289 ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\ 287 ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
290 (unsigned long long)(n))) 288 (unsigned long long)(n)))
291#endif 289#endif
292
293#endif
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index f514e906643a..c961c03cf1e2 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -12,7 +12,7 @@
 #endif
 #include <asm/required-features.h>

-#define NCAPINTS	7	/* N 32-bit words worth of info */
+#define NCAPINTS	8	/* N 32-bit words worth of info */

 /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
 #define X86_FEATURE_FPU		(0*32+ 0) /* Onboard FPU */
@@ -81,6 +81,7 @@
 #define X86_FEATURE_BTS		(3*32+13) /* Branch Trace Store */
 #define X86_FEATURE_LAPIC_TIMER_BROKEN (3*32+ 14) /* lapic timer broken in C1 */
 #define X86_FEATURE_SYNC_RDTSC	(3*32+15) /* RDTSC synchronizes the CPU */
+#define X86_FEATURE_REP_GOOD	(3*32+16) /* rep microcode works well on this CPU */

 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -108,11 +109,24 @@
 #define X86_FEATURE_LAHF_LM	(6*32+ 0) /* LAHF/SAHF in long mode */
 #define X86_FEATURE_CMP_LEGACY	(6*32+ 1) /* If yes HyperThreading not valid */

-#define cpu_has(c, bit) \
-	((__builtin_constant_p(bit) && (bit) < 32 && \
-	  (1UL << (bit)) & REQUIRED_MASK1) ? \
-	 1 : \
-	 test_bit(bit, (c)->x86_capability))
+/*
+ * Auxiliary flags: Linux defined - For features scattered in various
+ * CPUID levels like 0x6, 0xA etc
+ */
+#define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
+
+#define cpu_has(c, bit)							\
+	(__builtin_constant_p(bit) &&					\
+	 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) ||	\
+	   (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) ||	\
+	   (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) ||	\
+	   (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) ||	\
+	   (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) ||	\
+	   (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) ||	\
+	   (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||	\
+	   (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) )	\
+	  ? 1 :								\
+	  test_bit(bit, (c)->x86_capability))
 #define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)

 #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
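For illustration (not part of the patch): with the reworked cpu_has(), a constant bit that lands in the matching REQUIRED_MASKn word folds to 1 at compile time, while anything else falls back to a run-time test_bit(). The function below is a hypothetical caller.

static void example_feature_checks(struct cpuinfo_x86 *c)
{
	/* Folds to "if (1)" on a CONFIG_X86_PAE build, since PAE is then
	 * part of REQUIRED_MASK0. */
	if (cpu_has(c, X86_FEATURE_PAE))
		;
	/* Not a required feature, so this stays a run-time test_bit()
	 * on c->x86_capability. */
	if (cpu_has(c, X86_FEATURE_XMM3))
		;
}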
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index 096a2a8eb1da..43114c824608 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -25,13 +25,15 @@

 #ifndef __ASSEMBLY__

+struct e820entry {
+	u64 addr;	/* start of memory segment */
+	u64 size;	/* size of memory segment */
+	u32 type;	/* type of memory segment */
+} __attribute__((packed));
+
 struct e820map {
-	int nr_map;
-	struct e820entry {
-		unsigned long long addr;	/* start of memory segment */
-		unsigned long long size;	/* size of memory segment */
-		unsigned long type;		/* type of memory segment */
-	} map[E820MAX];
+	u32 nr_map;
+	struct e820entry map[E820MAX];
 };

 extern struct e820map e820;
@@ -45,6 +47,14 @@ extern void e820_register_memory(void);
 extern void limit_regions(unsigned long long size);
 extern void print_memory_map(char *who);

+#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
+extern void e820_mark_nosave_regions(void);
+#else
+static inline void e820_mark_nosave_regions(void)
+{
+}
+#endif
+
 #endif/*!__ASSEMBLY__*/

 #endif/*__E820_HEADER*/
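As a usage sketch (the dump function is illustrative, not from the patch), walking the map with the new fixed-width types looks like this; nr_map is now u32 and the entry fields are u64/u32:

static void example_dump_e820(void)
{
	u32 i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *entry = &e820.map[i];

		printk(KERN_INFO "e820: %016Lx - %016Lx type %u\n",
		       entry->addr, entry->addr + entry->size, entry->type);
	}
}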
diff --git a/include/asm-i386/fb.h b/include/asm-i386/fb.h
new file mode 100644
index 000000000000..d1c6297d4a61
--- /dev/null
+++ b/include/asm-i386/fb.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+extern int fb_is_primary_device(struct fb_info *info);
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	if (boot_cpu_data.x86 > 3)
+		pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index 80ea052ee3a4..249e753ac805 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -54,6 +54,8 @@ extern unsigned long __FIXADDR_TOP;
 enum fixed_addresses {
 	FIX_HOLE,
 	FIX_VDSO,
+	FIX_DBGP_BASE,
+	FIX_EARLYCON_MEM_BASE,
 #ifdef CONFIG_X86_LOCAL_APIC
 	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
 #endif
diff --git a/include/asm-i386/geode.h b/include/asm-i386/geode.h
new file mode 100644
index 000000000000..6da4bbbea3dc
--- /dev/null
+++ b/include/asm-i386/geode.h
@@ -0,0 +1,159 @@
1/*
2 * AMD Geode definitions
3 * Copyright (C) 2006, Advanced Micro Devices, Inc.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public License
7 * as published by the Free Software Foundation.
8 */
9
10#ifndef _ASM_GEODE_H_
11#define _ASM_GEODE_H_
12
13#include <asm/processor.h>
14#include <linux/io.h>
15
16/* Generic southbridge functions */
17
18#define GEODE_DEV_PMS 0
19#define GEODE_DEV_ACPI 1
20#define GEODE_DEV_GPIO 2
21#define GEODE_DEV_MFGPT 3
22
23extern int geode_get_dev_base(unsigned int dev);
24
25/* Useful macros */
26#define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS)
27#define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI)
28#define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO)
29#define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT)
30
31/* MSRS */
32
33#define GX_GLCP_SYS_RSTPLL 0x4C000014
34
35#define MSR_LBAR_SMB 0x5140000B
36#define MSR_LBAR_GPIO 0x5140000C
37#define MSR_LBAR_MFGPT 0x5140000D
38#define MSR_LBAR_ACPI 0x5140000E
39#define MSR_LBAR_PMS 0x5140000F
40
41#define MSR_PIC_YSEL_LOW 0x51400020
42#define MSR_PIC_YSEL_HIGH 0x51400021
43#define MSR_PIC_ZSEL_LOW 0x51400022
44#define MSR_PIC_ZSEL_HIGH 0x51400023
45
46#define MFGPT_IRQ_MSR 0x51400028
47#define MFGPT_NR_MSR 0x51400029
48
49/* Resource Sizes */
50
51#define LBAR_GPIO_SIZE 0xFF
52#define LBAR_MFGPT_SIZE 0x40
53#define LBAR_ACPI_SIZE 0x40
54#define LBAR_PMS_SIZE 0x80
55
56/* ACPI registers (PMS block) */
57
58/*
59 * PM1_EN is only valid when VSA is enabled for 16 bit reads.
60 * When VSA is not enabled, *always* read both PM1_STS and PM1_EN
61 * with a 32 bit read at offset 0x0
62 */
63
64#define PM1_STS 0x00
65#define PM1_EN 0x02
66#define PM1_CNT 0x08
67#define PM2_CNT 0x0C
68#define PM_TMR 0x10
69#define PM_GPE0_STS 0x18
70#define PM_GPE0_EN 0x1C
71
72/* PMC registers (PMS block) */
73
74#define PM_SSD 0x00
75#define PM_SCXA 0x04
76#define PM_SCYA 0x08
77#define PM_OUT_SLPCTL 0x0C
78#define PM_SCLK 0x10
79#define PM_SED 0x1
80#define PM_SCXD 0x18
81#define PM_SCYD 0x1C
82#define PM_IN_SLPCTL 0x20
83#define PM_WKD 0x30
84#define PM_WKXD 0x34
85#define PM_RD 0x38
86#define PM_WKXA 0x3C
87#define PM_FSD 0x40
88#define PM_TSD 0x44
89#define PM_PSD 0x48
90#define PM_NWKD 0x4C
91#define PM_AWKD 0x50
92#define PM_SSC 0x54
93
94/* GPIO */
95
96#define GPIO_OUTPUT_VAL 0x00
97#define GPIO_OUTPUT_ENABLE 0x04
98#define GPIO_OUTPUT_OPEN_DRAIN 0x08
99#define GPIO_OUTPUT_INVERT 0x0C
100#define GPIO_OUTPUT_AUX1 0x10
101#define GPIO_OUTPUT_AUX2 0x14
102#define GPIO_PULL_UP 0x18
103#define GPIO_PULL_DOWN 0x1C
104#define GPIO_INPUT_ENABLE 0x20
105#define GPIO_INPUT_INVERT 0x24
106#define GPIO_INPUT_FILTER 0x28
107#define GPIO_INPUT_EVENT_COUNT 0x2C
108#define GPIO_READ_BACK 0x30
109#define GPIO_INPUT_AUX1 0x34
110#define GPIO_EVENTS_ENABLE 0x38
111#define GPIO_LOCK_ENABLE 0x3C
112#define GPIO_POSITIVE_EDGE_EN 0x40
113#define GPIO_NEGATIVE_EDGE_EN 0x44
114#define GPIO_POSITIVE_EDGE_STS 0x48
115#define GPIO_NEGATIVE_EDGE_STS 0x4C
116
117#define GPIO_MAP_X 0xE0
118#define GPIO_MAP_Y 0xE4
119#define GPIO_MAP_Z 0xE8
120#define GPIO_MAP_W 0xEC
121
122extern void geode_gpio_set(unsigned int, unsigned int);
123extern void geode_gpio_clear(unsigned int, unsigned int);
124extern int geode_gpio_isset(unsigned int, unsigned int);
125extern void geode_gpio_setup_event(unsigned int, int, int);
126extern void geode_gpio_set_irq(unsigned int, unsigned int);
127
128static inline void geode_gpio_event_irq(unsigned int gpio, int pair)
129{
130 geode_gpio_setup_event(gpio, pair, 0);
131}
132
133static inline void geode_gpio_event_pme(unsigned int gpio, int pair)
134{
135 geode_gpio_setup_event(gpio, pair, 1);
136}
137
138/* Specific geode tests */
139
140static inline int is_geode_gx(void)
141{
142 return ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC) &&
143 (boot_cpu_data.x86 == 5) &&
144 (boot_cpu_data.x86_model == 5));
145}
146
147static inline int is_geode_lx(void)
148{
149 return ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
150 (boot_cpu_data.x86 == 5) &&
151 (boot_cpu_data.x86_model == 10));
152}
153
154static inline int is_geode(void)
155{
156 return (is_geode_gx() || is_geode_lx());
157}
158
159#endif
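A hedged sketch of how these helpers compose (the GPIO read-back is purely illustrative; register semantics are the Geode datasheet's, and the probe function is made up):

static int example_geode_gpio_probe(void)
{
	int base;

	if (!is_geode())
		return -ENODEV;

	base = geode_gpio_base();	/* I/O base set up from MSR_LBAR_GPIO */
	if (!base)
		return -ENODEV;

	printk(KERN_INFO "geode: GPIO readback 0x%08x\n",
	       inl(base + GPIO_READ_BACK));
	return 0;
}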
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h
index dddeedf504b7..c82dc7ed96b3 100644
--- a/include/asm-i386/hpet.h
+++ b/include/asm-i386/hpet.h
@@ -4,112 +4,82 @@
4 4
5#ifdef CONFIG_HPET_TIMER 5#ifdef CONFIG_HPET_TIMER
6 6
7#include <linux/errno.h>
8#include <linux/module.h>
9#include <linux/sched.h>
10#include <linux/kernel.h>
11#include <linux/param.h>
12#include <linux/string.h>
13#include <linux/mm.h>
14#include <linux/interrupt.h>
15#include <linux/time.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/smp.h>
19
20#include <asm/io.h>
21#include <asm/smp.h>
22#include <asm/irq.h>
23#include <asm/msr.h>
24#include <asm/delay.h>
25#include <asm/mpspec.h>
26#include <asm/uaccess.h>
27#include <asm/processor.h>
28
29#include <linux/timex.h>
30
31/* 7/*
32 * Documentation on HPET can be found at: 8 * Documentation on HPET can be found at:
33 * http://www.intel.com/ial/home/sp/pcmmspec.htm 9 * http://www.intel.com/ial/home/sp/pcmmspec.htm
34 * ftp://download.intel.com/ial/home/sp/mmts098.pdf 10 * ftp://download.intel.com/ial/home/sp/mmts098.pdf
35 */ 11 */
36 12
37#define HPET_MMAP_SIZE 1024 13#define HPET_MMAP_SIZE 1024
38 14
39#define HPET_ID 0x000 15#define HPET_ID 0x000
40#define HPET_PERIOD 0x004 16#define HPET_PERIOD 0x004
41#define HPET_CFG 0x010 17#define HPET_CFG 0x010
42#define HPET_STATUS 0x020 18#define HPET_STATUS 0x020
43#define HPET_COUNTER 0x0f0 19#define HPET_COUNTER 0x0f0
44#define HPET_T0_CFG 0x100 20#define HPET_T0_CFG 0x100
45#define HPET_T0_CMP 0x108 21#define HPET_T0_CMP 0x108
46#define HPET_T0_ROUTE 0x110 22#define HPET_T0_ROUTE 0x110
47#define HPET_T1_CFG 0x120 23#define HPET_T1_CFG 0x120
48#define HPET_T1_CMP 0x128 24#define HPET_T1_CMP 0x128
49#define HPET_T1_ROUTE 0x130 25#define HPET_T1_ROUTE 0x130
50#define HPET_T2_CFG 0x140 26#define HPET_T2_CFG 0x140
51#define HPET_T2_CMP 0x148 27#define HPET_T2_CMP 0x148
52#define HPET_T2_ROUTE 0x150 28#define HPET_T2_ROUTE 0x150
53 29
54#define HPET_ID_LEGSUP 0x00008000 30#define HPET_ID_REV 0x000000ff
55#define HPET_ID_NUMBER 0x00001f00 31#define HPET_ID_NUMBER 0x00001f00
56#define HPET_ID_REV 0x000000ff 32#define HPET_ID_64BIT 0x00002000
33#define HPET_ID_LEGSUP 0x00008000
34#define HPET_ID_VENDOR 0xffff0000
57#define HPET_ID_NUMBER_SHIFT 8 35#define HPET_ID_NUMBER_SHIFT 8
36#define HPET_ID_VENDOR_SHIFT 16
58 37
59#define HPET_CFG_ENABLE 0x001 38#define HPET_ID_VENDOR_8086 0x8086
60#define HPET_CFG_LEGACY 0x002 39
40#define HPET_CFG_ENABLE 0x001
41#define HPET_CFG_LEGACY 0x002
61#define HPET_LEGACY_8254 2 42#define HPET_LEGACY_8254 2
62#define HPET_LEGACY_RTC 8 43#define HPET_LEGACY_RTC 8
63 44
64#define HPET_TN_ENABLE 0x004 45#define HPET_TN_LEVEL 0x0002
65#define HPET_TN_PERIODIC 0x008 46#define HPET_TN_ENABLE 0x0004
66#define HPET_TN_PERIODIC_CAP 0x010 47#define HPET_TN_PERIODIC 0x0008
67#define HPET_TN_SETVAL 0x040 48#define HPET_TN_PERIODIC_CAP 0x0010
68#define HPET_TN_32BIT 0x100 49#define HPET_TN_64BIT_CAP 0x0020
69 50#define HPET_TN_SETVAL 0x0040
70/* Use our own asm for 64 bit multiply/divide */ 51#define HPET_TN_32BIT 0x0100
71#define ASM_MUL64_REG(eax_out,edx_out,reg_in,eax_in) \ 52#define HPET_TN_ROUTE 0x3e00
72 __asm__ __volatile__("mull %2" \ 53#define HPET_TN_FSB 0x4000
73 :"=a" (eax_out), "=d" (edx_out) \ 54#define HPET_TN_FSB_CAP 0x8000
74 :"r" (reg_in), "0" (eax_in)) 55#define HPET_TN_ROUTE_SHIFT 9
75 56
76#define ASM_DIV64_REG(eax_out,edx_out,reg_in,eax_in,edx_in) \
77 __asm__ __volatile__("divl %2" \
78 :"=a" (eax_out), "=d" (edx_out) \
79 :"r" (reg_in), "0" (eax_in), "1" (edx_in))
80
81#define KERNEL_TICK_USEC (1000000UL/HZ) /* tick value in microsec */
82/* Max HPET Period is 10^8 femto sec as in HPET spec */ 57/* Max HPET Period is 10^8 femto sec as in HPET spec */
83#define HPET_MAX_PERIOD (100000000UL) 58#define HPET_MAX_PERIOD 100000000UL
84/* 59/*
85 * Min HPET period is 10^5 femto sec just for safety. If it is less than this, 60 * Min HPET period is 10^5 femto sec just for safety. If it is less than this,
86 * then 32 bit HPET counter wrapsaround in less than 0.5 sec. 61 * then 32 bit HPET counter wrapsaround in less than 0.5 sec.
87 */ 62 */
88#define HPET_MIN_PERIOD (100000UL) 63#define HPET_MIN_PERIOD 100000UL
89#define HPET_TICK_RATE (HZ * 100000UL)
90 64
91extern unsigned long hpet_address; /* hpet memory map physical address */ 65/* hpet memory map physical address */
66extern unsigned long hpet_address;
92extern int is_hpet_enabled(void); 67extern int is_hpet_enabled(void);
93
94#ifdef CONFIG_X86_64
95extern unsigned long hpet_tick; /* hpet clks count per tick */
96extern int hpet_use_timer;
97extern int hpet_rtc_timer_init(void);
98extern int hpet_enable(void); 68extern int hpet_enable(void);
99extern int is_hpet_capable(void);
100extern int hpet_readl(unsigned long a);
101#else
102extern int hpet_enable(void);
103#endif
104 69
105#ifdef CONFIG_HPET_EMULATE_RTC 70#ifdef CONFIG_HPET_EMULATE_RTC
71
72#include <linux/interrupt.h>
73
106extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); 74extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
107extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); 75extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
108extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec); 76extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
77 unsigned char sec);
109extern int hpet_set_periodic_freq(unsigned long freq); 78extern int hpet_set_periodic_freq(unsigned long freq);
110extern int hpet_rtc_dropped_irq(void); 79extern int hpet_rtc_dropped_irq(void);
111extern int hpet_rtc_timer_init(void); 80extern int hpet_rtc_timer_init(void);
112extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id); 81extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
82
113#endif /* CONFIG_HPET_EMULATE_RTC */ 83#endif /* CONFIG_HPET_EMULATE_RTC */
114 84
115#else 85#else
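For reference, a small sketch (the function is illustrative) of how the new ID-register masks decode; the NUMBER field holds the index of the last timer, hence the +1:

static void example_decode_hpet_id(unsigned long id)
{
	unsigned int timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
	unsigned int vendor = (id & HPET_ID_VENDOR) >> HPET_ID_VENDOR_SHIFT;

	printk(KERN_INFO "HPET: rev %lu, %u timers, vendor 0x%04x%s%s\n",
	       id & HPET_ID_REV, timers, vendor,
	       (id & HPET_ID_64BIT) ? ", 64-bit" : "",
	       (id & HPET_ID_LEGSUP) ? ", legacy replacement" : "");
}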
diff --git a/include/asm-i386/i8253.h b/include/asm-i386/i8253.h
index 6cb0dd4dcdde..7577d058d86e 100644
--- a/include/asm-i386/i8253.h
+++ b/include/asm-i386/i8253.h
@@ -3,19 +3,15 @@

 #include <linux/clockchips.h>

+/* i8253A PIT registers */
+#define PIT_MODE		0x43
+#define PIT_CH0			0x40
+#define PIT_CH2			0x42
+
 extern spinlock_t i8253_lock;

 extern struct clock_event_device *global_clock_event;

-/**
- * pit_interrupt_hook - hook into timer tick
- * @regs:	standard registers from interrupt
- *
- * Call the global clock event handler.
- **/
-static inline void pit_interrupt_hook(void)
-{
-	global_clock_event->event_handler(global_clock_event);
-}
+extern void setup_pit_timer(void);

 #endif	/* __ASM_I8253_H__ */
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h
index 0fc240c80f49..e7817a3d6578 100644
--- a/include/asm-i386/ide.h
+++ b/include/asm-i386/ide.h
@@ -40,14 +40,13 @@ static __inline__ int ide_default_irq(unsigned long base)
40 40
41static __inline__ unsigned long ide_default_io_base(int index) 41static __inline__ unsigned long ide_default_io_base(int index)
42{ 42{
43 struct pci_dev *pdev;
44 /* 43 /*
45 * If PCI is present then it is not safe to poke around 44 * If PCI is present then it is not safe to poke around
46 * the other legacy IDE ports. Only 0x1f0 and 0x170 are 45 * the other legacy IDE ports. Only 0x1f0 and 0x170 are
47 * defined compatibility mode ports for PCI. A user can 46 * defined compatibility mode ports for PCI. A user can
48 * override this using ide= but we must default safe. 47 * override this using ide= but we must default safe.
49 */ 48 */
50 if ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL)) == NULL) { 49 if (no_pci_devices()) {
51 switch(index) { 50 switch(index) {
52 case 2: return 0x1e8; 51 case 2: return 0x1e8;
53 case 3: return 0x168; 52 case 3: return 0x168;
@@ -55,7 +54,6 @@ static __inline__ unsigned long ide_default_io_base(int index)
55 case 5: return 0x160; 54 case 5: return 0x160;
56 } 55 }
57 } 56 }
58 pci_dev_put(pdev);
59 switch (index) { 57 switch (index) {
60 case 0: return 0x1f0; 58 case 0: return 0x1f0;
61 case 1: return 0x170; 59 case 1: return 0x170;
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index e797586a5bfc..7b65b5b00034 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -129,6 +129,7 @@ extern void iounmap(volatile void __iomem *addr);
  */
 extern void *bt_ioremap(unsigned long offset, unsigned long size);
 extern void bt_iounmap(void *addr, unsigned long size);
+extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);

 /* Use early IO mappings for DMI because it's initialized early */
 #define dmi_ioremap bt_ioremap
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 9e15ce0006eb..36f310632c49 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -41,6 +41,7 @@ extern int irqbalance_disable(char *str);
 extern void fixup_irqs(cpumask_t map);
 #endif

+unsigned int do_IRQ(struct pt_regs *regs);
 void init_IRQ(void);
 void __init native_init_IRQ(void);

diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 8774d06689da..06f7303c30ca 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -42,7 +42,6 @@ typedef u8 kprobe_opcode_t;
 	? (MAX_STACK_SIZE) \
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))

-#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
 #define ARCH_SUPPORTS_KRETPROBES
 #define ARCH_INACTIVE_KPROBE_COUNT 0
 #define flush_insn_slot(p)	do { } while (0)
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 56e5689863ae..23ecda0b28a0 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -12,5 +12,5 @@

 static inline void do_timer_interrupt_hook(void)
 {
-	pit_interrupt_hook();
+	global_clock_event->event_handler(global_clock_event);
 }
diff --git a/include/asm-i386/mach-default/io_ports.h b/include/asm-i386/mach-default/io_ports.h
index a96d9f6604ee..48540ba97166 100644
--- a/include/asm-i386/mach-default/io_ports.h
+++ b/include/asm-i386/mach-default/io_ports.h
@@ -7,11 +7,6 @@
 #ifndef _MACH_IO_PORTS_H
 #define _MACH_IO_PORTS_H

-/* i8253A PIT registers */
-#define PIT_MODE		0x43
-#define PIT_CH0			0x40
-#define PIT_CH2			0x42
-
 /* i8259A PIC registers */
 #define PIC_MASTER_CMD		0x20
 #define PIC_MASTER_IMR		0x21
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h
index 7f161e760be6..a90c7a60109f 100644
--- a/include/asm-i386/mach-default/irq_vectors_limits.h
+++ b/include/asm-i386/mach-default/irq_vectors_limits.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_IRQ_VECTORS_LIMITS_H
 #define _ASM_IRQ_VECTORS_LIMITS_H

-#ifdef CONFIG_X86_IO_APIC
+#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
 #define NR_IRQS 224
 # if (224 >= 32 * NR_CPUS)
 #  define NR_IRQ_VECTORS NR_IRQS
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h
index a955e57ad016..e23fd9fbebb3 100644
--- a/include/asm-i386/mach-default/mach_reboot.h
+++ b/include/asm-i386/mach-default/mach_reboot.h
@@ -19,14 +19,37 @@ static inline void kb_wait(void)
19static inline void mach_reboot(void) 19static inline void mach_reboot(void)
20{ 20{
21 int i; 21 int i;
22
23 /* old method, works on most machines */
22 for (i = 0; i < 10; i++) { 24 for (i = 0; i < 10; i++) {
23 kb_wait(); 25 kb_wait();
24 udelay(50); 26 udelay(50);
27 outb(0xfe, 0x64); /* pulse reset low */
28 udelay(50);
29 }
30
31 /* New method: sets the "System flag" which, when set, indicates
32 * successful completion of the keyboard controller self-test (Basic
33 * Assurance Test, BAT). This is needed for some machines with no
34 * keyboard plugged in. This read-modify-write sequence sets only the
35 * system flag
36 */
37 for (i = 0; i < 10; i++) {
38 int cmd;
39
40 outb(0x20, 0x64); /* read Controller Command Byte */
41 udelay(50);
42 kb_wait();
43 udelay(50);
44 cmd = inb(0x60);
45 udelay(50);
46 kb_wait();
47 udelay(50);
25 outb(0x60, 0x64); /* write Controller Command Byte */ 48 outb(0x60, 0x64); /* write Controller Command Byte */
26 udelay(50); 49 udelay(50);
27 kb_wait(); 50 kb_wait();
28 udelay(50); 51 udelay(50);
29 outb(0x14, 0x60); /* set "System flag" */ 52 outb(cmd | 0x04, 0x60); /* set "System flag" */
30 udelay(50); 53 udelay(50);
31 kb_wait(); 54 kb_wait();
32 udelay(50); 55 udelay(50);
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index 60f9dcc15d54..bc2b58926308 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -12,7 +12,7 @@
  **/
 static inline void do_timer_interrupt_hook(void)
 {
-	pit_interrupt_hook();
+	global_clock_event->event_handler(global_clock_event);
 	voyager_timer_interrupt();
 }

diff --git a/include/asm-i386/mc146818rtc.h b/include/asm-i386/mc146818rtc.h
index 99a890047023..1613b42eaf58 100644
--- a/include/asm-i386/mc146818rtc.h
+++ b/include/asm-i386/mc146818rtc.h
@@ -6,6 +6,7 @@

 #include <asm/io.h>
 #include <asm/system.h>
+#include <asm/processor.h>
 #include <linux/mc146818rtc.h>

 #ifndef RTC_PORT
@@ -43,8 +44,10 @@ static inline void lock_cmos(unsigned char reg)
 	unsigned long new;
 	new = ((smp_processor_id()+1) << 8) | reg;
 	for (;;) {
-		if (cmos_lock)
+		if (cmos_lock) {
+			cpu_relax();
 			continue;
+		}
 		if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0)
 			return;
 	}
diff --git a/include/asm-i386/mce.h b/include/asm-i386/mce.h
index b0a02ee34ffd..d56d89742e8f 100644
--- a/include/asm-i386/mce.h
+++ b/include/asm-i386/mce.h
@@ -5,3 +5,7 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
 #endif

 extern int mce_disabled;
+
+extern void stop_mce(void);
+extern void restart_mce(void);
+
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 8198d1cca1f3..7eb0b0b1fb3c 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -32,6 +32,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #endif
 }

+void leave_mm(unsigned long cpu);
+
 static inline void switch_mm(struct mm_struct *prev,
 			     struct mm_struct *next,
 			     struct task_struct *tsk)
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index fb1e133efd9f..ff30c98f87b0 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -57,5 +57,7 @@ unsigned lapic_adjust_nmi_hz(unsigned hz);
 int lapic_watchdog_ok(void);
 void disable_lapic_nmi_watchdog(void);
 void enable_lapic_nmi_watchdog(void);
+void stop_nmi(void);
+void restart_nmi(void);

 #endif /* ASM_NMI_H */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 818ac8bf01e2..80ecc66b6d86 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -34,7 +34,8 @@
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

 /*
@@ -43,7 +44,6 @@
 extern int nx_enabled;

 #ifdef CONFIG_X86_PAE
-extern unsigned long long __supported_pte_mask;
 typedef struct { unsigned long pte_low, pte_high; } pte_t;
 typedef struct { unsigned long long pmd; } pmd_t;
 typedef struct { unsigned long long pgd; } pgd_t;
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 7f846a7d6bcc..7df88be2dd9e 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -52,6 +52,8 @@ struct paravirt_ops
52 /* Basic arch-specific setup */ 52 /* Basic arch-specific setup */
53 void (*arch_setup)(void); 53 void (*arch_setup)(void);
54 char *(*memory_setup)(void); 54 char *(*memory_setup)(void);
55 void (*post_allocator_init)(void);
56
55 void (*init_IRQ)(void); 57 void (*init_IRQ)(void);
56 void (*time_init)(void); 58 void (*time_init)(void);
57 59
@@ -116,7 +118,7 @@ struct paravirt_ops
116 118
117 u64 (*read_tsc)(void); 119 u64 (*read_tsc)(void);
118 u64 (*read_pmc)(void); 120 u64 (*read_pmc)(void);
119 u64 (*get_scheduled_cycles)(void); 121 unsigned long long (*sched_clock)(void);
120 unsigned long (*get_cpu_khz)(void); 122 unsigned long (*get_cpu_khz)(void);
121 123
122 /* Segment descriptor handling */ 124 /* Segment descriptor handling */
@@ -173,7 +175,7 @@ struct paravirt_ops
173 unsigned long va); 175 unsigned long va);
174 176
175 /* Hooks for allocating/releasing pagetable pages */ 177 /* Hooks for allocating/releasing pagetable pages */
176 void (*alloc_pt)(u32 pfn); 178 void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
177 void (*alloc_pd)(u32 pfn); 179 void (*alloc_pd)(u32 pfn);
178 void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); 180 void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
179 void (*release_pt)(u32 pfn); 181 void (*release_pt)(u32 pfn);
@@ -260,6 +262,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len)
260unsigned paravirt_patch_insns(void *site, unsigned len, 262unsigned paravirt_patch_insns(void *site, unsigned len,
261 const char *start, const char *end); 263 const char *start, const char *end);
262 264
265int paravirt_disable_iospace(void);
263 266
264/* 267/*
265 * This generates an indirect call based on the operation type number. 268 * This generates an indirect call based on the operation type number.
@@ -563,7 +566,10 @@ static inline u64 paravirt_read_tsc(void)
563 566
564#define rdtscll(val) (val = paravirt_read_tsc()) 567#define rdtscll(val) (val = paravirt_read_tsc())
565 568
566#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles()) 569static inline unsigned long long paravirt_sched_clock(void)
570{
571 return PVOP_CALL0(unsigned long long, sched_clock);
572}
567#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz()) 573#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
568 574
569#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) 575#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
@@ -669,6 +675,12 @@ static inline void setup_secondary_clock(void)
669} 675}
670#endif 676#endif
671 677
678static inline void paravirt_post_allocator_init(void)
679{
680 if (paravirt_ops.post_allocator_init)
681 (*paravirt_ops.post_allocator_init)();
682}
683
672static inline void paravirt_pagetable_setup_start(pgd_t *base) 684static inline void paravirt_pagetable_setup_start(pgd_t *base)
673{ 685{
674 if (paravirt_ops.pagetable_setup_start) 686 if (paravirt_ops.pagetable_setup_start)
@@ -725,9 +737,9 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
725 PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va); 737 PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
726} 738}
727 739
728static inline void paravirt_alloc_pt(unsigned pfn) 740static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
729{ 741{
730 PVOP_VCALL1(alloc_pt, pfn); 742 PVOP_VCALL2(alloc_pt, mm, pfn);
731} 743}
732static inline void paravirt_release_pt(unsigned pfn) 744static inline void paravirt_release_pt(unsigned pfn)
733{ 745{
diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h
index 64b6d0baedbc..d790343e9982 100644
--- a/include/asm-i386/pci.h
+++ b/include/asm-i386/pci.h
@@ -3,6 +3,11 @@
3 3
4 4
5#ifdef __KERNEL__ 5#ifdef __KERNEL__
6
7struct pci_sysdata {
8 int node; /* NUMA node */
9};
10
6#include <linux/mm.h> /* for struct page */ 11#include <linux/mm.h> /* for struct page */
7 12
8/* Can be used to override the logic in pci_scan_bus for skipping 13/* Can be used to override the logic in pci_scan_bus for skipping
@@ -56,48 +61,11 @@ struct pci_dev;
56#define pci_unmap_len(PTR, LEN_NAME) (0) 61#define pci_unmap_len(PTR, LEN_NAME) (0)
57#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) 62#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
58 63
59/* This is always fine. */
60#define pci_dac_dma_supported(pci_dev, mask) (1)
61
62static inline dma64_addr_t
63pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
64{
65 return ((dma64_addr_t) page_to_phys(page) +
66 (dma64_addr_t) offset);
67}
68
69static inline struct page *
70pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
71{
72 return pfn_to_page(dma_addr >> PAGE_SHIFT);
73}
74
75static inline unsigned long
76pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
77{
78 return (dma_addr & ~PAGE_MASK);
79}
80
81static inline void
82pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
83{
84}
85
86static inline void
87pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
88{
89 flush_write_buffers();
90}
91
92#define HAVE_PCI_MMAP 64#define HAVE_PCI_MMAP
93extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 65extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
94 enum pci_mmap_state mmap_state, int write_combine); 66 enum pci_mmap_state mmap_state, int write_combine);
95 67
96 68
97static inline void pcibios_add_platform_entries(struct pci_dev *dev)
98{
99}
100
101#ifdef CONFIG_PCI 69#ifdef CONFIG_PCI
102static inline void pci_dma_burst_advice(struct pci_dev *pdev, 70static inline void pci_dma_burst_advice(struct pci_dev *pdev,
103 enum pci_dma_burst_strategy *strat, 71 enum pci_dma_burst_strategy *strat,
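A small sketch (the accessor is hypothetical) of how the new pci_sysdata carries the NUMA node: the arch PCI code hangs it off bus->sysdata, so a consumer can do something like this.

static int example_bus_to_node(struct pci_bus *bus)
{
	struct pci_sysdata *sd = bus->sysdata;	/* set up by the i386 PCI code */

	return sd ? sd->node : -1;
}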
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index f54830b5d5ac..a7ebd436f3cc 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -54,6 +54,11 @@ extern unsigned long __per_cpu_offset[];
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name

+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name			\
+	____cacheline_aligned_in_smp
+
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);

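A minimal usage sketch of the new macro (the counter is only an example): the variable lands in .data.percpu.shared_aligned and is padded to a cache line, so write-heavy per-CPU data does not false-share on SMP.

DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, example_event_count);

static void example_count_event(void)
{
	__get_cpu_var(example_event_count)++;
}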
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index d07b7afc2692..f2fc33ceb9f2 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -7,7 +7,7 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define paravirt_alloc_pt(pfn) do { } while (0)
+#define paravirt_alloc_pt(mm, pfn) do { } while (0)
 #define paravirt_alloc_pd(pfn) do { } while (0)
 #define paravirt_alloc_pd(pfn) do { } while (0)
 #define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
@@ -17,13 +17,13 @@

 #define pmd_populate_kernel(mm, pmd, pte)			\
 do {								\
-	paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT);		\
+	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);		\
 	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));		\
 } while (0)

 #define pmd_populate(mm, pmd, pte) 				\
 do {								\
-	paravirt_alloc_pt(page_to_pfn(pte));			\
+	paravirt_alloc_pt(mm, page_to_pfn(pte));		\
 	set_pmd(pmd, __pmd(_PAGE_TABLE +			\
 		((unsigned long long)page_to_pfn(pte) <<	\
 			(unsigned long long) PAGE_SHIFT)));	\
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index a50fd1773de8..84b03cf56a79 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -57,14 +57,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

 /*
- * All present user pages are user-executable:
- */
-static inline int pte_exec(pte_t pte)
-{
-	return pte_user(pte);
-}
-
-/*
  * All present pages are kernel-executable:
  */
 static inline int pte_exec_kernel(pte_t pte)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index eb0f1d7e96a1..948a33414118 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -20,26 +20,11 @@
20#define pud_present(pud) 1 20#define pud_present(pud) 1
21 21
22/* 22/*
23 * Is the pte executable?
24 */
25static inline int pte_x(pte_t pte)
26{
27 return !(pte_val(pte) & _PAGE_NX);
28}
29
30/*
31 * All present user-pages with !NX bit are user-executable:
32 */
33static inline int pte_exec(pte_t pte)
34{
35 return pte_user(pte) && pte_x(pte);
36}
37/*
38 * All present pages with !NX bit are kernel-executable: 23 * All present pages with !NX bit are kernel-executable:
39 */ 24 */
40static inline int pte_exec_kernel(pte_t pte) 25static inline int pte_exec_kernel(pte_t pte)
41{ 26{
42 return pte_x(pte); 27 return !(pte_val(pte) & _PAGE_NX);
43} 28}
44 29
45/* Rules for using set_pte: the pte being assigned *must* be 30/* Rules for using set_pte: the pte being assigned *must* be
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 628fa7747d0c..c7fefa6b12fd 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -79,7 +79,7 @@ void paging_init(void);
79 * area for the same reason. ;) 79 * area for the same reason. ;)
80 */ 80 */
81#define VMALLOC_OFFSET (8*1024*1024) 81#define VMALLOC_OFFSET (8*1024*1024)
82#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \ 82#define VMALLOC_START (((unsigned long) high_memory + \
83 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) 83 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
84#ifdef CONFIG_HIGHMEM 84#ifdef CONFIG_HIGHMEM
85# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) 85# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
@@ -218,8 +218,6 @@ extern unsigned long pg0[];
218 * The following only work if pte_present() is true. 218 * The following only work if pte_present() is true.
219 * Undefined behaviour if not.. 219 * Undefined behaviour if not..
220 */ 220 */
221static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
222static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
223static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } 221static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
224static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } 222static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
225static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } 223static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
@@ -230,13 +228,9 @@ static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
230 */ 228 */
231static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } 229static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
232 230
233static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
234static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
235static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } 231static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
236static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } 232static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
237static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } 233static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
238static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
239static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
240static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } 234static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
241static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } 235static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
242static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } 236static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
@@ -295,17 +289,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
295 __changed; \ 289 __changed; \
296}) 290})
297 291
298#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
299#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
300 int __ret = 0; \
301 if (pte_dirty(*(ptep))) \
302 __ret = test_and_clear_bit(_PAGE_BIT_DIRTY, \
303 &(ptep)->pte_low); \
304 if (__ret) \
305 pte_update((vma)->vm_mm, addr, ptep); \
306 __ret; \
307})
308
309#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 292#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
310#define ptep_test_and_clear_young(vma, addr, ptep) ({ \ 293#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
311 int __ret = 0; \ 294 int __ret = 0; \
@@ -317,27 +300,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
317 __ret; \ 300 __ret; \
318}) 301})
319 302
320/*
321 * Rules for using ptep_establish: the pte MUST be a user pte, and
322 * must be a present->present transition.
323 */
324#define __HAVE_ARCH_PTEP_ESTABLISH
325#define ptep_establish(vma, address, ptep, pteval) \
326do { \
327 set_pte_present((vma)->vm_mm, address, ptep, pteval); \
328 flush_tlb_page(vma, address); \
329} while (0)
330
331#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
332#define ptep_clear_flush_dirty(vma, address, ptep) \
333({ \
334 int __dirty; \
335 __dirty = ptep_test_and_clear_dirty((vma), (address), (ptep)); \
336 if (__dirty) \
337 flush_tlb_page(vma, address); \
338 __dirty; \
339})
340
341#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 303#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
342#define ptep_clear_flush_young(vma, address, ptep) \ 304#define ptep_clear_flush_young(vma, address, ptep) \
343({ \ 305({ \
diff --git a/include/asm-i386/processor-cyrix.h b/include/asm-i386/processor-cyrix.h
new file mode 100644
index 000000000000..97568ada1f97
--- /dev/null
+++ b/include/asm-i386/processor-cyrix.h
@@ -0,0 +1,30 @@
1/*
2 * NSC/Cyrix CPU indexed register access. Must be inlined instead of
3 * macros to ensure correct access ordering
4 * Access order is always 0x22 (=offset), 0x23 (=value)
5 *
6 * When using the old macros a line like
7 * setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
8 * gets expanded to:
9 * do {
10 * outb((CX86_CCR2), 0x22);
11 * outb((({
12 * outb((CX86_CCR2), 0x22);
13 * inb(0x23);
14 * }) | 0x88), 0x23);
15 * } while (0);
16 *
17 * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23).
18 */
19
20static inline u8 getCx86(u8 reg)
21{
22 outb(reg, 0x22);
23 return inb(0x23);
24}
25
26static inline void setCx86(u8 reg, u8 data)
27{
28 outb(reg, 0x22);
29 outb(data, 0x23);
30}
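For illustration, the exact line quoted in the comment above now expands into calls that keep the 0x22/0x23 index/data ordering intact (the meaning of bit 0x88 is taken only from that comment):

static void example_rmw_ccr2(void)
{
	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
}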
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 338668bfb0a2..3845fe72383e 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -88,7 +88,6 @@ struct cpuinfo_x86 {
88#define X86_VENDOR_UMC 3 88#define X86_VENDOR_UMC 3
89#define X86_VENDOR_NEXGEN 4 89#define X86_VENDOR_NEXGEN 4
90#define X86_VENDOR_CENTAUR 5 90#define X86_VENDOR_CENTAUR 5
91#define X86_VENDOR_RISE 6
92#define X86_VENDOR_TRANSMETA 7 91#define X86_VENDOR_TRANSMETA 7
93#define X86_VENDOR_NSC 8 92#define X86_VENDOR_NSC 8
94#define X86_VENDOR_NUM 9 93#define X86_VENDOR_NUM 9
@@ -119,6 +118,7 @@ void __init cpu_detect(struct cpuinfo_x86 *c);
119extern void identify_boot_cpu(void); 118extern void identify_boot_cpu(void);
120extern void identify_secondary_cpu(struct cpuinfo_x86 *); 119extern void identify_secondary_cpu(struct cpuinfo_x86 *);
121extern void print_cpu_info(struct cpuinfo_x86 *); 120extern void print_cpu_info(struct cpuinfo_x86 *);
121extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
122extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); 122extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
123extern unsigned short num_cache_leaves; 123extern unsigned short num_cache_leaves;
124 124
@@ -168,17 +168,6 @@ static inline void clear_in_cr4 (unsigned long mask)
168 write_cr4(cr4); 168 write_cr4(cr4);
169} 169}
170 170
171/*
172 * NSC/Cyrix CPU indexed register access macros
173 */
174
175#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
176
177#define setCx86(reg, data) do { \
178 outb((reg), 0x22); \
179 outb((data), 0x23); \
180} while (0)
181
182/* Stop speculative execution */ 171/* Stop speculative execution */
183static inline void sync_core(void) 172static inline void sync_core(void)
184{ 173{
@@ -227,6 +216,10 @@ extern int bootloader_type;
227 216
228#define HAVE_ARCH_PICK_MMAP_LAYOUT 217#define HAVE_ARCH_PICK_MMAP_LAYOUT
229 218
219extern void hard_disable_TSC(void);
220extern void disable_TSC(void);
221extern void hard_enable_TSC(void);
222
230/* 223/*
231 * Size of io_bitmap. 224 * Size of io_bitmap.
232 */ 225 */
diff --git a/include/asm-i386/required-features.h b/include/asm-i386/required-features.h
index 9db866c1e64c..618feb98f9f5 100644
--- a/include/asm-i386/required-features.h
+++ b/include/asm-i386/required-features.h
@@ -3,32 +3,53 @@
3 3
4/* Define minimum CPUID feature set for kernel These bits are checked 4/* Define minimum CPUID feature set for kernel These bits are checked
5 really early to actually display a visible error message before the 5 really early to actually display a visible error message before the
6 kernel dies. Only add word 0 bits here 6 kernel dies. Make sure to assign features to the proper mask!
7 7
8 Some requirements that are not in CPUID yet are also in the 8 Some requirements that are not in CPUID yet are also in the
9 CONFIG_X86_MINIMUM_CPU mode which is checked too. 9 CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
10 10
11 The real information is in arch/i386/Kconfig.cpu, this just converts 11 The real information is in arch/i386/Kconfig.cpu, this just converts
12 the CONFIGs into a bitmask */ 12 the CONFIGs into a bitmask */
13 13
14#ifndef CONFIG_MATH_EMULATION
15# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
16#else
17# define NEED_FPU 0
18#endif
19
14#ifdef CONFIG_X86_PAE 20#ifdef CONFIG_X86_PAE
15#define NEED_PAE (1<<X86_FEATURE_PAE) 21# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
16#else 22#else
17#define NEED_PAE 0 23# define NEED_PAE 0
18#endif 24#endif
19 25
20#ifdef CONFIG_X86_CMOV 26#ifdef CONFIG_X86_CMOV
21#define NEED_CMOV (1<<X86_FEATURE_CMOV) 27# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
22#else 28#else
23#define NEED_CMOV 0 29# define NEED_CMOV 0
24#endif 30#endif
25 31
26#ifdef CONFIG_X86_CMPXCHG64 32#ifdef CONFIG_X86_PAE
27#define NEED_CMPXCHG64 (1<<X86_FEATURE_CX8) 33# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
28#else 34#else
29#define NEED_CMPXCHG64 0 35# define NEED_CX8 0
30#endif 36#endif
31 37
32#define REQUIRED_MASK1 (NEED_PAE|NEED_CMOV|NEED_CMPXCHG64) 38#define REQUIRED_MASK0 (NEED_FPU|NEED_PAE|NEED_CMOV|NEED_CX8)
39
40#ifdef CONFIG_X86_USE_3DNOW
41# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
42#else
43# define NEED_3DNOW 0
44#endif
45
46#define REQUIRED_MASK1 (NEED_3DNOW)
47
48#define REQUIRED_MASK2 0
49#define REQUIRED_MASK3 0
50#define REQUIRED_MASK4 0
51#define REQUIRED_MASK5 0
52#define REQUIRED_MASK6 0
53#define REQUIRED_MASK7 0
33 54
34#endif 55#endif
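Each NEED_* constant is reduced to a bit position inside a 32-bit word (the "& 31"), and REQUIRED_MASK<n> collects the required bits that live in capability word <n>. A hedged sketch of the kind of early check these masks enable (the actual verification happens in the boot code; the function name here is illustrative, and word 0 is assumed to be CPUID(1).EDX as in the cpufeature layout):

/* Illustrative early check: compare capability word 0 against its mask. */
static int sketch_check_required_word0(void)
{
	u32 eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	return (edx & REQUIRED_MASK0) == REQUIRED_MASK0;
}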
diff --git a/include/asm-i386/resume-trace.h b/include/asm-i386/resume-trace.h
new file mode 100644
index 000000000000..ec9cfd656230
--- /dev/null
+++ b/include/asm-i386/resume-trace.h
@@ -0,0 +1,13 @@
1#define TRACE_RESUME(user) do { \
2 if (pm_trace_enabled) { \
3 void *tracedata; \
4 asm volatile("movl $1f,%0\n" \
5 ".section .tracedata,\"a\"\n" \
6 "1:\t.word %c1\n" \
7 "\t.long %c2\n" \
8 ".previous" \
9 :"=r" (tracedata) \
10 : "i" (__LINE__), "i" (__FILE__)); \
11 generate_resume_trace(tracedata, user); \
12 } \
13} while (0)
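TRACE_RESUME() emits a .tracedata record holding the current __LINE__ (as a 16-bit word) and a pointer to __FILE__, then hands that record to generate_resume_trace() when pm_trace_enabled is set. Typical usage is simply to drop the macro into a suspend/resume path; the surrounding function below is illustrative, and the argument is whatever value the caller wants recorded as the "user" field:

/* Illustrative call site only; pm_trace decides whether anything is logged. */
static int sketch_device_resume(struct device *dev)
{
	TRACE_RESUME(0);		/* record file/line on entry */
	/* ... device-specific resume work ... */
	TRACE_RESUME(1);		/* record again once the work is done */
	return 0;
}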
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 0e8077cbfdac..7862fe858a9e 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -26,12 +26,15 @@
26#define NEW_CL_POINTER 0x228 /* Relative to real mode data */ 26#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
27 27
28#ifndef __ASSEMBLY__ 28#ifndef __ASSEMBLY__
29
30#include <asm/bootparam.h>
31
29/* 32/*
30 * This is set up by the setup-routine at boot-time 33 * This is set up by the setup-routine at boot-time
31 */ 34 */
32extern unsigned char boot_params[PARAM_SIZE]; 35extern struct boot_params boot_params;
33 36
34#define PARAM (boot_params) 37#define PARAM ((char *)&boot_params)
35#define SCREEN_INFO (*(struct screen_info *) (PARAM+0)) 38#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
36#define EXT_MEM_K (*(unsigned short *) (PARAM+2)) 39#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
37#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0)) 40#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
@@ -39,8 +42,7 @@ extern unsigned char boot_params[PARAM_SIZE];
39#define E820_MAP ((struct e820entry *) (PARAM+E820MAP)) 42#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
40#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40)) 43#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
41#define IST_INFO (*(struct ist_info *) (PARAM+0x60)) 44#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
42#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80)) 45#define SYS_DESC_TABLE (*(struct sys_desc_table *)(PARAM+0xa0))
43#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
44#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4))) 46#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
45#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8))) 47#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
46#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc))) 48#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
@@ -79,6 +81,10 @@ void __init add_memory_region(unsigned long long start,
79 81
80extern unsigned long init_pg_tables_end; 82extern unsigned long init_pg_tables_end;
81 83
84#ifndef CONFIG_PARAVIRT
85#define paravirt_post_allocator_init() do {} while (0)
86#endif
87
82#endif /* __ASSEMBLY__ */ 88#endif /* __ASSEMBLY__ */
83 89
84#endif /* __KERNEL__ */ 90#endif /* __KERNEL__ */
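With boot_params now a real struct boot_params (from the new asm/bootparam.h), the legacy offset-based accessors keep working because PARAM simply casts the structure's address back to a byte pointer; new code can use the named fields instead. A small sketch, assuming screen_info is the field at offset 0 of the bootparam layout:

/* Sketch: legacy accessor and direct field access name the same storage. */
static void sketch_boot_params_access(void)
{
	struct screen_info *legacy = &SCREEN_INFO;		/* via (PARAM + 0) */
	struct screen_info *direct = &boot_params.screen_info;	/* assumed field */

	BUG_ON(legacy != direct);
}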
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 0c7132787062..1f73bde165b1 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -43,9 +43,12 @@ extern u8 x86_cpu_to_apicid[];
43 43
44#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] 44#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
45 45
46extern void set_cpu_sibling_map(int cpu);
47
46#ifdef CONFIG_HOTPLUG_CPU 48#ifdef CONFIG_HOTPLUG_CPU
47extern void cpu_exit_clear(void); 49extern void cpu_exit_clear(void);
48extern void cpu_uninit(void); 50extern void cpu_uninit(void);
51extern void remove_siblinginfo(int cpu);
49#endif 52#endif
50 53
51struct smp_ops 54struct smp_ops
@@ -129,6 +132,8 @@ extern int __cpu_disable(void);
129extern void __cpu_die(unsigned int cpu); 132extern void __cpu_die(unsigned int cpu);
130extern unsigned int num_processors; 133extern unsigned int num_processors;
131 134
135void __cpuinit smp_store_cpu_info(int id);
136
132#endif /* !__ASSEMBLY__ */ 137#endif /* !__ASSEMBLY__ */
133 138
134#else /* CONFIG_SMP */ 139#else /* CONFIG_SMP */
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index b9277361954b..a9b64453bdf5 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -2,203 +2,35 @@
2#define _I386_STRING_H_ 2#define _I386_STRING_H_
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5/*
6 * On a 486 or Pentium, we are better off not using the
7 * byte string operations. But on a 386 or a PPro the
8 * byte string ops are faster than doing it by hand
9 * (MUCH faster on a Pentium).
10 */
11
12/*
13 * This string-include defines all string functions as inline
14 * functions. Use gcc. It also assumes ds=es=data space, this should be
15 * normal. Most of the string-functions are rather heavily hand-optimized,
16 * see especially strsep,strstr,str[c]spn. They should work, but are not
17 * very easy to understand. Everything is done entirely within the register
18 * set, making the functions fast and clean. String instructions have been
19 * used through-out, making for "slightly" unclear code :-)
20 *
21 * NO Copyright (C) 1991, 1992 Linus Torvalds,
22 * consider these trivial functions to be PD.
23 */
24 5
 25/* AK: in fact I bet it would be better to move this stuff all out of line. 6/* Let gcc decide whether to inline or use the out of line functions */
26 */
27 7
28#define __HAVE_ARCH_STRCPY 8#define __HAVE_ARCH_STRCPY
29static inline char * strcpy(char * dest,const char *src) 9extern char *strcpy(char *dest, const char *src);
30{
31int d0, d1, d2;
32__asm__ __volatile__(
33 "1:\tlodsb\n\t"
34 "stosb\n\t"
35 "testb %%al,%%al\n\t"
36 "jne 1b"
37 : "=&S" (d0), "=&D" (d1), "=&a" (d2)
38 :"0" (src),"1" (dest) : "memory");
39return dest;
40}
41 10
42#define __HAVE_ARCH_STRNCPY 11#define __HAVE_ARCH_STRNCPY
43static inline char * strncpy(char * dest,const char *src,size_t count) 12extern char *strncpy(char *dest, const char *src, size_t count);
44{
45int d0, d1, d2, d3;
46__asm__ __volatile__(
47 "1:\tdecl %2\n\t"
48 "js 2f\n\t"
49 "lodsb\n\t"
50 "stosb\n\t"
51 "testb %%al,%%al\n\t"
52 "jne 1b\n\t"
53 "rep\n\t"
54 "stosb\n"
55 "2:"
56 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
57 :"0" (src),"1" (dest),"2" (count) : "memory");
58return dest;
59}
60 13
61#define __HAVE_ARCH_STRCAT 14#define __HAVE_ARCH_STRCAT
62static inline char * strcat(char * dest,const char * src) 15extern char *strcat(char *dest, const char *src);
63{
64int d0, d1, d2, d3;
65__asm__ __volatile__(
66 "repne\n\t"
67 "scasb\n\t"
68 "decl %1\n"
69 "1:\tlodsb\n\t"
70 "stosb\n\t"
71 "testb %%al,%%al\n\t"
72 "jne 1b"
73 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
74 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu):"memory");
75return dest;
76}
77 16
78#define __HAVE_ARCH_STRNCAT 17#define __HAVE_ARCH_STRNCAT
79static inline char * strncat(char * dest,const char * src,size_t count) 18extern char *strncat(char *dest, const char *src, size_t count);
80{
81int d0, d1, d2, d3;
82__asm__ __volatile__(
83 "repne\n\t"
84 "scasb\n\t"
85 "decl %1\n\t"
86 "movl %8,%3\n"
87 "1:\tdecl %3\n\t"
88 "js 2f\n\t"
89 "lodsb\n\t"
90 "stosb\n\t"
91 "testb %%al,%%al\n\t"
92 "jne 1b\n"
93 "2:\txorl %2,%2\n\t"
94 "stosb"
95 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
96 : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count)
97 : "memory");
98return dest;
99}
100 19
101#define __HAVE_ARCH_STRCMP 20#define __HAVE_ARCH_STRCMP
102static inline int strcmp(const char * cs,const char * ct) 21extern int strcmp(const char *cs, const char *ct);
103{
104int d0, d1;
105register int __res;
106__asm__ __volatile__(
107 "1:\tlodsb\n\t"
108 "scasb\n\t"
109 "jne 2f\n\t"
110 "testb %%al,%%al\n\t"
111 "jne 1b\n\t"
112 "xorl %%eax,%%eax\n\t"
113 "jmp 3f\n"
114 "2:\tsbbl %%eax,%%eax\n\t"
115 "orb $1,%%al\n"
116 "3:"
117 :"=a" (__res), "=&S" (d0), "=&D" (d1)
118 :"1" (cs),"2" (ct)
119 :"memory");
120return __res;
121}
122 22
123#define __HAVE_ARCH_STRNCMP 23#define __HAVE_ARCH_STRNCMP
124static inline int strncmp(const char * cs,const char * ct,size_t count) 24extern int strncmp(const char *cs, const char *ct, size_t count);
125{
126register int __res;
127int d0, d1, d2;
128__asm__ __volatile__(
129 "1:\tdecl %3\n\t"
130 "js 2f\n\t"
131 "lodsb\n\t"
132 "scasb\n\t"
133 "jne 3f\n\t"
134 "testb %%al,%%al\n\t"
135 "jne 1b\n"
136 "2:\txorl %%eax,%%eax\n\t"
137 "jmp 4f\n"
138 "3:\tsbbl %%eax,%%eax\n\t"
139 "orb $1,%%al\n"
140 "4:"
141 :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
142 :"1" (cs),"2" (ct),"3" (count)
143 :"memory");
144return __res;
145}
146 25
147#define __HAVE_ARCH_STRCHR 26#define __HAVE_ARCH_STRCHR
148static inline char * strchr(const char * s, int c) 27extern char *strchr(const char *s, int c);
149{
150int d0;
151register char * __res;
152__asm__ __volatile__(
153 "movb %%al,%%ah\n"
154 "1:\tlodsb\n\t"
155 "cmpb %%ah,%%al\n\t"
156 "je 2f\n\t"
157 "testb %%al,%%al\n\t"
158 "jne 1b\n\t"
159 "movl $1,%1\n"
160 "2:\tmovl %1,%0\n\t"
161 "decl %0"
162 :"=a" (__res), "=&S" (d0)
163 :"1" (s),"0" (c)
164 :"memory");
165return __res;
166}
167 28
168#define __HAVE_ARCH_STRRCHR 29#define __HAVE_ARCH_STRRCHR
169static inline char * strrchr(const char * s, int c) 30extern char *strrchr(const char *s, int c);
170{
171int d0, d1;
172register char * __res;
173__asm__ __volatile__(
174 "movb %%al,%%ah\n"
175 "1:\tlodsb\n\t"
176 "cmpb %%ah,%%al\n\t"
177 "jne 2f\n\t"
178 "leal -1(%%esi),%0\n"
179 "2:\ttestb %%al,%%al\n\t"
180 "jne 1b"
181 :"=g" (__res), "=&S" (d0), "=&a" (d1)
182 :"0" (0),"1" (s),"2" (c)
183 :"memory");
184return __res;
185}
186 31
187#define __HAVE_ARCH_STRLEN 32#define __HAVE_ARCH_STRLEN
188static inline size_t strlen(const char * s) 33extern size_t strlen(const char *s);
189{
190int d0;
191register int __res;
192__asm__ __volatile__(
193 "repne\n\t"
194 "scasb\n\t"
195 "notl %0\n\t"
196 "decl %0"
197 :"=c" (__res), "=&D" (d0)
198 :"1" (s),"a" (0), "0" (0xffffffffu)
199 :"memory");
200return __res;
201}
202 34
203static __always_inline void * __memcpy(void * to, const void * from, size_t n) 35static __always_inline void * __memcpy(void * to, const void * from, size_t n)
204{ 36{
@@ -207,9 +39,7 @@ __asm__ __volatile__(
207 "rep ; movsl\n\t" 39 "rep ; movsl\n\t"
208 "movl %4,%%ecx\n\t" 40 "movl %4,%%ecx\n\t"
209 "andl $3,%%ecx\n\t" 41 "andl $3,%%ecx\n\t"
210#if 1 /* want to pay 2 byte penalty for a chance to skip microcoded rep? */
211 "jz 1f\n\t" 42 "jz 1f\n\t"
212#endif
213 "rep ; movsb\n\t" 43 "rep ; movsb\n\t"
214 "1:" 44 "1:"
215 : "=&c" (d0), "=&D" (d1), "=&S" (d2) 45 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
@@ -328,23 +158,7 @@ void *memmove(void * dest,const void * src, size_t n);
328#define memcmp __builtin_memcmp 158#define memcmp __builtin_memcmp
329 159
330#define __HAVE_ARCH_MEMCHR 160#define __HAVE_ARCH_MEMCHR
331static inline void * memchr(const void * cs,int c,size_t count) 161extern void *memchr(const void * cs,int c,size_t count);
332{
333int d0;
334register void * __res;
335if (!count)
336 return NULL;
337__asm__ __volatile__(
338 "repne\n\t"
339 "scasb\n\t"
340 "je 1f\n\t"
341 "movl $1,%0\n"
342 "1:\tdecl %0"
343 :"=D" (__res), "=&c" (d0)
344 :"a" (c),"0" (cs),"1" (count)
345 :"memory");
346return __res;
347}
348 162
349static inline void * __memset_generic(void * s, char c,size_t count) 163static inline void * __memset_generic(void * s, char c,size_t count)
350{ 164{
@@ -386,29 +200,10 @@ return (s);
386 200
387/* Added by Gertjan van Wingerde to make minix and sysv module work */ 201/* Added by Gertjan van Wingerde to make minix and sysv module work */
388#define __HAVE_ARCH_STRNLEN 202#define __HAVE_ARCH_STRNLEN
389static inline size_t strnlen(const char * s, size_t count) 203extern size_t strnlen(const char * s, size_t count);
390{
391int d0;
392register int __res;
393__asm__ __volatile__(
394 "movl %2,%0\n\t"
395 "jmp 2f\n"
396 "1:\tcmpb $0,(%0)\n\t"
397 "je 3f\n\t"
398 "incl %0\n"
399 "2:\tdecl %1\n\t"
400 "cmpl $-1,%1\n\t"
401 "jne 1b\n"
402 "3:\tsubl %2,%0"
403 :"=a" (__res), "=&d" (d0)
404 :"c" (s),"1" (count)
405 :"memory");
406return __res;
407}
408/* end of additional stuff */ 204/* end of additional stuff */
409 205
410#define __HAVE_ARCH_STRSTR 206#define __HAVE_ARCH_STRSTR
411
412extern char *strstr(const char *cs, const char *ct); 207extern char *strstr(const char *cs, const char *ct);
413 208
414/* 209/*
@@ -474,19 +269,7 @@ __asm__ __volatile__( \
474 * find the first occurrence of byte 'c', or 1 past the area if none 269 * find the first occurrence of byte 'c', or 1 past the area if none
475 */ 270 */
476#define __HAVE_ARCH_MEMSCAN 271#define __HAVE_ARCH_MEMSCAN
477static inline void * memscan(void * addr, int c, size_t size) 272extern void *memscan(void * addr, int c, size_t size);
478{
479 if (!size)
480 return addr;
481 __asm__("repnz; scasb\n\t"
482 "jnz 1f\n\t"
483 "dec %%edi\n"
484 "1:"
485 : "=D" (addr), "=c" (size)
486 : "0" (addr), "1" (size), "a" (c)
487 : "memory");
488 return addr;
489}
490 273
491#endif /* __KERNEL__ */ 274#endif /* __KERNEL__ */
492 275
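Keeping the __HAVE_ARCH_* defines while only declaring the functions moves the bodies into an out-of-line translation unit (and stops lib/string.c from emitting its generic fallbacks). A plain-C sketch of what such an out-of-line definition could look like, assuming the arch file does not reuse the old inline asm; the real file is free to keep using string instructions:

/* Plain-C sketch of an out-of-line strlen() definition. */
size_t strlen(const char *s)
{
	const char *p = s;

	while (*p)
		p++;
	return p - s;
}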
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 94ed3686a5f3..609756c61676 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -310,15 +310,6 @@ void enable_hlt(void);
310extern int es7000_plat; 310extern int es7000_plat;
311void cpu_idle_wait(void); 311void cpu_idle_wait(void);
312 312
313/*
314 * On SMP systems, when the scheduler does migration-cost autodetection,
315 * it needs a way to flush as much of the CPU's caches as possible:
316 */
317static inline void sched_cacheflush(void)
318{
319 wbinvd();
320}
321
322extern unsigned long arch_align_stack(unsigned long sp); 313extern unsigned long arch_align_stack(unsigned long sp);
323extern void free_init_pages(char *what, unsigned long begin, unsigned long end); 314extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
324 315
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 4cb0f91ae64f..54424e045e01 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -137,6 +137,7 @@ static inline struct thread_info *current_thread_info(void)
137#define TIF_DEBUG 17 /* uses debug registers */ 137#define TIF_DEBUG 17 /* uses debug registers */
138#define TIF_IO_BITMAP 18 /* uses I/O bitmap */ 138#define TIF_IO_BITMAP 18 /* uses I/O bitmap */
139#define TIF_FREEZE 19 /* is freezing for suspend */ 139#define TIF_FREEZE 19 /* is freezing for suspend */
140#define TIF_NOTSC 20 /* TSC is not accessible in userland */
140 141
141#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 142#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
142#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 143#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -151,6 +152,7 @@ static inline struct thread_info *current_thread_info(void)
151#define _TIF_DEBUG (1<<TIF_DEBUG) 152#define _TIF_DEBUG (1<<TIF_DEBUG)
152#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) 153#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
153#define _TIF_FREEZE (1<<TIF_FREEZE) 154#define _TIF_FREEZE (1<<TIF_FREEZE)
155#define _TIF_NOTSC (1<<TIF_NOTSC)
154 156
155/* work to do on interrupt/exception return */ 157/* work to do on interrupt/exception return */
156#define _TIF_WORK_MASK \ 158#define _TIF_WORK_MASK \
@@ -160,7 +162,8 @@ static inline struct thread_info *current_thread_info(void)
160#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) 162#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
161 163
162/* flags to check in __switch_to() */ 164/* flags to check in __switch_to() */
163#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP) 165#define _TIF_WORK_CTXSW_NEXT (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUG)
166#define _TIF_WORK_CTXSW_PREV (_TIF_IO_BITMAP | _TIF_NOTSC)
164 167
165/* 168/*
166 * Thread-synchronous status. 169 * Thread-synchronous status.
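Splitting _TIF_WORK_CTXSW into _NEXT and _PREV lets the context switch test the incoming and outgoing tasks against different flag sets (only the next task needs the _TIF_DEBUG check). A hedged sketch of the consumer on the switch path; treat the helper name and exact shape as illustrative:

/* Illustrative helper: does the switch need the slow extra-work path? */
static inline int sketch_ctxsw_needs_extra_work(struct task_struct *prev_p,
						struct task_struct *next_p)
{
	return (task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV) ||
	       (task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT);
}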
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index 153770e25faa..0db7e994fb8b 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -5,18 +5,46 @@
5 5
6#define TICK_SIZE (tick_nsec / 1000) 6#define TICK_SIZE (tick_nsec / 1000)
7 7
8void setup_pit_timer(void);
9unsigned long long native_sched_clock(void); 8unsigned long long native_sched_clock(void);
10unsigned long native_calculate_cpu_khz(void); 9unsigned long native_calculate_cpu_khz(void);
11 10
12extern int timer_ack; 11extern int timer_ack;
13extern int no_timer_check; 12extern int no_timer_check;
14extern int no_sync_cmos_clock;
15extern int recalibrate_cpu_khz(void); 13extern int recalibrate_cpu_khz(void);
16 14
17#ifndef CONFIG_PARAVIRT 15#ifndef CONFIG_PARAVIRT
18#define get_scheduled_cycles(val) rdtscll(val)
19#define calculate_cpu_khz() native_calculate_cpu_khz() 16#define calculate_cpu_khz() native_calculate_cpu_khz()
20#endif 17#endif
21 18
 19/* Accelerators for sched_clock()
20 * convert from cycles(64bits) => nanoseconds (64bits)
21 * basic equation:
22 * ns = cycles / (freq / ns_per_sec)
23 * ns = cycles * (ns_per_sec / freq)
24 * ns = cycles * (10^9 / (cpu_khz * 10^3))
25 * ns = cycles * (10^6 / cpu_khz)
26 *
27 * Then we use scaling math (suggested by george@mvista.com) to get:
28 * ns = cycles * (10^6 * SC / cpu_khz) / SC
29 * ns = cycles * cyc2ns_scale / SC
30 *
31 * And since SC is a constant power of two, we can convert the div
32 * into a shift.
33 *
 34 * We can use khz divisor instead of mhz to keep better precision, since
35 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
36 * (mathieu.desnoyers@polymtl.ca)
37 *
 38 * -johnstul@us.ibm.com "math is hard, let's go shopping!"
39 */
40extern unsigned long cyc2ns_scale __read_mostly;
41
42#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
43
44static inline unsigned long long cycles_2_ns(unsigned long long cyc)
45{
46 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
47}
48
49
22#endif 50#endif
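Concretely, the scale factor is derived once from cpu_khz and every later conversion is a multiply plus a shift. A hedged sketch of the setup together with a worked number (the setter name is illustrative; only the arithmetic follows from the comment above):

/* Example: for a 2 GHz CPU, cpu_khz = 2000000, so
 * cyc2ns_scale = (10^6 << 10) / 2000000 = 512, and
 * cycles_2_ns(c) = (c * 512) >> 10 = c / 2, i.e. 0.5 ns per cycle. */
static void sketch_set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}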
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index fc525c5cd5a9..a50fa6741486 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -160,7 +160,11 @@ DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
160 native_flush_tlb_others(&mask, mm, va) 160 native_flush_tlb_others(&mask, mm, va)
161#endif 161#endif
162 162
163#define flush_tlb_kernel_range(start, end) flush_tlb_all() 163static inline void flush_tlb_kernel_range(unsigned long start,
164 unsigned long end)
165{
166 flush_tlb_all();
167}
164 168
165static inline void flush_tlb_pgtables(struct mm_struct *mm, 169static inline void flush_tlb_pgtables(struct mm_struct *mm,
166 unsigned long start, unsigned long end) 170 unsigned long start, unsigned long end)
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 7fc512d90ea8..19b2dafd0c81 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -67,7 +67,7 @@ static inline int node_to_first_cpu(int node)
67 return first_cpu(mask); 67 return first_cpu(mask);
68} 68}
69 69
70#define pcibus_to_node(bus) ((long) (bus)->sysdata) 70#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node
71#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)) 71#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus))
72 72
73/* sched_domains SD_NODE_INIT for NUMAQ machines */ 73/* sched_domains SD_NODE_INIT for NUMAQ machines */
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index 62c091ffcccc..a4d806610b7f 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -63,6 +63,7 @@ extern void tsc_init(void);
63extern void mark_tsc_unstable(char *reason); 63extern void mark_tsc_unstable(char *reason);
64extern int unsynchronized_tsc(void); 64extern int unsynchronized_tsc(void);
65extern void init_tsc_clocksource(void); 65extern void init_tsc_clocksource(void);
66int check_tsc_unstable(void);
66 67
67/* 68/*
68 * Boot-time check whether the TSCs are synchronized across 69 * Boot-time check whether the TSCs are synchronized across
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index e2aa5e0d0cc7..d2a4f7be9c2c 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -581,7 +581,7 @@ long __must_check __strncpy_from_user(char *dst,
581 * If there is a limit on the length of a valid string, you may wish to 581 * If there is a limit on the length of a valid string, you may wish to
582 * consider using strnlen_user() instead. 582 * consider using strnlen_user() instead.
583 */ 583 */
584#define strlen_user(str) strnlen_user(str, ~0UL >> 1) 584#define strlen_user(str) strnlen_user(str, LONG_MAX)
585 585
586long strnlen_user(const char __user *str, long n); 586long strnlen_user(const char __user *str, long n);
587unsigned long __must_check clear_user(void __user *mem, unsigned long len); 587unsigned long __must_check clear_user(void __user *mem, unsigned long len);
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index e84ace1ec8bf..9b15545eb9b5 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -329,10 +329,11 @@
329#define __NR_signalfd 321 329#define __NR_signalfd 321
330#define __NR_timerfd 322 330#define __NR_timerfd 322
331#define __NR_eventfd 323 331#define __NR_eventfd 323
332#define __NR_fallocate 324
332 333
333#ifdef __KERNEL__ 334#ifdef __KERNEL__
334 335
335#define NR_syscalls 324 336#define NR_syscalls 325
336 337
337#define __ARCH_WANT_IPC_PARSE_VERSION 338#define __ARCH_WANT_IPC_PARSE_VERSION
338#define __ARCH_WANT_OLD_READDIR 339#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h
index 213930b995cb..478188130328 100644
--- a/include/asm-i386/vmi_time.h
+++ b/include/asm-i386/vmi_time.h
@@ -49,7 +49,7 @@ extern struct vmi_timer_ops {
49extern void __init vmi_time_init(void); 49extern void __init vmi_time_init(void);
50extern unsigned long vmi_get_wallclock(void); 50extern unsigned long vmi_get_wallclock(void);
51extern int vmi_set_wallclock(unsigned long now); 51extern int vmi_set_wallclock(unsigned long now);
52extern unsigned long long vmi_get_sched_cycles(void); 52extern unsigned long long vmi_sched_clock(void);
53extern unsigned long vmi_cpu_khz(void); 53extern unsigned long vmi_cpu_khz(void);
54 54
55#ifdef CONFIG_X86_LOCAL_APIC 55#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/include/asm-i386/xen/hypercall.h b/include/asm-i386/xen/hypercall.h
new file mode 100644
index 000000000000..bc0ee7d961ca
--- /dev/null
+++ b/include/asm-i386/xen/hypercall.h
@@ -0,0 +1,413 @@
1/******************************************************************************
2 * hypercall.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef __HYPERCALL_H__
34#define __HYPERCALL_H__
35
36#include <linux/errno.h>
37#include <linux/string.h>
38
39#include <xen/interface/xen.h>
40#include <xen/interface/sched.h>
41#include <xen/interface/physdev.h>
42
43extern struct { char _entry[32]; } hypercall_page[];
44
45#define _hypercall0(type, name) \
46({ \
47 long __res; \
48 asm volatile ( \
49 "call %[call]" \
50 : "=a" (__res) \
51 : [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
52 : "memory" ); \
53 (type)__res; \
54})
55
56#define _hypercall1(type, name, a1) \
57({ \
58 long __res, __ign1; \
59 asm volatile ( \
60 "call %[call]" \
61 : "=a" (__res), "=b" (__ign1) \
62 : "1" ((long)(a1)), \
63 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
64 : "memory" ); \
65 (type)__res; \
66})
67
68#define _hypercall2(type, name, a1, a2) \
69({ \
70 long __res, __ign1, __ign2; \
71 asm volatile ( \
72 "call %[call]" \
73 : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
74 : "1" ((long)(a1)), "2" ((long)(a2)), \
75 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
76 : "memory" ); \
77 (type)__res; \
78})
79
80#define _hypercall3(type, name, a1, a2, a3) \
81({ \
82 long __res, __ign1, __ign2, __ign3; \
83 asm volatile ( \
84 "call %[call]" \
85 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
86 "=d" (__ign3) \
87 : "1" ((long)(a1)), "2" ((long)(a2)), \
88 "3" ((long)(a3)), \
89 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
90 : "memory" ); \
91 (type)__res; \
92})
93
94#define _hypercall4(type, name, a1, a2, a3, a4) \
95({ \
96 long __res, __ign1, __ign2, __ign3, __ign4; \
97 asm volatile ( \
98 "call %[call]" \
99 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
100 "=d" (__ign3), "=S" (__ign4) \
101 : "1" ((long)(a1)), "2" ((long)(a2)), \
102 "3" ((long)(a3)), "4" ((long)(a4)), \
103 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
104 : "memory" ); \
105 (type)__res; \
106})
107
108#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
109({ \
110 long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \
111 asm volatile ( \
112 "call %[call]" \
113 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
114 "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
115 : "1" ((long)(a1)), "2" ((long)(a2)), \
116 "3" ((long)(a3)), "4" ((long)(a4)), \
117 "5" ((long)(a5)), \
118 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
119 : "memory" ); \
120 (type)__res; \
121})
122
123static inline int
124HYPERVISOR_set_trap_table(struct trap_info *table)
125{
126 return _hypercall1(int, set_trap_table, table);
127}
128
129static inline int
130HYPERVISOR_mmu_update(struct mmu_update *req, int count,
131 int *success_count, domid_t domid)
132{
133 return _hypercall4(int, mmu_update, req, count, success_count, domid);
134}
135
136static inline int
137HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
138 int *success_count, domid_t domid)
139{
140 return _hypercall4(int, mmuext_op, op, count, success_count, domid);
141}
142
143static inline int
144HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
145{
146 return _hypercall2(int, set_gdt, frame_list, entries);
147}
148
149static inline int
150HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
151{
152 return _hypercall2(int, stack_switch, ss, esp);
153}
154
155static inline int
156HYPERVISOR_set_callbacks(unsigned long event_selector,
157 unsigned long event_address,
158 unsigned long failsafe_selector,
159 unsigned long failsafe_address)
160{
161 return _hypercall4(int, set_callbacks,
162 event_selector, event_address,
163 failsafe_selector, failsafe_address);
164}
165
166static inline int
167HYPERVISOR_fpu_taskswitch(int set)
168{
169 return _hypercall1(int, fpu_taskswitch, set);
170}
171
172static inline int
173HYPERVISOR_sched_op(int cmd, unsigned long arg)
174{
175 return _hypercall2(int, sched_op, cmd, arg);
176}
177
178static inline long
179HYPERVISOR_set_timer_op(u64 timeout)
180{
181 unsigned long timeout_hi = (unsigned long)(timeout>>32);
182 unsigned long timeout_lo = (unsigned long)timeout;
183 return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
184}
185
186static inline int
187HYPERVISOR_set_debugreg(int reg, unsigned long value)
188{
189 return _hypercall2(int, set_debugreg, reg, value);
190}
191
192static inline unsigned long
193HYPERVISOR_get_debugreg(int reg)
194{
195 return _hypercall1(unsigned long, get_debugreg, reg);
196}
197
198static inline int
199HYPERVISOR_update_descriptor(u64 ma, u64 desc)
200{
201 return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
202}
203
204static inline int
205HYPERVISOR_memory_op(unsigned int cmd, void *arg)
206{
207 return _hypercall2(int, memory_op, cmd, arg);
208}
209
210static inline int
211HYPERVISOR_multicall(void *call_list, int nr_calls)
212{
213 return _hypercall2(int, multicall, call_list, nr_calls);
214}
215
216static inline int
217HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
218 unsigned long flags)
219{
220 unsigned long pte_hi = 0;
221#ifdef CONFIG_X86_PAE
222 pte_hi = new_val.pte_high;
223#endif
224 return _hypercall4(int, update_va_mapping, va,
225 new_val.pte_low, pte_hi, flags);
226}
227
228static inline int
229HYPERVISOR_event_channel_op(int cmd, void *arg)
230{
231 int rc = _hypercall2(int, event_channel_op, cmd, arg);
232 if (unlikely(rc == -ENOSYS)) {
233 struct evtchn_op op;
234 op.cmd = cmd;
235 memcpy(&op.u, arg, sizeof(op.u));
236 rc = _hypercall1(int, event_channel_op_compat, &op);
237 memcpy(arg, &op.u, sizeof(op.u));
238 }
239 return rc;
240}
241
242static inline int
243HYPERVISOR_xen_version(int cmd, void *arg)
244{
245 return _hypercall2(int, xen_version, cmd, arg);
246}
247
248static inline int
249HYPERVISOR_console_io(int cmd, int count, char *str)
250{
251 return _hypercall3(int, console_io, cmd, count, str);
252}
253
254static inline int
255HYPERVISOR_physdev_op(int cmd, void *arg)
256{
257 int rc = _hypercall2(int, physdev_op, cmd, arg);
258 if (unlikely(rc == -ENOSYS)) {
259 struct physdev_op op;
260 op.cmd = cmd;
261 memcpy(&op.u, arg, sizeof(op.u));
262 rc = _hypercall1(int, physdev_op_compat, &op);
263 memcpy(arg, &op.u, sizeof(op.u));
264 }
265 return rc;
266}
267
268static inline int
269HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
270{
271 return _hypercall3(int, grant_table_op, cmd, uop, count);
272}
273
274static inline int
275HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
276 unsigned long flags, domid_t domid)
277{
278 unsigned long pte_hi = 0;
279#ifdef CONFIG_X86_PAE
280 pte_hi = new_val.pte_high;
281#endif
282 return _hypercall5(int, update_va_mapping_otherdomain, va,
283 new_val.pte_low, pte_hi, flags, domid);
284}
285
286static inline int
287HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
288{
289 return _hypercall2(int, vm_assist, cmd, type);
290}
291
292static inline int
293HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
294{
295 return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
296}
297
298static inline int
299HYPERVISOR_suspend(unsigned long srec)
300{
301 return _hypercall3(int, sched_op, SCHEDOP_shutdown,
302 SHUTDOWN_suspend, srec);
303}
304
305static inline int
306HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
307{
308 return _hypercall2(int, nmi_op, op, arg);
309}
310
311static inline void
312MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
313 pte_t new_val, unsigned long flags)
314{
315 mcl->op = __HYPERVISOR_update_va_mapping;
316 mcl->args[0] = va;
317#ifdef CONFIG_X86_PAE
318 mcl->args[1] = new_val.pte_low;
319 mcl->args[2] = new_val.pte_high;
320#else
321 mcl->args[1] = new_val.pte_low;
322 mcl->args[2] = 0;
323#endif
324 mcl->args[3] = flags;
325}
326
327static inline void
328MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
329 void *uop, unsigned int count)
330{
331 mcl->op = __HYPERVISOR_grant_table_op;
332 mcl->args[0] = cmd;
333 mcl->args[1] = (unsigned long)uop;
334 mcl->args[2] = count;
335}
336
337static inline void
338MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
339 pte_t new_val, unsigned long flags,
340 domid_t domid)
341{
342 mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
343 mcl->args[0] = va;
344#ifdef CONFIG_X86_PAE
345 mcl->args[1] = new_val.pte_low;
346 mcl->args[2] = new_val.pte_high;
347#else
348 mcl->args[1] = new_val.pte_low;
349 mcl->args[2] = 0;
350#endif
351 mcl->args[3] = flags;
352 mcl->args[4] = domid;
353}
354
355static inline void
356MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
357 struct desc_struct desc)
358{
359 mcl->op = __HYPERVISOR_update_descriptor;
360 mcl->args[0] = maddr;
361 mcl->args[1] = maddr >> 32;
362 mcl->args[2] = desc.a;
363 mcl->args[3] = desc.b;
364}
365
366static inline void
367MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
368{
369 mcl->op = __HYPERVISOR_memory_op;
370 mcl->args[0] = cmd;
371 mcl->args[1] = (unsigned long)arg;
372}
373
374static inline void
375MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
376 int count, int *success_count, domid_t domid)
377{
378 mcl->op = __HYPERVISOR_mmu_update;
379 mcl->args[0] = (unsigned long)req;
380 mcl->args[1] = count;
381 mcl->args[2] = (unsigned long)success_count;
382 mcl->args[3] = domid;
383}
384
385static inline void
386MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
387 int *success_count, domid_t domid)
388{
389 mcl->op = __HYPERVISOR_mmuext_op;
390 mcl->args[0] = (unsigned long)op;
391 mcl->args[1] = count;
392 mcl->args[2] = (unsigned long)success_count;
393 mcl->args[3] = domid;
394}
395
396static inline void
397MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
398{
399 mcl->op = __HYPERVISOR_set_gdt;
400 mcl->args[0] = (unsigned long)frames;
401 mcl->args[1] = entries;
402}
403
404static inline void
405MULTI_stack_switch(struct multicall_entry *mcl,
406 unsigned long ss, unsigned long esp)
407{
408 mcl->op = __HYPERVISOR_stack_switch;
409 mcl->args[0] = ss;
410 mcl->args[1] = esp;
411}
412
413#endif /* __HYPERCALL_H__ */
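Each _hypercallN() variant loads its arguments into EBX/ECX/EDX/ESI/EDI, calls the 32-byte stub at hypercall_page[__HYPERVISOR_<name>], and hands back EAX as the result, so new wrappers are mechanical. A hedged usage example built on one of the wrappers above; CONSOLEIO_write is assumed to come from xen/interface/xen.h:

/* Illustrative: write a message to the Xen emergency console from a guest. */
static int sketch_xen_console_write(const char *msg, int len)
{
	return HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)msg);
}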
diff --git a/include/asm-i386/xen/hypervisor.h b/include/asm-i386/xen/hypervisor.h
new file mode 100644
index 000000000000..8e15dd28c91f
--- /dev/null
+++ b/include/asm-i386/xen/hypervisor.h
@@ -0,0 +1,73 @@
1/******************************************************************************
2 * hypervisor.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef __HYPERVISOR_H__
34#define __HYPERVISOR_H__
35
36#include <linux/types.h>
37#include <linux/kernel.h>
38#include <linux/version.h>
39
40#include <xen/interface/xen.h>
41#include <xen/interface/version.h>
42
43#include <asm/ptrace.h>
44#include <asm/page.h>
45#include <asm/desc.h>
46#if defined(__i386__)
47# ifdef CONFIG_X86_PAE
48# include <asm-generic/pgtable-nopud.h>
49# else
50# include <asm-generic/pgtable-nopmd.h>
51# endif
52#endif
53#include <asm/xen/hypercall.h>
54
55/* arch/i386/kernel/setup.c */
56extern struct shared_info *HYPERVISOR_shared_info;
57extern struct start_info *xen_start_info;
58#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
59
60/* arch/i386/mach-xen/evtchn.c */
61/* Force a proper event-channel callback from Xen. */
62extern void force_evtchn_callback(void);
63
64/* Turn jiffies into Xen system time. */
65u64 jiffies_to_st(unsigned long jiffies);
66
67
68#define MULTI_UVMFLAGS_INDEX 3
69#define MULTI_UVMDOMID_INDEX 4
70
71#define is_running_on_xen() (xen_start_info ? 1 : 0)
72
73#endif /* __HYPERVISOR_H__ */
diff --git a/include/asm-i386/xen/interface.h b/include/asm-i386/xen/interface.h
new file mode 100644
index 000000000000..165c3968e138
--- /dev/null
+++ b/include/asm-i386/xen/interface.h
@@ -0,0 +1,188 @@
1/******************************************************************************
2 * arch-x86_32.h
3 *
4 * Guest OS interface to x86 32-bit Xen.
5 *
6 * Copyright (c) 2004, K A Fraser
7 */
8
9#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
10#define __XEN_PUBLIC_ARCH_X86_32_H__
11
12#ifdef __XEN__
13#define __DEFINE_GUEST_HANDLE(name, type) \
14 typedef struct { type *p; } __guest_handle_ ## name
15#else
16#define __DEFINE_GUEST_HANDLE(name, type) \
17 typedef type * __guest_handle_ ## name
18#endif
19
20#define DEFINE_GUEST_HANDLE_STRUCT(name) \
21 __DEFINE_GUEST_HANDLE(name, struct name)
22#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
23#define GUEST_HANDLE(name) __guest_handle_ ## name
24
25#ifndef __ASSEMBLY__
26/* Guest handles for primitive C types. */
27__DEFINE_GUEST_HANDLE(uchar, unsigned char);
28__DEFINE_GUEST_HANDLE(uint, unsigned int);
29__DEFINE_GUEST_HANDLE(ulong, unsigned long);
30DEFINE_GUEST_HANDLE(char);
31DEFINE_GUEST_HANDLE(int);
32DEFINE_GUEST_HANDLE(long);
33DEFINE_GUEST_HANDLE(void);
34#endif
35
36/*
37 * SEGMENT DESCRIPTOR TABLES
38 */
39/*
40 * A number of GDT entries are reserved by Xen. These are not situated at the
41 * start of the GDT because some stupid OSes export hard-coded selector values
42 * in their ABI. These hard-coded values are always near the start of the GDT,
43 * so Xen places itself out of the way, at the far end of the GDT.
44 */
45#define FIRST_RESERVED_GDT_PAGE 14
46#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
47#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
48
49/*
50 * These flat segments are in the Xen-private section of every GDT. Since these
51 * are also present in the initial GDT, many OSes will be able to avoid
52 * installing their own GDT.
53 */
54#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
55#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
56#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
57#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
58#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
59#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
60
61#define FLAT_KERNEL_CS FLAT_RING1_CS
62#define FLAT_KERNEL_DS FLAT_RING1_DS
63#define FLAT_KERNEL_SS FLAT_RING1_SS
64#define FLAT_USER_CS FLAT_RING3_CS
65#define FLAT_USER_DS FLAT_RING3_DS
66#define FLAT_USER_SS FLAT_RING3_SS
67
68/* And the trap vector is... */
69#define TRAP_INSTR "int $0x82"
70
71/*
72 * Virtual addresses beyond this are not modifiable by guest OSes. The
73 * machine->physical mapping table starts at this address, read-only.
74 */
75#ifdef CONFIG_X86_PAE
76#define __HYPERVISOR_VIRT_START 0xF5800000
77#else
78#define __HYPERVISOR_VIRT_START 0xFC000000
79#endif
80
81#ifndef HYPERVISOR_VIRT_START
82#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
83#endif
84
85#ifndef machine_to_phys_mapping
86#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
87#endif
88
89/* Maximum number of virtual CPUs in multi-processor guests. */
90#define MAX_VIRT_CPUS 32
91
92#ifndef __ASSEMBLY__
93
94/*
95 * Send an array of these to HYPERVISOR_set_trap_table()
96 */
97#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
98#define TI_GET_IF(_ti) ((_ti)->flags & 4)
99#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
100#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
101
102struct trap_info {
103 uint8_t vector; /* exception vector */
104 uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
105 uint16_t cs; /* code selector */
106 unsigned long address; /* code offset */
107};
108DEFINE_GUEST_HANDLE_STRUCT(trap_info);
109
110struct cpu_user_regs {
111 uint32_t ebx;
112 uint32_t ecx;
113 uint32_t edx;
114 uint32_t esi;
115 uint32_t edi;
116 uint32_t ebp;
117 uint32_t eax;
118 uint16_t error_code; /* private */
119 uint16_t entry_vector; /* private */
120 uint32_t eip;
121 uint16_t cs;
122 uint8_t saved_upcall_mask;
123 uint8_t _pad0;
124 uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
125 uint32_t esp;
126 uint16_t ss, _pad1;
127 uint16_t es, _pad2;
128 uint16_t ds, _pad3;
129 uint16_t fs, _pad4;
130 uint16_t gs, _pad5;
131};
132DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
133
134typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
135
136/*
137 * The following is all CPU context. Note that the fpu_ctxt block is filled
138 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
139 */
140struct vcpu_guest_context {
141 /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
142 struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
143#define VGCF_I387_VALID (1<<0)
144#define VGCF_HVM_GUEST (1<<1)
145#define VGCF_IN_KERNEL (1<<2)
146 unsigned long flags; /* VGCF_* flags */
147 struct cpu_user_regs user_regs; /* User-level CPU registers */
148 struct trap_info trap_ctxt[256]; /* Virtual IDT */
149 unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
150 unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
151 unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
152 unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
153 unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
154 unsigned long event_callback_cs; /* CS:EIP of event callback */
155 unsigned long event_callback_eip;
156 unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
157 unsigned long failsafe_callback_eip;
158 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
159};
160DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
161
162struct arch_shared_info {
163 unsigned long max_pfn; /* max pfn that appears in table */
164 /* Frame containing list of mfns containing list of mfns containing p2m. */
165 unsigned long pfn_to_mfn_frame_list_list;
166 unsigned long nmi_reason;
167};
168
169struct arch_vcpu_info {
170 unsigned long cr2;
171 unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
172};
173
174#endif /* !__ASSEMBLY__ */
175
176/*
177 * Prefix forces emulation of some non-trapping instructions.
178 * Currently only CPUID.
179 */
180#ifdef __ASSEMBLY__
181#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
182#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
183#else
184#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
185#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
186#endif
187
188#endif
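The guest-handle macros exist so that structures shared with the hypervisor can carry typed guest pointers: on the kernel side GUEST_HANDLE(name) expands to a plain pointer typedef, while a __XEN__ build wraps the pointer in a one-member struct. A small illustration of the expansion; the structure itself is made up for the example:

/* Made-up structure purely to show the expansion of the handle macros. */
struct sketch_xen_request {
	unsigned int nr_entries;
	GUEST_HANDLE(trap_info) table;	/* struct trap_info * on the kernel side */
};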