author     Len Brown <len.brown@intel.com>  2005-09-08 01:45:47 -0400
committer  Len Brown <len.brown@intel.com>  2005-09-08 01:45:47 -0400
commit     64e47488c913ac704d465a6af86a26786d1412a5
tree       d3b0148592963dcde26e4bb35ddfec8b1eaf8e23  /include/asm-i386
parent     4a35a46bf1cda4737c428380d1db5d15e2590d18
parent     caf39e87cc1182f7dae84eefc43ca14d54c78ef9

Merge linux-2.6 with linux-acpi-2.6

Diffstat (limited to 'include/asm-i386')
 27 files changed, 329 insertions, 200 deletions
diff --git a/include/asm-i386/agp.h b/include/asm-i386/agp.h
index b82f5f3ab887..9075083bab76 100644
--- a/include/asm-i386/agp.h
+++ b/include/asm-i386/agp.h
@@ -19,7 +19,7 @@ int unmap_page_from_agp(struct page *page);
 /* Could use CLFLUSH here if the cpu supports it. But then it would
    need to be called for each cacheline of the whole page so it may not be
    worth it. Would need a page for it. */
-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+#define flush_agp_cache() wbinvd()
 
 /* Convert a physical address to an address suitable for the GART. */
 #define phys_to_gart(x)	(x)
diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h
index a96a8f48fbfc..03185cef8e0a 100644
--- a/include/asm-i386/apicdef.h
+++ b/include/asm-i386/apicdef.h
@@ -16,6 +16,7 @@
 #define	GET_APIC_VERSION(x)	((x)&0xFF)
 #define	GET_APIC_MAXLVT(x)	(((x)>>16)&0xFF)
 #define	APIC_INTEGRATED(x)	((x)&0xF0)
+#define	APIC_XAPIC(x)		((x) >= 0x14)
 #define	APIC_TASKPRI	0x80
 #define	APIC_TPRI_MASK	0xFF
 #define	APIC_ARBPRI	0x90
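The new APIC_XAPIC() test, like the neighbouring helpers, is plain bit arithmetic on the local APIC version register, so it can be exercised outside the kernel. A minimal user-space sketch; the sample LVR value 0x00060014 is made up for illustration:

#include <stdio.h>

/* Copies of the helpers from the apicdef.h hunk above. */
#define GET_APIC_VERSION(x) ((x) & 0xFF)
#define GET_APIC_MAXLVT(x)  (((x) >> 16) & 0xFF)
#define APIC_INTEGRATED(x)  ((x) & 0xF0)
#define APIC_XAPIC(x)       ((x) >= 0x14)

int main(void)
{
	unsigned int lvr = 0x00060014;       /* hypothetical local APIC version register */
	unsigned int ver = GET_APIC_VERSION(lvr);

	/* Prints: version 0x14, maxlvt 6, integrated yes, xAPIC yes */
	printf("version 0x%02x, maxlvt %u, integrated %s, xAPIC %s\n",
	       ver, GET_APIC_MAXLVT(lvr),
	       APIC_INTEGRATED(ver) ? "yes" : "no",
	       APIC_XAPIC(ver) ? "yes" : "no");
	return 0;
}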
diff --git a/include/asm-i386/auxvec.h b/include/asm-i386/auxvec.h
new file mode 100644
index 000000000000..395e13016bfb
--- /dev/null
+++ b/include/asm-i386/auxvec.h
@@ -0,0 +1,11 @@
+#ifndef __ASMi386_AUXVEC_H
+#define __ASMi386_AUXVEC_H
+
+/*
+ * Architecture-neutral AT_ values in 0-17, leave some room
+ * for more of them, start the x86-specific ones at 32.
+ */
+#define AT_SYSINFO		32
+#define AT_SYSINFO_EHDR		33
+
+#endif
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 6789fc275da3..ea54540638d2 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -118,7 +118,10 @@ static void __init check_hlt(void)
 		printk("disabled\n");
 		return;
 	}
-	__asm__ __volatile__("hlt ; hlt ; hlt ; hlt");
+	halt();
+	halt();
+	halt();
+	halt();
 	printk("OK.\n");
 }
 
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 11e67811a990..6df1a53c190e 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -27,8 +27,18 @@ struct Xgt_desc_struct {
 
 extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
 
-#define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))
+#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
+#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
+
+#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
+#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
+#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
+
+#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
+#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
+#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
+#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
 
 /*
  * This is the ldt that every process will get unless we need
@@ -39,14 +49,14 @@ extern void set_intr_gate(unsigned int irq, void * addr);
 
 #define _set_tssldt_desc(n,addr,limit,type) \
 __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
-	"movw %%ax,2(%2)\n\t" \
-	"rorl $16,%%eax\n\t" \
-	"movb %%al,4(%2)\n\t" \
+	"movw %w1,2(%2)\n\t" \
+	"rorl $16,%1\n\t" \
+	"movb %b1,4(%2)\n\t" \
 	"movb %4,5(%2)\n\t" \
 	"movb $0,6(%2)\n\t" \
-	"movb %%ah,7(%2)\n\t" \
-	"rorl $16,%%eax" \
-	: "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+	"movb %h1,7(%2)\n\t" \
+	"rorl $16,%1" \
+	: "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
 
 static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
 {
@@ -86,6 +96,13 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
 	 (info)->seg_not_present == 1 && \
 	 (info)->useable == 0 )
 
+static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
+{
+	__u32 *lp = (__u32 *)((char *)ldt + entry*8);
+	*lp = entry_a;
+	*(lp+1) = entry_b;
+}
+
 #if TLS_SIZE != 24
 # error update this code.
 #endif
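write_ldt_entry() above is plain C: an LDT entry is 8 bytes stored as two 32-bit words at byte offset entry*8. A user-space sketch of the same helper writing into an ordinary buffer; the descriptor words and the four-entry table size are made up:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t __u32;   /* stand-in for the kernel type in user space */

/* Same body as the write_ldt_entry() added in the desc.h hunk above. */
static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
{
	__u32 *lp = (__u32 *)((char *)ldt + entry*8);
	*lp = entry_a;
	*(lp+1) = entry_b;
}

int main(void)
{
	__u32 ldt[2 * 4] = { 0 };                         /* room for 4 hypothetical entries */

	write_ldt_entry(ldt, 2, 0x0000ffff, 0x00cff200);  /* made-up descriptor words */
	printf("entry 2: low=%08x high=%08x\n", ldt[4], ldt[5]);
	return 0;
}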
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 130bdc8c68cf..fa11117d3cfa 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -9,6 +9,7 @@
 #include <asm/user.h>
 #include <asm/processor.h>
 #include <asm/system.h>		/* for savesegment */
+#include <asm/auxvec.h>
 
 #include <linux/utsname.h>
 
@@ -109,13 +110,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
 
 #define ELF_PLATFORM  (system_utsname.machine)
 
-/*
- * Architecture-neutral AT_ values in 0-17, leave some room
- * for more of them, start the x86-specific ones at 32.
- */
-#define AT_SYSINFO		32
-#define AT_SYSINFO_EHDR		33
-
 #ifdef __KERNEL__
 #define SET_PERSONALITY(ex, ibcs2) do { } while (0)
 
diff --git a/include/asm-i386/fcntl.h b/include/asm-i386/fcntl.h
index 511cde94a3ed..46ab12db5739 100644
--- a/include/asm-i386/fcntl.h
+++ b/include/asm-i386/fcntl.h
@@ -1,88 +1 @@
-#ifndef _I386_FCNTL_H
-#define _I386_FCNTL_H
-
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
-   located on an ext2 file system */
-#define O_ACCMODE	  0003
-#define O_RDONLY	    00
-#define O_WRONLY	    01
-#define O_RDWR		    02
-#define O_CREAT		  0100	/* not fcntl */
-#define O_EXCL		  0200	/* not fcntl */
-#define O_NOCTTY	  0400	/* not fcntl */
-#define O_TRUNC		 01000	/* not fcntl */
-#define O_APPEND	 02000
-#define O_NONBLOCK	 04000
-#define O_NDELAY	O_NONBLOCK
-#define O_SYNC		010000
-#define FASYNC		020000	/* fcntl, for BSD compatibility */
-#define O_DIRECT	040000	/* direct disk access hint */
-#define O_LARGEFILE	0100000
-#define O_DIRECTORY	0200000	/* must be a directory */
-#define O_NOFOLLOW	0400000 /* don't follow links */
-#define O_NOATIME	01000000
-
-#define F_DUPFD		0	/* dup */
-#define F_GETFD		1	/* get close_on_exec */
-#define F_SETFD		2	/* set/clear close_on_exec */
-#define F_GETFL		3	/* get file->f_flags */
-#define F_SETFL		4	/* set file->f_flags */
-#define F_GETLK		5
-#define F_SETLK		6
-#define F_SETLKW	7
-
-#define F_SETOWN	8	/* for sockets. */
-#define F_GETOWN	9	/* for sockets. */
-#define F_SETSIG	10	/* for sockets. */
-#define F_GETSIG	11	/* for sockets. */
-
-#define F_GETLK64	12	/* using 'struct flock64' */
-#define F_SETLK64	13
-#define F_SETLKW64	14
-
-/* for F_[GET|SET]FL */
-#define FD_CLOEXEC	1	/* actually anything with low bit set goes */
-
-/* for posix fcntl() and lockf() */
-#define F_RDLCK		0
-#define F_WRLCK		1
-#define F_UNLCK		2
-
-/* for old implementation of bsd flock () */
-#define F_EXLCK		4	/* or 3 */
-#define F_SHLCK		8	/* or 4 */
-
-/* for leases */
-#define F_INPROGRESS	16
-
-/* operations for bsd flock(), also used by the kernel implementation */
-#define LOCK_SH		1	/* shared lock */
-#define LOCK_EX		2	/* exclusive lock */
-#define LOCK_NB		4	/* or'd with one of the above to prevent
-				   blocking */
-#define LOCK_UN		8	/* remove lock */
-
-#define LOCK_MAND	32	/* This is a mandatory flock */
-#define LOCK_READ	64	/* ... Which allows concurrent read operations */
-#define LOCK_WRITE	128	/* ... Which allows concurrent write operations */
-#define LOCK_RW		192	/* ... Which allows concurrent read & write ops */
-
-struct flock {
-	short l_type;
-	short l_whence;
-	off_t l_start;
-	off_t l_len;
-	pid_t l_pid;
-};
-
-struct flock64 {
-	short l_type;
-	short l_whence;
-	loff_t l_start;
-	loff_t l_len;
-	pid_t l_pid;
-};
-
-#define F_LINUX_SPECIFIC_BASE	1024
-
-#endif
+#include <asm-generic/fcntl.h>
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
new file mode 100644
index 000000000000..44b9db806474
--- /dev/null
+++ b/include/asm-i386/futex.h
@@ -0,0 +1,108 @@
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+
+#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
+  __asm__ __volatile ( \
+"1:	" insn "\n" \
+"2:	.section .fixup,\"ax\"\n\
+3:	mov	%3, %1\n\
+	jmp	2b\n\
+	.previous\n\
+	.section __ex_table,\"a\"\n\
+	.align	8\n\
+	.long	1b,3b\n\
+	.previous" \
+	: "=r" (oldval), "=r" (ret), "=m" (*uaddr) \
+	: "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
+
+#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
+  __asm__ __volatile ( \
+"1:	movl	%2, %0\n\
+	movl	%0, %3\n" \
+	insn "\n" \
+"2:	" LOCK_PREFIX "cmpxchgl %3, %2\n\
+	jnz	1b\n\
+3:	.section .fixup,\"ax\"\n\
+4:	mov	%5, %1\n\
+	jmp	3b\n\
+	.previous\n\
+	.section __ex_table,\"a\"\n\
+	.align	8\n\
+	.long	1b,4b,2b,4b\n\
+	.previous" \
+	: "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \
+	  "=&r" (tem) \
+	: "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, ret, tem;
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	inc_preempt_count();
+
+	if (op == FUTEX_OP_SET)
+		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
+	else {
+#ifndef CONFIG_X86_BSWAP
+		if (boot_cpu_data.x86 == 3)
+			ret = -ENOSYS;
+		else
+#endif
+		switch (op) {
+		case FUTEX_OP_ADD:
+			__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
+					   oldval, uaddr, oparg);
+			break;
+		case FUTEX_OP_OR:
+			__futex_atomic_op2("orl %4, %3", ret, oldval, uaddr,
+					   oparg);
+			break;
+		case FUTEX_OP_ANDN:
+			__futex_atomic_op2("andl %4, %3", ret, oldval, uaddr,
+					   ~oparg);
+			break;
+		case FUTEX_OP_XOR:
+			__futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr,
+					   oparg);
+			break;
+		default:
+			ret = -ENOSYS;
+		}
+	}
+
+	dec_preempt_count();
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+		default: ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+#endif
+#endif
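futex_atomic_op_inuser() recovers the operation code, the comparison code and the two 12-bit arguments from a single 32-bit encoded_op; the left-then-right shift pairs also sign-extend the argument fields. A user-space sketch of just that decoding step: the packing helper and the op/cmp codes 1 and 3 are illustrative, not the real FUTEX_OP_* constants, and the shifts rely on gcc's wrapping left shift and arithmetic right shift, exactly as the kernel code above does:

#include <stdio.h>

/* Pack op into bits 28-31, cmp into 24-27, oparg into 12-23, cmparg into 0-11
 * (layout inferred from the shifts in futex_atomic_op_inuser above). */
static int encode(int op, int cmp, int oparg, int cmparg)
{
	return (op << 28) | (cmp << 24) | ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
}

int main(void)
{
	int encoded_op = encode(1, 3, 16, -1);   /* illustrative op/cmp codes */

	/* Same decode as the new futex.h. */
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;

	/* Prints: op=1 cmp=3 oparg=16 cmparg=-1 */
	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}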
diff --git a/include/asm-i386/hdreg.h b/include/asm-i386/hdreg.h
deleted file mode 100644
index 5989bbc97cbf..000000000000
--- a/include/asm-i386/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
-#warning this file is obsolete, please do not use it
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
index b3f8d5f59d5d..316138e89910 100644
--- a/include/asm-i386/kdebug.h
+++ b/include/asm-i386/kdebug.h
@@ -41,9 +41,16 @@ enum die_val {
 	DIE_PAGE_FAULT,
 };
 
-static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
+static inline int notify_die(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
 {
-	struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
+	struct die_args args = {
+		.regs	= regs,
+		.str	= str,
+		.err	= err,
+		.trapnr	= trap,
+		.signr	= sig
+	};
 	return notifier_call_chain(&i386die_chain, val, &args);
 }
 
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
index 85809e0898d7..28a84f6185a7 100644
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -1,6 +1,8 @@
 #ifndef __ASM_MACH_MPPARSE_H
 #define __ASM_MACH_MPPARSE_H
 
+#include <linux/acpi.h>
+
 static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
 	struct mpc_config_translation *translation)
 {
@@ -12,8 +14,9 @@ static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
 {
 }
 
-extern int parse_unisys_oem (char *oemptr, int oem_entries);
-extern int find_unisys_acpi_oem_table(unsigned long *oem_addr, int *length);
+extern int parse_unisys_oem (char *oemptr);
+extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
+extern void setup_unisys();
 
 static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
 	char *productid)
@@ -22,18 +25,33 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
 		struct mp_config_oemtable *oem_table =
 			(struct mp_config_oemtable *)mpc->mpc_oemptr;
 		if (!strncmp(oem, "UNISYS", 6))
-			return parse_unisys_oem((char *)oem_table, oem_table->oem_length);
+			return parse_unisys_oem((char *)oem_table);
 	}
 	return 0;
 }
 
+static inline int es7000_check_dsdt()
+{
+	struct acpi_table_header *header = NULL;
+	if(!acpi_get_table_header_early(ACPI_DSDT, &header))
+		acpi_table_print(header, 0);
+	if (!strncmp(header->oem_id, "UNISYS", 6))
+		return 1;
+	return 0;
+}
+
 /* Hook from generic ACPI tables.c */
 static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	unsigned long oem_addr;
-	int oem_entries;
-	if (!find_unisys_acpi_oem_table(&oem_addr, &oem_entries))
-		return parse_unisys_oem((char *)oem_addr, oem_entries);
+	if (!find_unisys_acpi_oem_table(&oem_addr)) {
+		if (es7000_check_dsdt())
+			return parse_unisys_oem((char *)oem_addr);
+		else {
+			setup_unisys();
+			return 1;
+		}
+	}
 	return 0;
 }
 
diff --git a/include/asm-i386/mach-generic/mach_apic.h b/include/asm-i386/mach-generic/mach_apic.h
index b13767a4e934..d9dc039da94a 100644
--- a/include/asm-i386/mach-generic/mach_apic.h
+++ b/include/asm-i386/mach-generic/mach_apic.h
@@ -28,4 +28,6 @@
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
 
+extern void generic_bigsmp_probe(void);
+
 #endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index b9e9f66d2721..64a0b8e6afeb 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -11,6 +11,7 @@ extern int mp_bus_id_to_local [MAX_MP_BUSSES];
 extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
 extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
 
+extern unsigned int def_to_bigsmp;
 extern unsigned int boot_cpu_physical_apicid;
 extern int smp_found_config;
 extern void find_smp_config (void);
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index c76fce8badbb..62b76cd96957 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -47,6 +47,21 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
 		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
 	ret__; })
 
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({ int ret__;		\
+	asm volatile("2: rdmsr ; xorl %0,%0\n"		\
+		     "1:\n\t"				\
+		     ".section .fixup,\"ax\"\n\t"	\
+		     "3:  movl %4,%0 ; jmp 1b\n\t"	\
+		     ".previous\n\t"			\
+		     ".section __ex_table,\"a\"\n"	\
+		     "   .align 4\n\t"			\
+		     "   .long 	2b,3b\n\t"		\
+		     ".previous"			\
+		     : "=r" (ret__), "=a" (*(a)), "=d" (*(b)) \
+		     : "c" (msr), "i" (-EFAULT));\
+	ret__; })
+
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
 
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 8d93f732d72d..73296d9924fb 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -68,7 +68,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define HPAGE_MASK	(~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#define ARCH_HAS_HUGETLB_CLEAN_STALE_PGTABLE
 #endif
 
 #define pgd_val(x)	((x).pgd)
@@ -104,20 +103,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  */
 extern unsigned int __VMALLOC_RESERVE;
 
-/* Pure 2^n version of get_order */
-static __inline__ int get_order(unsigned long size)
-{
-	int order;
-
-	size = (size-1) >> (PAGE_SHIFT-1);
-	order = -1;
-	do {
-		size >>= 1;
-		order++;
-	} while (size);
-	return order;
-}
-
 extern int sysctl_legacy_va_layout;
 
 extern int page_is_ram(unsigned long pagenr);
@@ -156,4 +141,6 @@ extern int page_is_ram(unsigned long pagenr);
 
 #endif /* __KERNEL__ */
 
+#include <asm-generic/page.h>
+
 #endif /* _I386_PAGE_H */
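The get_order() removed above is the generic "pure 2^n" implementation that page.h now picks up from asm-generic/page.h; its behaviour is easy to check in user space. A minimal sketch, assuming PAGE_SHIFT = 12 (4 KB pages on i386), which this diff does not show:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed: 4 KB pages */

/* Same algorithm as the get_order() removed in the page.h hunk above. */
static inline int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 1, 4096, 4097, 8192, 65536 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("get_order(%lu) = %d\n", sizes[i], get_order(sizes[i]));
	/* Prints 0, 0, 1, 1, 4: log2 of the allocation size rounded up to whole pages. */
	return 0;
}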
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index d609f9c2c1f0..2e3f4a344a2d 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -64,7 +64,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 #define set_pmd(pmdptr,pmdval) \
 		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
 #define set_pud(pudptr,pudval) \
-		set_64bit((unsigned long long *)(pudptr),pud_val(pudval))
+		(*(pudptr) = (pudval))
 
 /*
  * Pentium-II erratum A13: in PAE mode we explicitly have to flush
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 77c6497f416e..47bc1ffa3d4c 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -86,9 +86,7 @@ void paging_init(void);
 #endif
 
 /*
- * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
- * of the Pentium details, but assuming intel did the straightforward
- * thing, this bit set in the page directory entry just means that
+ * _PAGE_PSE set in the page directory entry just means that
  * the page directory entry points directly to a 4MB-aligned block of
  * memory.
  */
@@ -119,8 +117,10 @@ void paging_init(void);
 #define _PAGE_UNUSED2	0x400
 #define _PAGE_UNUSED3	0x800
 
-#define _PAGE_FILE	0x040	/* set:pagecache unset:swap */
-#define _PAGE_PROTNONE	0x080	/* If not present */
+/* If _PAGE_PRESENT is clear, we use these: */
+#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_PROTNONE	0x080	/* if the user mapped it with PROT_NONE;
+				   pte_present gives true */
 #ifdef CONFIG_X86_PAE
 #define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
 #else
@@ -215,11 +215,13 @@ extern unsigned long pg0[];
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
+#define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT)
 static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
 static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
 static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
+static inline int pte_huge(pte_t pte)		{ return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; }
 
 /*
  * The following only works if pte_present() is not true.
@@ -236,7 +238,7 @@ static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return
 static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PRESENT | _PAGE_PSE; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= __LARGE_PTE; return pte; }
 
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
@@ -258,12 +260,39 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
 }
 
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+{
+	pte_t pte;
+	if (full) {
+		pte = *ptep;
+		*ptep = __pte(0);
+	} else {
+		pte = ptep_get_and_clear(mm, addr, ptep);
+	}
+	return pte;
+}
+
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
 }
 
 /*
+ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
+ *
+ *  dst - pointer to pgd range anwhere on a pgd page
+ *  src - ""
+ *  count - the number of pgds to copy.
+ *
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+{
+       memcpy(dst, src, count * sizeof(pgd_t));
+}
+
+/*
  * Macro to mark a page protection value as "uncacheable".  On processors which do not support
  * it, this is a no-op.
  */
@@ -415,6 +444,7 @@ extern void noexec_setup(const char *str);
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
 #include <asm-generic/pgtable.h>
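clone_pgd_range() above is just a memcpy over pgd slots; the documented constraints (ranges must not overlap or cross a page boundary) are the callers' responsibility, not the helper's. A user-space sketch of the call shape, with a stand-in pgd_t and a made-up eight-slot "pgd page":

#include <stdio.h>
#include <string.h>

typedef struct { unsigned long pgd; } pgd_t;   /* stand-in for the kernel type */

/* Same body as the clone_pgd_range() added in the pgtable.h hunk above. */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
	pgd_t page[8] = { {1}, {2}, {3}, {4}, {0}, {0}, {0}, {0} };

	/* Copy the first four slots into the second four: same "page",
	 * non-overlapping ranges, as the comment requires. */
	clone_pgd_range(&page[4], &page[0], 4);

	for (int i = 0; i < 8; i++)
		printf("%lu ", page[i].pgd);
	printf("\n");   /* 1 2 3 4 1 2 3 4 */
	return 0;
}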
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index d0d8b0160090..37bef8ed7bed 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -203,9 +203,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
 	return edx;
 }
 
-#define load_cr3(pgdir) \
-	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
-
+#define load_cr3(pgdir) write_cr3(__pa(pgdir))
 
 /*
  * Intel CPU features in CR4
@@ -232,22 +230,20 @@ extern unsigned long mmu_cr4_features;
 
 static inline void set_in_cr4 (unsigned long mask)
 {
+	unsigned cr4;
 	mmu_cr4_features |= mask;
-	__asm__("movl %%cr4,%%eax\n\t"
-		"orl %0,%%eax\n\t"
-		"movl %%eax,%%cr4\n"
-		: : "irg" (mask)
-		:"ax");
+	cr4 = read_cr4();
+	cr4 |= mask;
+	write_cr4(cr4);
 }
 
 static inline void clear_in_cr4 (unsigned long mask)
 {
+	unsigned cr4;
 	mmu_cr4_features &= ~mask;
-	__asm__("movl %%cr4,%%eax\n\t"
-		"andl %0,%%eax\n\t"
-		"movl %%eax,%%cr4\n"
-		: : "irg" (~mask)
-		:"ax");
+	cr4 = read_cr4();
+	cr4 &= ~mask;
+	write_cr4(cr4);
 }
 
 /*
@@ -281,6 +277,11 @@ static inline void clear_in_cr4 (unsigned long mask)
 	outb((data), 0x23); \
 } while (0)
 
+static inline void serialize_cpu(void)
+{
+	 __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
+}
+
 static inline void __monitor(const void *eax, unsigned long ecx,
 		unsigned long edx)
 {
@@ -454,6 +455,7 @@ struct thread_struct {
 	unsigned int		saved_fs, saved_gs;
 /* IO permissions */
 	unsigned long	*io_bitmap_ptr;
+	unsigned long	iopl;
 /* max allowed port in the bitmap, in bytes: */
 	unsigned long	io_bitmap_max;
 };
@@ -474,7 +476,6 @@ struct thread_struct {
 	.esp0		= sizeof(init_stack) + (long)&init_stack, \
 	.ss0		= __KERNEL_DS, \
 	.ss1		= __KERNEL_CS, \
-	.ldt		= GDT_ENTRY_LDT, \
 	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET, \
 	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
 }
@@ -511,6 +512,21 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
 		: /* no output */ \
 		:"r" (value))
 
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static inline void set_iopl_mask(unsigned mask)
+{
+	unsigned int reg;
+	__asm__ __volatile__ ("pushfl;"
+			      "popl %0;"
+			      "andl %1, %0;"
+			      "orl %2, %0;"
+			      "pushl %0;"
+			      "popfl"
+				: "=&r" (reg)
+				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
+}
 
 /* Forward declaration, a strange C thing */
 struct task_struct;
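set_iopl_mask() simply clears the two IOPL bits of EFLAGS and ORs in the caller's mask. The flag arithmetic can be checked on its own; a sketch assuming X86_EFLAGS_IOPL = 0x00003000 (bits 12-13), which is not defined in this hunk:

#include <stdio.h>

#define X86_EFLAGS_IOPL 0x00003000   /* assumed: IOPL occupies EFLAGS bits 12-13 */

/* C equivalent of the pushfl/andl/orl/popfl sequence in set_iopl_mask() above. */
static unsigned int apply_iopl(unsigned int eflags, unsigned int mask)
{
	return (eflags & ~X86_EFLAGS_IOPL) | mask;
}

int main(void)
{
	unsigned int eflags = 0x00000246;   /* made-up EFLAGS value, IOPL currently 0 */

	printf("IOPL 3: %08x\n", apply_iopl(eflags, 3 << 12));  /* 0x00003246 */
	printf("IOPL 0: %08x\n", apply_iopl(0x00003246, 0));    /* 0x00000246 */
	return 0;
}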
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index 05532875e39e..7e0f2945d17d 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -61,6 +61,13 @@ struct pt_regs {
 struct task_struct;
 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
 
+/*
+ * user_mode_vm(regs) determines whether a register set came from user mode.
+ * This is true if V8086 mode was enabled OR if the register set was from
+ * protected mode with RPL-3 CS value.  This tricky test checks that with
+ * one comparison.  Many places in the kernel can bypass this full check
+ * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
+ */
 static inline int user_mode(struct pt_regs *regs)
 {
 	return (regs->xcs & 3) != 0;
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 7a32184d54bf..826a8ca50ac8 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -44,7 +44,7 @@ extern unsigned char boot_params[PARAM_SIZE];
 #define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
 #define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
 #define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
-#define EFI_MEMMAP ((efi_memory_desc_t *) *((unsigned long *)(PARAM+0x1d0)))
+#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0)))
 #define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
 #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
 #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index a283738b80b3..13250199976d 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -59,7 +59,7 @@ extern void cpu_uninit(void);
 
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_callin_map;
-#define cpu_possible_map cpu_callout_map
+extern cpumask_t cpu_possible_map;
 
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 3db717a244f0..acd5c26b69ba 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -14,8 +14,7 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 
 #define switch_to(prev,next,last) do { \
 	unsigned long esi,edi; \
-	asm volatile("pushfl\n\t" \
-		     "pushl %%ebp\n\t" \
+	asm volatile("pushl %%ebp\n\t" \
 		     "movl %%esp,%0\n\t"	/* save ESP */ \
 		     "movl %5,%%esp\n\t"	/* restore ESP */ \
 		     "movl $1f,%1\n\t"		/* save EIP */ \
@@ -23,7 +22,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		     "jmp __switch_to\n" \
 		     "1:\t" \
 		     "popl %%ebp\n\t" \
-		     "popfl" \
 		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
 		      "=a" (last),"=S" (esi),"=D" (edi) \
 		     :"m" (next->thread.esp),"m" (next->thread.eip), \
@@ -93,13 +91,13 @@ static inline unsigned long _get_base(char * addr)
 		".align 4\n\t" \
 		".long 1b,3b\n" \
 		".previous" \
-		: :"m" (value))
+		: :"rm" (value))
 
 /*
  * Save a segment register away
  */
 #define savesegment(seg, value) \
-	asm volatile("mov %%" #seg ",%0":"=m" (value))
+	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
 /*
  * Clear and set 'TS' bit respectively
@@ -107,13 +105,33 @@ static inline unsigned long _get_base(char * addr)
 #define clts() __asm__ __volatile__ ("clts")
 #define read_cr0() ({ \
 	unsigned int __dummy; \
-	__asm__( \
+	__asm__ __volatile__( \
 		"movl %%cr0,%0\n\t" \
 		:"=r" (__dummy)); \
 	__dummy; \
 })
 #define write_cr0(x) \
-	__asm__("movl %0,%%cr0": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+
+#define read_cr2() ({ \
+	unsigned int __dummy; \
+	__asm__ __volatile__( \
+		"movl %%cr2,%0\n\t" \
+		:"=r" (__dummy)); \
+	__dummy; \
+})
+#define write_cr2(x) \
+	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+
+#define read_cr3() ({ \
+	unsigned int __dummy; \
+	__asm__ ( \
+		"movl %%cr3,%0\n\t" \
+		:"=r" (__dummy)); \
+	__dummy; \
+})
+#define write_cr3(x) \
+	__asm__ __volatile__("movl %0,%%cr3": :"r" (x));
 
 #define read_cr4() ({ \
 	unsigned int __dummy; \
@@ -123,7 +141,7 @@ static inline unsigned long _get_base(char * addr)
 	__dummy; \
 })
 #define write_cr4(x) \
-	__asm__("movl %0,%%cr4": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
 #define stts() write_cr0(8 | read_cr0())
 
 #endif	/* __KERNEL__ */
@@ -447,6 +465,8 @@ struct alt_instr {
 #define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
 /* used in the idle loop; sti takes one instruction cycle to complete */
 #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt() __asm__ __volatile__("hlt": : :"memory")
 
 #define irqs_disabled() \
 ({ \
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 95add81237ea..e2cb9fa6f563 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -139,6 +139,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
 #define TIF_IRET		5	/* return with iret */
+#define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
@@ -150,13 +151,15 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
 #define _TIF_IRET		(1<<TIF_IRET)
+#define _TIF_SYSCALL_EMU	(1<<TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
-  (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
+  (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|\
+		  _TIF_SECCOMP|_TIF_SYSCALL_EMU))
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(0x0000FFFF & ~_TIF_SECCOMP)
 
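The work mask is plain bit arithmetic over the TIF_* bit numbers above, so the effect of excluding the new TIF_SYSCALL_EMU (bit 6) can be printed directly. A user-space sketch using only values visible in this hunk, plus TIF_SYSCALL_TRACE = 0, which is assumed and not shown here:

#include <stdio.h>

/* Bit numbers from the thread_info.h hunk above. */
#define TIF_SYSCALL_TRACE 0   /* assumed; not part of this hunk */
#define TIF_SINGLESTEP    4
#define TIF_SYSCALL_EMU   6
#define TIF_SYSCALL_AUDIT 7
#define TIF_SECCOMP       8

#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SINGLESTEP    (1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_EMU   (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP       (1 << TIF_SECCOMP)

int main(void)
{
	unsigned old_mask = 0x0000FFFF &
		~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | _TIF_SECCOMP);
	unsigned new_mask = 0x0000FFFF &
		~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP |
		  _TIF_SECCOMP | _TIF_SYSCALL_EMU);

	/* The only difference is bit 6: syscall-emulation work is now handled
	 * off the interrupt/exception return path, like tracing and auditing. */
	printf("old=%04x new=%04x diff=%04x\n", old_mask, new_mask, old_mask ^ new_mask);
	return 0;
}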
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index dcf1e07db08a..aed16437479d 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -1,6 +1,7 @@
 #ifndef _ASMi386_TIMER_H
 #define _ASMi386_TIMER_H
 #include <linux/init.h>
+#include <linux/pm.h>
 
 /**
  * struct timer_ops - used to define a timer source
@@ -23,6 +24,8 @@ struct timer_opts {
 	unsigned long long (*monotonic_clock)(void);
 	void (*delay)(unsigned long);
 	unsigned long (*read_timer)(void);
+	int (*suspend)(pm_message_t state);
+	int (*resume)(void);
 };
 
 struct init_timer_opts {
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h
index 901b77c42b8a..ced00fe8fe61 100644
--- a/include/asm-i386/types.h
+++ b/include/asm-i386/types.h
@@ -63,8 +63,6 @@ typedef u64 sector_t;
 #define HAVE_SECTOR_T
 #endif
 
-typedef unsigned short kmem_bufctl_t;
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 886867aea947..89ab7e2bc5aa 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -83,30 +83,6 @@ extern struct movsl_mask {
  */
 #define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
 
-/**
- * verify_area: - Obsolete/deprecated and will go away soon,
- * use access_ok() instead.
- * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
- * Context: User context only.  This function may sleep.
- *
- * This function has been replaced by access_ok().
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
- * Returns zero if the memory block may be valid, -EFAULT
- * if it is definitely invalid.
- *
- * See access_ok() for more details.
- */
-static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
-{
-	return access_ok(type,addr,size) ? 0 : -EFAULT;
-}
-
-
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-i386/xor.h b/include/asm-i386/xor.h
index f80e2dbe1b56..23c86cef3b25 100644
--- a/include/asm-i386/xor.h
+++ b/include/asm-i386/xor.h
@@ -535,14 +535,14 @@ static struct xor_block_template xor_block_p5_mmx = {
 
 #define XMMS_SAVE do { \
 	preempt_disable(); \
+	cr0 = read_cr0(); \
+	clts(); \
 	__asm__ __volatile__ ( \
-		"movl %%cr0,%0		;\n\t" \
-		"clts			;\n\t" \
-		"movups %%xmm0,(%1)	;\n\t" \
-		"movups %%xmm1,0x10(%1)	;\n\t" \
-		"movups %%xmm2,0x20(%1)	;\n\t" \
-		"movups %%xmm3,0x30(%1)	;\n\t" \
-		: "=&r" (cr0) \
+		"movups %%xmm0,(%0)	;\n\t" \
+		"movups %%xmm1,0x10(%0)	;\n\t" \
+		"movups %%xmm2,0x20(%0)	;\n\t" \
+		"movups %%xmm3,0x30(%0)	;\n\t" \
+		: \
 		: "r" (xmm_save) \
 		: "memory"); \
 } while(0)
@@ -550,14 +550,14 @@ static struct xor_block_template xor_block_p5_mmx = {
 #define XMMS_RESTORE do { \
 	__asm__ __volatile__ ( \
 		"sfence			;\n\t" \
-		"movups (%1),%%xmm0	;\n\t" \
-		"movups 0x10(%1),%%xmm1	;\n\t" \
-		"movups 0x20(%1),%%xmm2	;\n\t" \
-		"movups 0x30(%1),%%xmm3	;\n\t" \
-		"movl 	%0,%%cr0	;\n\t" \
+		"movups (%0),%%xmm0	;\n\t" \
+		"movups 0x10(%0),%%xmm1	;\n\t" \
+		"movups 0x20(%0),%%xmm2	;\n\t" \
+		"movups 0x30(%0),%%xmm3	;\n\t" \
 		: \
-		: "r" (cr0), "r" (xmm_save) \
+		: "r" (xmm_save) \
 		: "memory"); \
+	write_cr0(cr0); \
 	preempt_enable(); \
 } while(0)
 