author		Ingo Molnar <mingo@elte.hu>	2008-10-12 06:39:30 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-12 06:39:50 -0400
commit		4c7145a1ec1bb789d5f07e47510e8bda546a7c4a
tree		e2767b77e5413473a3bba302237f4669a203f183
parent		74e91604b2452c15bbe72d77b37cf47ed0310d13
parent		fd048088306656824958e7783ffcee27e241b361
Merge branch 'linus' into x86/spinlocks
Done to prevent this failure of an Octopus merge:

  Added arch/arm/include/asm/byteorder.h in both, but differently.
  ERROR: Merge conflict in arch/arm/include/asm/byteorder.h
  Auto-merging include/asm-x86/spinlock.h
  ERROR: Merge conflict in include/asm-x86/spinlock.h
  fatal: merge program failed
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/acpi.h                   2
-rw-r--r--  include/asm-x86/amd_iommu.h              3
-rw-r--r--  include/asm-x86/amd_iommu_types.h       64
-rw-r--r--  include/asm-x86/apic.h                   4
-rw-r--r--  include/asm-x86/asm.h                    7
-rw-r--r--  include/asm-x86/bitops.h                10
-rw-r--r--  include/asm-x86/bugs.h                   2
-rw-r--r--  include/asm-x86/cacheflush.h             7
-rw-r--r--  include/asm-x86/cpufeature.h            11
-rw-r--r--  include/asm-x86/dma-mapping.h           87
-rw-r--r--  include/asm-x86/ds.h                   258
-rw-r--r--  include/asm-x86/elf.h                    5
-rw-r--r--  include/asm-x86/gart.h                   8
-rw-r--r--  include/asm-x86/idle.h                   2
-rw-r--r--  include/asm-x86/iommu.h                  1
-rw-r--r--  include/asm-x86/kgdb.h                  24
-rw-r--r--  include/asm-x86/mach-rdc321x/gpio.h      3
-rw-r--r--  include/asm-x86/mmu.h                    5
-rw-r--r--  include/asm-x86/mpspec.h                 3
-rw-r--r--  include/asm-x86/msr-index.h             16
-rw-r--r--  include/asm-x86/nmi.h                    1
-rw-r--r--  include/asm-x86/page.h                   1
-rw-r--r--  include/asm-x86/page_32.h                3
-rw-r--r--  include/asm-x86/paravirt.h              30
-rw-r--r--  include/asm-x86/pgtable-2level.h         2
-rw-r--r--  include/asm-x86/pgtable-3level.h         7
-rw-r--r--  include/asm-x86/pgtable.h               20
-rw-r--r--  include/asm-x86/pgtable_32.h             5
-rw-r--r--  include/asm-x86/pgtable_64.h             2
-rw-r--r--  include/asm-x86/processor.h             18
-rw-r--r--  include/asm-x86/ptrace-abi.h            14
-rw-r--r--  include/asm-x86/ptrace.h                43
-rw-r--r--  include/asm-x86/resume-trace.h           2
-rw-r--r--  include/asm-x86/syscall.h              211
-rw-r--r--  include/asm-x86/thread_info.h            4
-rw-r--r--  include/asm-x86/uaccess_64.h             1

36 files changed, 731 insertions(+), 155 deletions(-)
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index bd76299586b3..392e17336be1 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -140,6 +140,8 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 	    boot_cpu_data.x86_model <= 0x05 &&
 	    boot_cpu_data.x86_mask < 0x0A)
 		return 1;
+	else if (boot_cpu_has(X86_FEATURE_AMDC1E))
+		return 1;
 	else
 		return max_cstate;
 }
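
(Usage sketch, not part of the commit: with the added branch, a CPU flagged X86_FEATURE_AMDC1E is capped to C1 regardless of what the caller requests. The helper name deepest_cstate is hypothetical.)

	#include <asm/acpi.h>

	/* returns 1 (C1) on a C1E-affected AMD CPU, else the requested value */
	static unsigned int deepest_cstate(unsigned int requested)
	{
		return acpi_processor_cstate_check(requested);
	}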
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h
index 783f43e58052..041d0db7da27 100644
--- a/include/asm-x86/amd_iommu.h
+++ b/include/asm-x86/amd_iommu.h
@@ -20,10 +20,13 @@
 #ifndef ASM_X86__AMD_IOMMU_H
 #define ASM_X86__AMD_IOMMU_H
 
+#include <linux/irqreturn.h>
+
 #ifdef CONFIG_AMD_IOMMU
 extern int amd_iommu_init(void);
 extern int amd_iommu_init_dma_ops(void);
 extern void amd_iommu_detect(void);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 #else
 static inline int amd_iommu_init(void) { return -ENODEV; }
 static inline void amd_iommu_detect(void) { }
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index 1ffa4e53c989..b3085869a17b 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -37,6 +37,7 @@
 /* Capability offsets used by the driver */
 #define MMIO_CAP_HDR_OFFSET	0x00
 #define MMIO_RANGE_OFFSET	0x0c
+#define MMIO_MISC_OFFSET	0x10
 
 /* Masks, shifts and macros to parse the device range capability */
 #define MMIO_RANGE_LD_MASK	0xff000000
@@ -48,6 +49,7 @@
 #define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
 #define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
 #define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+#define MMIO_MSI_NUM(x)	((x) & 0x1f)
 
 /* Flag masks for the AMD IOMMU exclusion range */
 #define MMIO_EXCL_ENABLE_MASK	0x01ULL
@@ -69,6 +71,25 @@
 /* MMIO status bits */
 #define MMIO_STATUS_COM_WAIT_INT_MASK	0x04
 
+/* event logging constants */
+#define EVENT_ENTRY_SIZE	0x10
+#define EVENT_TYPE_SHIFT	28
+#define EVENT_TYPE_MASK		0xf
+#define EVENT_TYPE_ILL_DEV	0x1
+#define EVENT_TYPE_IO_FAULT	0x2
+#define EVENT_TYPE_DEV_TAB_ERR	0x3
+#define EVENT_TYPE_PAGE_TAB_ERR	0x4
+#define EVENT_TYPE_ILL_CMD	0x5
+#define EVENT_TYPE_CMD_HARD_ERR	0x6
+#define EVENT_TYPE_IOTLB_INV_TO	0x7
+#define EVENT_TYPE_INV_DEV_REQ	0x8
+#define EVENT_DEVID_MASK	0xffff
+#define EVENT_DEVID_SHIFT	0
+#define EVENT_DOMID_MASK	0xffff
+#define EVENT_DOMID_SHIFT	0
+#define EVENT_FLAGS_MASK	0xfff
+#define EVENT_FLAGS_SHIFT	0x10
+
 /* feature control bits */
 #define CONTROL_IOMMU_EN	0x00ULL
 #define CONTROL_HT_TUN_EN	0x01ULL
@@ -109,6 +130,8 @@
 #define DEV_ENTRY_NMI_PASS	0xba
 #define DEV_ENTRY_LINT0_PASS	0xbe
 #define DEV_ENTRY_LINT1_PASS	0xbf
+#define DEV_ENTRY_MODE_MASK	0x07
+#define DEV_ENTRY_MODE_SHIFT	0x09
 
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE		8192
@@ -116,6 +139,10 @@
 #define MMIO_CMD_SIZE_SHIFT	56
 #define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)
 
+/* constants for event buffer handling */
+#define EVT_BUFFER_SIZE		8192 /* 512 entries */
+#define EVT_LEN_MASK		(0x9ULL << 56)
+
 #define PAGE_MODE_1_LEVEL	0x01
 #define PAGE_MODE_2_LEVEL	0x02
 #define PAGE_MODE_3_LEVEL	0x03
@@ -134,6 +161,7 @@
 #define IOMMU_MAP_SIZE_L3	(1ULL << 39)
 
 #define IOMMU_PTE_P	(1ULL << 0)
+#define IOMMU_PTE_TV	(1ULL << 1)
 #define IOMMU_PTE_U	(1ULL << 59)
 #define IOMMU_PTE_FC	(1ULL << 60)
 #define IOMMU_PTE_IR	(1ULL << 61)
@@ -159,6 +187,9 @@
 
 #define MAX_DOMAIN_ID	65536
 
+/* FIXME: move this macro to <linux/pci.h> */
+#define PCI_BUS(x)	(((x) >> 8) & 0xff)
+
 /*
  * This structure contains generic data for IOMMU protection domains
  * independent of their use.
@@ -196,6 +227,15 @@ struct dma_ops_domain {
 	 * just calculate its address in constant time.
 	 */
 	u64 **pte_pages;
+
+	/* This will be set to true when TLB needs to be flushed */
+	bool need_flush;
+
+	/*
+	 * if this is a preallocated domain, keep the device for which it was
+	 * preallocated in this variable
+	 */
+	u16 target_dev;
 };
 
 /*
@@ -208,8 +248,9 @@ struct amd_iommu {
 	/* locks the accesses to the hardware */
 	spinlock_t lock;
 
-	/* device id of this IOMMU */
-	u16 devid;
+	/* Pointer to PCI device of this IOMMU */
+	struct pci_dev *dev;
+
 	/*
 	 * Capability pointer. There could be more than one IOMMU per PCI
 	 * device function if there are more than one AMD IOMMU capability
@@ -225,6 +266,9 @@ struct amd_iommu {
 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
 
+	/* pci domain of this IOMMU */
+	u16 pci_seg;
+
 	/* first device this IOMMU handles. read from PCI */
 	u16 first_device;
 	/* last device this IOMMU handles. read from PCI */
@@ -240,9 +284,19 @@ struct amd_iommu {
 	/* size of command buffer */
 	u32 cmd_buf_size;
 
+	/* event buffer virtual address */
+	u8 *evt_buf;
+	/* size of event buffer */
+	u32 evt_buf_size;
+	/* MSI number for event interrupt */
+	u16 evt_msi_num;
+
 	/* if one, we need to send a completion wait command */
 	int need_sync;
 
+	/* true if interrupts for this IOMMU are already enabled */
+	bool int_enabled;
+
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
@@ -322,6 +376,12 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
 /* will be 1 if device isolation is enabled */
 extern int amd_iommu_isolate;
 
+/*
+ * If true, the addresses will be flushed on unmap time, not when
+ * they are reused
+ */
+extern bool amd_iommu_unmap_flush;
+
 /* takes a PCI device id and prints it out in a readable form */
 static inline void print_devid(u16 devid, int nl)
 {
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 1311c82b165b..d76a0839abe9 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -134,9 +134,7 @@ static inline void ack_x2APIC_irq(void)
 static inline void ack_APIC_irq(void)
 {
 	/*
-	 * ack_APIC_irq() actually gets compiled as a single instruction:
-	 * - a single rmw on Pentium/82489DX
-	 * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
+	 * ack_APIC_irq() actually gets compiled as a single instruction
 	 * ... yummie.
 	 */
 
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 2439ae49e8ac..e1355f44d7c3 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -20,17 +20,22 @@
 
 #define _ASM_PTR	__ASM_SEL(.long, .quad)
 #define _ASM_ALIGN	__ASM_SEL(.balign 4, .balign 8)
-#define _ASM_MOV_UL	__ASM_SIZE(mov)
 
+#define _ASM_MOV	__ASM_SIZE(mov)
 #define _ASM_INC	__ASM_SIZE(inc)
 #define _ASM_DEC	__ASM_SIZE(dec)
 #define _ASM_ADD	__ASM_SIZE(add)
 #define _ASM_SUB	__ASM_SIZE(sub)
 #define _ASM_XADD	__ASM_SIZE(xadd)
+
 #define _ASM_AX		__ASM_REG(ax)
 #define _ASM_BX		__ASM_REG(bx)
 #define _ASM_CX		__ASM_REG(cx)
 #define _ASM_DX		__ASM_REG(dx)
+#define _ASM_SP		__ASM_REG(sp)
+#define _ASM_BP		__ASM_REG(bp)
+#define _ASM_SI		__ASM_REG(si)
+#define _ASM_DI		__ASM_REG(di)
 
 /* Exception table entry */
 # define _ASM_EXTABLE(from,to)	\
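
(Usage sketch, not part of the commit: the width-agnostic mnemonic macros let one inline-asm template build on both 32- and 64-bit kernels. copy_reg is illustrative, not a helper added here.)

	#include <asm/asm.h>

	/* expands to "movl %1, %0" on 32-bit and "movq %1, %0" on 64-bit */
	static inline unsigned long copy_reg(unsigned long src)
	{
		unsigned long dst;

		asm volatile(_ASM_MOV " %1, %0" : "=r" (dst) : "r" (src));
		return dst;
	}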
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 61989b93b475..451a74762bd4 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -424,16 +424,6 @@ static inline int fls(int x)
 
 #undef ADDR
 
-static inline void set_bit_string(unsigned long *bitmap,
-		unsigned long i, int len)
-{
-	unsigned long end = i + len;
-	while (i < end) {
-		__set_bit(i, bitmap);
-		i++;
-	}
-}
-
 #ifdef __KERNEL__
 
 #include <asm-generic/bitops/sched.h>
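
(Note, not part of the commit: code that still needs the removed helper's behavior can open-code it with __set_bit(). A minimal equivalent, assuming the caller serializes access to the bitmap; the name set_bit_range is hypothetical.)

	#include <linux/bitops.h>

	static inline void set_bit_range(unsigned long *bitmap,
					 unsigned long i, int len)
	{
		unsigned long end = i + len;

		while (i < end)
			__set_bit(i++, bitmap);	/* non-atomic; caller must lock */
	}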
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index ae514c76a96f..dc604985f2ad 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -3,7 +3,7 @@
 
 extern void check_bugs(void);
 
-#ifdef CONFIG_CPU_SUP_INTEL_32
+#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
 int ppro_with_ram_bug(void);
 #else
 static inline int ppro_with_ram_bug(void) { return 0; }
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 59859cb28a36..68840ef1b35a 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -24,6 +24,8 @@
 #define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
 	memcpy((dst), (src), (len))
 
+#define PG_non_WB				PG_arch_1
+PAGEFLAG(NonWB, non_WB)
 
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
@@ -66,6 +68,9 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 
+int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wb(unsigned long *addr, int addrinarray);
+
 /*
  * For legacy compatibility with the old APIs, a few functions
  * are provided that work on a "struct page".
@@ -96,8 +101,6 @@ int set_pages_rw(struct page *page, int numpages);
 
 void clflush_cache_range(void *addr, unsigned int size);
 
-void cpa_init(void);
-
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
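
(Usage sketch, not part of the commit: the new array variants change caching attributes for a batch of kernel virtual addresses in one call. with_uncached, addrs and count are hypothetical.)

	#include <asm/cacheflush.h>

	static int with_uncached(unsigned long *addrs, int count)
	{
		int err = set_memory_array_uc(addrs, count);	/* batch to UC */

		if (err)
			return err;
		/* ... touch the uncached pages ... */
		return set_memory_array_wb(addrs, count);	/* back to WB */
	}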
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 7ac4d93d20ed..adfeae6586e1 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -6,7 +6,7 @@
 
 #include <asm/required-features.h>
 
-#define NCAPINTS	8	/* N 32-bit words worth of info */
+#define NCAPINTS	9	/* N 32-bit words worth of info */
 
 /*
  * Note: If the comment begins with a quoted string, that string is used
@@ -80,6 +80,7 @@
 #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_PEBS	(3*32+12) /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS		(3*32+13) /* Branch Trace Store */
 #define X86_FEATURE_SYSCALL32	(3*32+14) /* "" syscall in ia32 userspace */
@@ -89,6 +90,7 @@
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_AMDC1E	(3*32+21) /* AMD C1E detected */
 #define X86_FEATURE_XTOPOLOGY	(3*32+21) /* cpu topology enum extensions */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
@@ -150,6 +152,13 @@
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
 
+/* Virtualization flags: Linux defined */
+#define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI	(8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT		(8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID	(8*32+ 4) /* Intel Virtual Processor ID */
+
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
 #include <linux/bitops.h>
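
(Note, not part of the commit: the new word-8 virtualization flags are why NCAPINTS grows from 8 to 9; they are tested like any other feature bit. The function name below is illustrative.)

	#include <asm/cpufeature.h>

	static inline int cpu_supports_ept(void)
	{
		return boot_cpu_has(X86_FEATURE_EPT);	/* word 8, bit 3 */
	}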
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index 5d200e78bd81..219c33d6361c 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -9,12 +9,12 @@
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
+#include <asm-generic/dma-coherent.h>
 
 extern dma_addr_t bad_dma_address;
 extern int iommu_merge;
-extern struct device fallback_dev;
+extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
-extern int force_iommu;
 
 struct dma_mapping_ops {
 	int (*mapping_error)(struct device *dev,
@@ -25,9 +25,6 @@ struct dma_mapping_ops {
 			void *vaddr, dma_addr_t dma_handle);
 	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
 			size_t size, int direction);
-	/* like map_single, but doesn't check the device mask */
-	dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
-			size_t size, int direction);
 	void (*unmap_single)(struct device *dev, dma_addr_t addr,
 			size_t size, int direction);
 	void (*sync_single_for_cpu)(struct device *hwdev,
@@ -68,7 +65,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 		return dma_ops;
 	else
 		return dev->archdata.dma_ops;
-#endif
+#endif /* ASM_X86__DMA_MAPPING_H */
 }
 
 /* Make sure we keep the same behaviour */
@@ -87,17 +84,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
+#define dma_is_consistent(d, h)	(1)
 
 extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
 
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_addr, gfp_t flag);
+
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
@@ -247,7 +241,68 @@ static inline int dma_get_cache_alignment(void)
 	return boot_cpu_data.x86_clflush_size;
 }
 
-#define dma_is_consistent(d, h)	(1)
+static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
+						    gfp_t gfp)
+{
+	unsigned long dma_mask = 0;
 
-#include <asm-generic/dma-coherent.h>
-#endif /* ASM_X86__DMA_MAPPING_H */
+	dma_mask = dev->coherent_dma_mask;
+	if (!dma_mask)
+		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+
+	return dma_mask;
+}
+
+static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
+{
+#ifdef CONFIG_X86_64
+	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+
+	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+		gfp |= GFP_DMA32;
+#endif
+	return gfp;
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	void *memory;
+
+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+		return memory;
+
+	if (!dev) {
+		dev = &x86_dma_fallback_dev;
+		gfp |= GFP_DMA;
+	}
+
+	if (!is_device_dma_capable(dev))
+		return NULL;
+
+	if (!ops->alloc_coherent)
+		return NULL;
+
+	return ops->alloc_coherent(dev, size, dma_handle,
+				   dma_alloc_coherent_gfp_flags(dev, gfp));
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t bus)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+	WARN_ON(irqs_disabled());       /* for portability */
+
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+		return;
+
+	if (ops->free_coherent)
+		ops->free_coherent(dev, size, vaddr, bus);
+}
+
+#endif
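
(Usage sketch, not part of the commit: dma_alloc_coherent() is now an inline that consults the per-device coherent pool, substitutes x86_dma_fallback_dev for a NULL device, and dispatches through ops->alloc_coherent with the gfp mask computed above. setup_ring and the buffer size are hypothetical.)

	#include <linux/dma-mapping.h>

	static int setup_ring(struct device *dev)
	{
		dma_addr_t handle;
		void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle,
					       GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		/* ... hand 'handle' to the device, use 'buf' from the CPU ... */
		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
		return 0;
	}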
diff --git a/include/asm-x86/ds.h b/include/asm-x86/ds.h
index 6b27c686fa10..c3c953a45b21 100644
--- a/include/asm-x86/ds.h
+++ b/include/asm-x86/ds.h
@@ -2,71 +2,237 @@
  * Debug Store (DS) support
  *
  * This provides a low-level interface to the hardware's Debug Store
- * feature that is used for last branch recording (LBR) and
+ * feature that is used for branch trace store (BTS) and
  * precise-event based sampling (PEBS).
  *
- * Different architectures use a different DS layout/pointer size.
- * The below functions therefore work on a void*.
+ * It manages:
+ * - per-thread and per-cpu allocation of BTS and PEBS
+ * - buffer memory allocation (optional)
+ * - buffer overflow handling
+ * - buffer access
  *
+ * It assumes:
+ * - get_task_struct on all parameter tasks
+ * - current is allowed to trace parameter tasks
  *
- * Since there is no user for PEBS, yet, only LBR (or branch
- * trace store, BTS) is supported.
  *
- *
- * Copyright (C) 2007 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
+ * Copyright (C) 2007-2008 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
  */
 
 #ifndef ASM_X86__DS_H
 #define ASM_X86__DS_H
 
+#ifdef CONFIG_X86_DS
+
 #include <linux/types.h>
 #include <linux/init.h>
 
-struct cpuinfo_x86;
 
+struct task_struct;
 
-/* a branch trace record entry
+/*
+ * Request BTS or PEBS
+ *
+ * Due to alignement constraints, the actual buffer may be slightly
+ * smaller than the requested or provided buffer.
  *
- * In order to unify the interface between various processor versions,
- * we use the below data structure for all processors.
+ * Returns 0 on success; -Eerrno otherwise
+ *
+ * task: the task to request recording for;
+ *       NULL for per-cpu recording on the current cpu
+ * base: the base pointer for the (non-pageable) buffer;
+ *       NULL if buffer allocation requested
+ * size: the size of the requested or provided buffer
+ * ovfl: pointer to a function to be called on buffer overflow;
+ *       NULL if cyclic buffer requested
  */
-enum bts_qualifier {
-	BTS_INVALID = 0,
-	BTS_BRANCH,
-	BTS_TASK_ARRIVES,
-	BTS_TASK_DEPARTS
-};
+typedef void (*ds_ovfl_callback_t)(struct task_struct *);
+extern int ds_request_bts(struct task_struct *task, void *base, size_t size,
+			  ds_ovfl_callback_t ovfl);
+extern int ds_request_pebs(struct task_struct *task, void *base, size_t size,
+			   ds_ovfl_callback_t ovfl);
+
+/*
+ * Release BTS or PEBS resources
+ *
+ * Frees buffers allocated on ds_request.
+ *
+ * Returns 0 on success; -Eerrno otherwise
+ *
+ * task: the task to release resources for;
+ *       NULL to release resources for the current cpu
+ */
+extern int ds_release_bts(struct task_struct *task);
+extern int ds_release_pebs(struct task_struct *task);
+
+/*
+ * Return the (array) index of the write pointer.
+ * (assuming an array of BTS/PEBS records)
+ *
+ * Returns -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * pos (out): if not NULL, will hold the result
+ */
+extern int ds_get_bts_index(struct task_struct *task, size_t *pos);
+extern int ds_get_pebs_index(struct task_struct *task, size_t *pos);
+
+/*
+ * Return the (array) index one record beyond the end of the array.
+ * (assuming an array of BTS/PEBS records)
+ *
+ * Returns -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * pos (out): if not NULL, will hold the result
+ */
+extern int ds_get_bts_end(struct task_struct *task, size_t *pos);
+extern int ds_get_pebs_end(struct task_struct *task, size_t *pos);
+
+/*
+ * Provide a pointer to the BTS/PEBS record at parameter index.
+ * (assuming an array of BTS/PEBS records)
+ *
+ * The pointer points directly into the buffer. The user is
+ * responsible for copying the record.
+ *
+ * Returns the size of a single record on success; -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * index: the index of the requested record
+ * record (out): pointer to the requested record
+ */
+extern int ds_access_bts(struct task_struct *task,
+			 size_t index, const void **record);
+extern int ds_access_pebs(struct task_struct *task,
+			  size_t index, const void **record);
+
+/*
+ * Write one or more BTS/PEBS records at the write pointer index and
+ * advance the write pointer.
+ *
+ * If size is not a multiple of the record size, trailing bytes are
+ * zeroed out.
+ *
+ * May result in one or more overflow notifications.
+ *
+ * If called during overflow handling, that is, with index >=
+ * interrupt threshold, the write will wrap around.
+ *
+ * An overflow notification is given if and when the interrupt
+ * threshold is reached during or after the write.
+ *
+ * Returns the number of bytes written or -Eerrno.
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * buffer: the buffer to write
+ * size: the size of the buffer
+ */
+extern int ds_write_bts(struct task_struct *task,
+			const void *buffer, size_t size);
+extern int ds_write_pebs(struct task_struct *task,
+			 const void *buffer, size_t size);
+
+/*
+ * Same as ds_write_bts/pebs, but omit ownership checks.
+ *
+ * This is needed to have some other task than the owner of the
+ * BTS/PEBS buffer or the parameter task itself write into the
+ * respective buffer.
+ */
+extern int ds_unchecked_write_bts(struct task_struct *task,
+				  const void *buffer, size_t size);
+extern int ds_unchecked_write_pebs(struct task_struct *task,
+				   const void *buffer, size_t size);
+
+/*
+ * Reset the write pointer of the BTS/PEBS buffer.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ */
+extern int ds_reset_bts(struct task_struct *task);
+extern int ds_reset_pebs(struct task_struct *task);
+
+/*
+ * Clear the BTS/PEBS buffer and reset the write pointer.
+ * The entire buffer will be zeroed out.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ */
+extern int ds_clear_bts(struct task_struct *task);
+extern int ds_clear_pebs(struct task_struct *task);
+
+/*
+ * Provide the PEBS counter reset value.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * value (out): the counter reset value
+ */
+extern int ds_get_pebs_reset(struct task_struct *task, u64 *value);
+
+/*
+ * Set the PEBS counter reset value.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * value: the new counter reset value
+ */
+extern int ds_set_pebs_reset(struct task_struct *task, u64 value);
+
+/*
+ * Initialization
+ */
+struct cpuinfo_x86;
+extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
+
+
 
-struct bts_struct {
-	u64 qualifier;
-	union {
-		/* BTS_BRANCH */
-		struct {
-			u64 from_ip;
-			u64 to_ip;
-		} lbr;
-		/* BTS_TASK_ARRIVES or
-		   BTS_TASK_DEPARTS */
-		u64 jiffies;
-	} variant;
+/*
+ * The DS context - part of struct thread_struct.
+ */
+struct ds_context {
+	/* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
+	unsigned char *ds;
+	/* the owner of the BTS and PEBS configuration, respectively */
+	struct task_struct *owner[2];
+	/* buffer overflow notification function for BTS and PEBS */
+	ds_ovfl_callback_t callback[2];
+	/* the original buffer address */
+	void *buffer[2];
+	/* the number of allocated pages for on-request allocated buffers */
+	unsigned int pages[2];
+	/* use count */
+	unsigned long count;
+	/* a pointer to the context location inside the thread_struct
+	 * or the per_cpu context array */
+	struct ds_context **this;
+	/* a pointer to the task owning this context, or NULL, if the
+	 * context is owned by a cpu */
+	struct task_struct *task;
 };
 
-/* Overflow handling mechanisms */
-#define DS_O_SIGNAL	1 /* send overflow signal */
-#define DS_O_WRAP	2 /* wrap around */
-
-extern int ds_allocate(void **, size_t);
-extern int ds_free(void **);
-extern int ds_get_bts_size(void *);
-extern int ds_get_bts_end(void *);
-extern int ds_get_bts_index(void *);
-extern int ds_set_overflow(void *, int);
-extern int ds_get_overflow(void *);
-extern int ds_clear(void *);
-extern int ds_read_bts(void *, int, struct bts_struct *);
-extern int ds_write_bts(void *, const struct bts_struct *);
-extern unsigned long ds_debugctl_mask(void);
-extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c);
+/* called by exit_thread() to free leftover contexts */
+extern void ds_free(struct ds_context *context);
+
+#else /* CONFIG_X86_DS */
+
+#define ds_init_intel(config) do {} while (0)
 
+#endif /* CONFIG_X86_DS */
 #endif /* ASM_X86__DS_H */
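
(Usage sketch, not part of the commit: the new interface is request/access/release, with an optional overflow callback. Error handling is trimmed; trace_bts and on_overflow are hypothetical.)

	#include <asm/ds.h>

	static void on_overflow(struct task_struct *task)
	{
		/* invoked when the interrupt threshold is reached */
	}

	static int trace_bts(struct task_struct *task)
	{
		size_t index;
		const void *record;
		/* NULL base: ask the kernel to allocate the buffer */
		int err = ds_request_bts(task, NULL, 4096, on_overflow);

		if (err)
			return err;
		if (!ds_get_bts_index(task, &index) && index > 0)
			ds_access_bts(task, 0, &record);	/* oldest record */
		return ds_release_bts(task);
	}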
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index cd678b2d6a74..5c4745bec906 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -148,8 +148,9 @@ do { \
 
 static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
 {
-	asm volatile("movl %0,%%fs" :: "r" (0));
-	asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS));
+	loadsegment(fs, 0);
+	loadsegment(ds, __USER32_DS);
+	loadsegment(es, __USER32_DS);
 	load_gs_index(0);
 	regs->ip = ip;
 	regs->sp = sp;
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 07f445844146..605edb39ef9e 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -29,6 +29,8 @@ extern int fix_aperture;
 #define AMD64_GARTCACHECTL	0x9c
 #define AMD64_GARTEN		(1<<0)
 
+extern int agp_amd64_init(void);
+
 static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
 {
 	u32 tmp, ctl;
@@ -52,15 +54,15 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
 		return 0;
 
 	if (aper_base + aper_size > 0x100000000ULL) {
-		printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
+		printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
 		return 0;
 	}
 	if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
-		printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
+		printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
 		return 0;
 	}
 	if (aper_size < min_size) {
-		printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n",
+		printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
 			aper_size>>20, min_size>>20);
 		return 0;
 	}
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h
index dc9c7944847b..baa3f783d27d 100644
--- a/include/asm-x86/idle.h
+++ b/include/asm-x86/idle.h
@@ -10,4 +10,6 @@ void idle_notifier_register(struct notifier_block *n);
 void enter_idle(void);
 void exit_idle(void);
 
+void c1e_remove_cpu(int cpu);
+
 #endif /* ASM_X86__IDLE_H */
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index e86f44148c66..546ad3110fea 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -6,6 +6,7 @@ extern void no_iommu_init(void);
 extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+extern int dmar_disabled;
 
 extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
 
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h
index 83a7ee228ab1..d283863354de 100644
--- a/include/asm-x86/kgdb.h
+++ b/include/asm-x86/kgdb.h
@@ -39,12 +39,13 @@ enum regnames {
 	GDB_FS,			/* 14 */
 	GDB_GS,			/* 15 */
 };
+#define NUMREGBYTES		((GDB_GS+1)*4)
 #else /* ! CONFIG_X86_32 */
-enum regnames {
+enum regnames64 {
 	GDB_AX,			/* 0 */
-	GDB_DX,			/* 1 */
+	GDB_BX,			/* 1 */
 	GDB_CX,			/* 2 */
-	GDB_BX,			/* 3 */
+	GDB_DX,			/* 3 */
 	GDB_SI,			/* 4 */
 	GDB_DI,			/* 5 */
 	GDB_BP,			/* 6 */
@@ -58,18 +59,15 @@ enum regnames {
 	GDB_R14,		/* 14 */
 	GDB_R15,		/* 15 */
 	GDB_PC,			/* 16 */
-	GDB_PS,			/* 17 */
 };
-#endif /* CONFIG_X86_32 */
 
-/*
- * Number of bytes of registers:
- */
-#ifdef CONFIG_X86_32
-# define NUMREGBYTES		64
-#else
-# define NUMREGBYTES		((GDB_PS+1)*8)
-#endif
+enum regnames32 {
+	GDB_PS = 34,
+	GDB_CS,
+	GDB_SS,
+};
+#define NUMREGBYTES		((GDB_SS+1)*4)
+#endif /* CONFIG_X86_32 */
 
 static inline void arch_kgdb_breakpoint(void)
 {
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h
index 6184561980f2..94b6cdf532e2 100644
--- a/include/asm-x86/mach-rdc321x/gpio.h
+++ b/include/asm-x86/mach-rdc321x/gpio.h
@@ -1,6 +1,8 @@
 #ifndef ASM_X86__MACH_RDC321X__GPIO_H
 #define ASM_X86__MACH_RDC321X__GPIO_H
 
+#include <linux/kernel.h>
+
 extern int rdc_gpio_get_value(unsigned gpio);
 extern void rdc_gpio_set_value(unsigned gpio, int value);
 extern int rdc_gpio_direction_input(unsigned gpio);
@@ -18,6 +20,7 @@ static inline int gpio_request(unsigned gpio, const char *label)
 
 static inline void gpio_free(unsigned gpio)
 {
+	might_sleep();
 	rdc_gpio_free(gpio);
 }
 
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index a30d7a9c8297..9d5aff14334a 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -7,14 +7,9 @@
 /*
  * The x86 doesn't have a mmu context, but
  * we put the segment information here.
- *
- * cpu_vm_mask is used to optimize ldt flushing.
  */
 typedef struct {
 	void *ldt;
-#ifdef CONFIG_X86_64
-	rwlock_t ldtlock;
-#endif
 	int size;
 	struct mutex lock;
 	void *vdso;
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index 118da365e371..be2241a818f1 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -5,11 +5,12 @@
 
 #include <asm/mpspec_def.h>
 
+extern int apic_version[MAX_APICS];
+
 #ifdef CONFIG_X86_32
 #include <mach_mpspec.h>
 
 extern unsigned int def_to_bigsmp;
-extern int apic_version[MAX_APICS];
 extern u8 apicid_2_node[];
 extern int pic_mode;
 
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 3052f058ab06..0bb43301a202 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -176,6 +176,7 @@
 #define MSR_IA32_TSC			0x00000010
 #define MSR_IA32_PLATFORM_ID		0x00000017
 #define MSR_IA32_EBL_CR_POWERON		0x0000002a
+#define MSR_IA32_FEATURE_CONTROL	0x0000003a
 
 #define MSR_IA32_APICBASE		0x0000001b
 #define MSR_IA32_APICBASE_BSP		(1<<8)
@@ -310,4 +311,19 @@
 /* Geode defined MSRs */
 #define MSR_GEODE_BUSCONT_CONF0		0x00001900
 
+/* Intel VT MSRs */
+#define MSR_IA32_VMX_BASIC		0x00000480
+#define MSR_IA32_VMX_PINBASED_CTLS	0x00000481
+#define MSR_IA32_VMX_PROCBASED_CTLS	0x00000482
+#define MSR_IA32_VMX_EXIT_CTLS		0x00000483
+#define MSR_IA32_VMX_ENTRY_CTLS		0x00000484
+#define MSR_IA32_VMX_MISC		0x00000485
+#define MSR_IA32_VMX_CR0_FIXED0		0x00000486
+#define MSR_IA32_VMX_CR0_FIXED1		0x00000487
+#define MSR_IA32_VMX_CR4_FIXED0		0x00000488
+#define MSR_IA32_VMX_CR4_FIXED1		0x00000489
+#define MSR_IA32_VMX_VMCS_ENUM		0x0000048a
+#define MSR_IA32_VMX_PROCBASED_CTLS2	0x0000048b
+#define MSR_IA32_VMX_EPT_VPID_CAP	0x0000048c
+
 #endif /* ASM_X86__MSR_INDEX_H */
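
(Note, not part of the commit: the VT constants are ordinary MSR indices, read with the normal MSR accessors. An illustrative probe; vmx_basic is hypothetical, and the read assumes a VMX-capable CPU — prefer rdmsrl_safe() when that is not guaranteed, since reading an absent MSR faults.)

	#include <asm/msr.h>

	static u64 vmx_basic(void)
	{
		u64 caps;

		rdmsrl(MSR_IA32_VMX_BASIC, caps);
		return caps;
	}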
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index f8b76f383904..d5e715f024dc 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -34,6 +34,7 @@ extern void stop_apic_nmi_watchdog(void *);
 extern void disable_timer_nmi_watchdog(void);
 extern void enable_timer_nmi_watchdog(void);
 extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+extern void cpu_nmi_set_wd_enabled(void);
 
 extern atomic_t nmi_active;
 extern unsigned int nmi_watchdog;
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index 79544e6ffb8b..c91574776751 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -57,6 +57,7 @@ typedef struct { pgdval_t pgd; } pgd_t;
 typedef struct { pgprotval_t pgprot; } pgprot_t;
 
 extern int page_is_ram(unsigned long pagenr);
+extern int pagerange_is_ram(unsigned long start, unsigned long end);
 extern int devmem_is_allowed(unsigned long pagenr);
 extern void map_devmem(unsigned long pfn, unsigned long size,
 		       pgprot_t vma_prot);
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index f32062a821c5..72f7305682c6 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -89,9 +89,6 @@ extern int nx_enabled;
 extern unsigned int __VMALLOC_RESERVE;
 extern int sysctl_legacy_va_layout;
 
-#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM			(-__PAGE_OFFSET - __VMALLOC_RESERVE)
-
 extern void find_low_pfn_range(void);
 extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index cba612c89e06..d7d358a43996 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -252,13 +252,13 @@ struct pv_mmu_ops {
 	 * Hooks for allocating/releasing pagetable pages when they're
 	 * attached to a pagetable
 	 */
-	void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
-	void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
-	void (*release_pte)(u32 pfn);
-	void (*release_pmd)(u32 pfn);
-	void (*release_pud)(u32 pfn);
+	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
+	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
+	void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
+	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
+	void (*release_pte)(unsigned long pfn);
+	void (*release_pmd)(unsigned long pfn);
+	void (*release_pud)(unsigned long pfn);
 
 	/* Pagetable manipulation functions */
 	void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -986,35 +986,35 @@ static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
 }
 
-static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
 {
 	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
 }
-static inline void paravirt_release_pte(unsigned pfn)
+static inline void paravirt_release_pte(unsigned long pfn)
 {
 	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
 }
 
-static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 {
 	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }
 
-static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
-					    unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
+					    unsigned long start, unsigned long count)
 {
 	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
 }
-static inline void paravirt_release_pmd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned long pfn)
 {
 	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
 }
 
-static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
 	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
 }
-static inline void paravirt_release_pud(unsigned pfn)
+static inline void paravirt_release_pud(unsigned long pfn)
 {
 	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
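
(Note, not part of the commit — the arithmetic behind the u32 -> unsigned long switch: a 32-bit pfn can name at most 2^32 page frames, i.e. 2^32 * 4 KiB = 16 TiB of physical address space, which 64-bit machines can exceed; pointer-sized pfns remove that ceiling. The constant below is illustrative only.)

	/* the ceiling a u32 pfn imposes with 4 KiB pages: 2^(32+12) = 16 TiB */
	#define U32_PFN_CEILING_BYTES	(1ULL << (32 + 12))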
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 60440b191626..81762081dcd8 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -53,9 +53,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
 #define pte_none(x)		(!(x).pte_low)
-#define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
 
 /*
  * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index e713bd5f39a6..75f4276b5ddb 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -151,18 +151,11 @@ static inline int pte_same(pte_t a, pte_t b)
 	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
 }
 
-#define pte_page(x)	pfn_to_page(pte_pfn(x))
-
 static inline int pte_none(pte_t pte)
 {
 	return !pte.pte_low && !pte.pte_high;
 }
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
-}
-
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 57d919a2d79d..ed932453ef26 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -19,6 +19,7 @@
 #define _PAGE_BIT_UNUSED3	11
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
 #define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
+#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
 #define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
 
 #define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
@@ -36,6 +37,7 @@
 #define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 #define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
 #define __HAVE_ARCH_PTE_SPECIAL
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
@@ -130,6 +132,17 @@
 #define __S110	PAGE_SHARED_EXEC
 #define __S111	PAGE_SHARED_EXEC
 
+/*
+ * early identity mapping pte attrib macros.
+ */
+#ifdef CONFIG_X86_64
+#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
+#else
+#define PTE_IDENT_ATTR	0x003		/* PRESENT+RW */
+#define PDE_IDENT_ATTR	0x063		/* PRESENT+RW+DIRTY+ACCESSED */
+#define PGD_IDENT_ATTR	0x001		/* PRESENT (no other attributes) */
+#endif
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -186,6 +199,13 @@ static inline int pte_special(pte_t pte)
 	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
+
 static inline int pmd_large(pmd_t pte)
 {
 	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
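
(Sketch, not part of the commit: with pte_pfn() and pte_page() defined once here, the per-level copies removed above are no longer needed; the helpers compose as before, with PTE_PFN_MASK stripping NX and flag bits before the shift. page_of and p are hypothetical.)

	static inline struct page *page_of(pte_t p)
	{
		return pte_page(p);	/* == pfn_to_page(pte_pfn(p)) */
	}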
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 45c8235400fe..8de702dc7d62 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -57,8 +57,7 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
  * area for the same reason. ;)
  */
 #define VMALLOC_OFFSET	(8 * 1024 * 1024)
-#define VMALLOC_START	(((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
-			 & ~(VMALLOC_OFFSET - 1))
+#define VMALLOC_START	((unsigned long)high_memory + VMALLOC_OFFSET)
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
 #else
@@ -74,6 +73,8 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 # define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
 #endif
 
+#define MAXMEM	(VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
 /*
  * Define this if things work differently on an i386 and an i486:
  * it will (on an i486) warn about kernel memory accesses that are
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index e3dcf7a08a0b..fde9770e53d1 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -175,8 +175,6 @@ static inline int pmd_bad(pmd_t pmd)
 #define pte_present(x)	(pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 
 #define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))   /* FIXME: is this right? */
-#define pte_page(x)	pfn_to_page(pte_pfn((x)))
-#define pte_pfn(x)	((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
 /*
  * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index bbbbe1fc5ce1..c7d35464a4bb 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -20,6 +20,7 @@ struct mm_struct;
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+#include <asm/ds.h>
 
 #include <linux/personality.h>
 #include <linux/cpumask.h>
@@ -75,9 +76,9 @@ struct cpuinfo_x86 {
 	int			x86_tlbsize;
 	__u8			x86_virt_bits;
 	__u8			x86_phys_bits;
+#endif
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
-#endif
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */
@@ -166,11 +167,7 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
 
 extern void detect_extended_topology(struct cpuinfo_x86 *c);
-#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
 extern void detect_ht(struct cpuinfo_x86 *c);
-#else
-static inline void detect_ht(struct cpuinfo_x86 *c) {}
-#endif
 
 static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 				unsigned int *ecx, unsigned int *edx)
@@ -434,9 +431,14 @@ struct thread_struct {
 	unsigned		io_bitmap_max;
 /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
 	unsigned long	debugctlmsr;
-/* Debug Store - if not 0 points to a DS Save Area configuration;
- * goes into MSR_IA32_DS_AREA */
-	unsigned long	ds_area_msr;
+#ifdef CONFIG_X86_DS
+/* Debug Store context; see include/asm-x86/ds.h; goes into MSR_IA32_DS_AREA */
+	struct ds_context	*ds_ctx;
+#endif /* CONFIG_X86_DS */
+#ifdef CONFIG_X86_PTRACE_BTS
+/* the signal to send on a bts buffer overflow */
+	unsigned int bts_ovfl_signal;
+#endif /* CONFIG_X86_PTRACE_BTS */
 };
 
 static inline unsigned long native_get_debugreg(int regno)
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h
index d0cf3344a586..4298b8882a78 100644
--- a/include/asm-x86/ptrace-abi.h
+++ b/include/asm-x86/ptrace-abi.h
@@ -80,8 +80,9 @@
 
 #define PTRACE_SINGLEBLOCK	33	/* resume execution until next branch */
 
-#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_PTRACE_BTS
 
+#ifndef __ASSEMBLY__
 #include <asm/types.h>
 
 /* configuration/status structure used in PTRACE_BTS_CONFIG and
@@ -97,20 +98,20 @@ struct ptrace_bts_config {
 	/* actual size of bts_struct in bytes */
 	__u32 bts_size;
 };
-#endif
+#endif /* __ASSEMBLY__ */
 
 #define PTRACE_BTS_O_TRACE	0x1 /* branch trace */
 #define PTRACE_BTS_O_SCHED	0x2 /* scheduling events w/ jiffies */
 #define PTRACE_BTS_O_SIGNAL	0x4 /* send SIG<signal> on buffer overflow
 				       instead of wrapping around */
-#define PTRACE_BTS_O_CUT_SIZE	0x8 /* cut requested size to max available
-				       instead of failing */
+#define PTRACE_BTS_O_ALLOC	0x8 /* (re)allocate buffer */
 
 #define PTRACE_BTS_CONFIG	40
 /* Configure branch trace recording.
    ADDR points to a struct ptrace_bts_config.
    DATA gives the size of that buffer.
-   A new buffer is allocated, iff the size changes.
+   A new buffer is allocated, if requested in the flags.
+   An overflow signal may only be requested for new buffers.
    Returns the number of bytes read.
 */
 #define PTRACE_BTS_STATUS	41
@@ -119,7 +120,7 @@ struct ptrace_bts_config {
    Returns the number of bytes written.
 */
 #define PTRACE_BTS_SIZE	42
-/* Return the number of available BTS records.
+/* Return the number of available BTS records for draining.
    DATA and ADDR are ignored.
 */
 #define PTRACE_BTS_GET		43
@@ -139,5 +140,6 @@ struct ptrace_bts_config {
    BTS records are read from oldest to newest.
    Returns number of BTS records drained.
 */
+#endif /* CONFIG_X86_PTRACE_BTS */
 
 #endif /* ASM_X86__PTRACE_ABI_H */
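
Seen from the debugger side, the revised request asks explicitly for (re)allocation via PTRACE_BTS_O_ALLOC instead of the old size-change heuristic. A hedged userspace sketch follows; the ptrace_bts_config fields other than bts_size are assumptions inferred from the flag comments, not text shown in this hunk:

/* Hypothetical tracer snippet: request a freshly allocated BTS buffer
 * with branch tracing enabled.  Fields besides bts_size are assumed. */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <linux/types.h>
#include <stdio.h>

#define PTRACE_BTS_CONFIG	40
#define PTRACE_BTS_O_TRACE	0x1	/* branch trace */
#define PTRACE_BTS_O_ALLOC	0x8	/* (re)allocate buffer */

struct ptrace_bts_config {
	__u32 size;	/* requested buffer size in bytes (assumed field) */
	__u32 flags;	/* PTRACE_BTS_O_* (assumed field) */
	__u32 signal;	/* overflow signal, if requested (assumed field) */
	__u32 bts_size;	/* out: actual size of bts_struct in bytes */
};

static int bts_enable(pid_t pid)
{
	struct ptrace_bts_config cfg = {
		.size	= 4096,
		.flags	= PTRACE_BTS_O_TRACE | PTRACE_BTS_O_ALLOC,
	};

	/* Per the comment above: ADDR is the config, DATA its size. */
	if (ptrace(PTRACE_BTS_CONFIG, pid, &cfg, (void *)sizeof(cfg)) < 0) {
		perror("PTRACE_BTS_CONFIG");
		return -1;
	}
	printf("bts_struct is %u bytes\n", cfg.bts_size);
	return 0;
}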
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 66ff7bd47379..d64a61097165 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -127,14 +127,48 @@ struct pt_regs {
 #endif /* __KERNEL__ */
 #endif /* !__i386__ */
 
+
+#ifdef CONFIG_X86_PTRACE_BTS
+/* a branch trace record entry
+ *
+ * In order to unify the interface between various processor versions,
+ * we use the below data structure for all processors.
+ */
+enum bts_qualifier {
+	BTS_INVALID = 0,
+	BTS_BRANCH,
+	BTS_TASK_ARRIVES,
+	BTS_TASK_DEPARTS
+};
+
+struct bts_struct {
+	__u64 qualifier;
+	union {
+		/* BTS_BRANCH */
+		struct {
+			__u64 from_ip;
+			__u64 to_ip;
+		} lbr;
+		/* BTS_TASK_ARRIVES or
+		   BTS_TASK_DEPARTS */
+		__u64 jiffies;
+	} variant;
+};
+#endif /* CONFIG_X86_PTRACE_BTS */
+
 #ifdef __KERNEL__
 
-/* the DS BTS struct is used for ptrace as well */
-#include <asm/ds.h>
+#include <linux/init.h>
 
+struct cpuinfo_x86;
 struct task_struct;
 
+#ifdef CONFIG_X86_PTRACE_BTS
+extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *);
 extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
+#else
+#define ptrace_bts_init_intel(config) do {} while (0)
+#endif /* CONFIG_X86_PTRACE_BTS */
 
 extern unsigned long profile_pc(struct pt_regs *regs);
 
@@ -216,6 +250,11 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
 	return regs->bp;
 }
 
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+	return regs->sp;
+}
+
 /*
  * These are defined as per linux/ptrace.h, which see.
  */
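
The bts_struct added above hides the processor-specific record formats behind one qualifier field: a record is either a branch (from/to instruction pointers) or a scheduling timestamp. An illustrative consumer, a sketch rather than code from this patch:

/* Sketch: decode one bts_struct record; not part of this patch. */
static void print_bts_record(const struct bts_struct *bts)
{
	switch (bts->qualifier) {
	case BTS_BRANCH:
		printk(KERN_DEBUG "branch %llx -> %llx\n",
		       (unsigned long long)bts->variant.lbr.from_ip,
		       (unsigned long long)bts->variant.lbr.to_ip);
		break;
	case BTS_TASK_ARRIVES:
	case BTS_TASK_DEPARTS:
		printk(KERN_DEBUG "sched event at jiffies %llu\n",
		       (unsigned long long)bts->variant.jiffies);
		break;
	default:
		break;	/* BTS_INVALID or future qualifiers */
	}
}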
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 519a8ecbfc95..e39376d7de50 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -7,7 +7,7 @@
 do {								\
 	if (pm_trace_enabled) {					\
 		const void *tracedata;				\
-		asm volatile(_ASM_MOV_UL " $1f,%0\n"		\
+		asm volatile(_ASM_MOV " $1f,%0\n"		\
 			".section .tracedata,\"a\"\n"		\
 			"1:\t.word %c1\n\t"			\
 			_ASM_PTR " %c2\n"			\
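
The rename above brings the macro in line with <asm/asm.h> (changed elsewhere in this merge), where the width-sized mnemonics are presumed to be built roughly as follows; this is a sketch for orientation, not content of this diff:

/* Presumed shape of the <asm/asm.h> helpers behind _ASM_MOV. */
#ifndef __ASSEMBLY__
# define __ASM_FORM(x)		" " #x " "
#endif
#ifdef CONFIG_X86_32
# define __ASM_SEL(a, b)	__ASM_FORM(a)
#else
# define __ASM_SEL(a, b)	__ASM_FORM(b)
#endif
#define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
#define _ASM_MOV		__ASM_SIZE(mov)	/* "movl" or "movq" */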
diff --git a/include/asm-x86/syscall.h b/include/asm-x86/syscall.h
new file mode 100644
index 000000000000..04c47dc5597c
--- /dev/null
+++ b/include/asm-x86/syscall.h
@@ -0,0 +1,211 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H	1
+
+#include <linux/sched.h>
+#include <linux/err.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+				  struct pt_regs *regs)
+{
+	/*
+	 * We always sign-extend a -1 value being set here,
+	 * so this is always either -1L or a syscall number.
+	 */
+	return regs->orig_ax;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	regs->ax = regs->orig_ax;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+				     struct pt_regs *regs)
+{
+	unsigned long error = regs->ax;
+#ifdef CONFIG_IA32_EMULATION
+	/*
+	 * TS_COMPAT is set for 32-bit syscall entries and then
+	 * remains set until we return to user mode.
+	 */
+	if (task_thread_info(task)->status & TS_COMPAT)
+		/*
+		 * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
+		 * and will match correctly in comparisons.
+		 */
+		error = (long) (int) error;
+#endif
+	return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->ax;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	regs->ax = (long) error ?: val;
+}
+
+#ifdef CONFIG_X86_32
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+	memcpy(args, &regs->bx + i, n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 const unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+	memcpy(&regs->bx + i, args, n * sizeof(args[0]));
+}
+
+#else	 /* CONFIG_X86_64 */
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+# ifdef CONFIG_IA32_EMULATION
+	if (task_thread_info(task)->status & TS_COMPAT)
+		switch (i + n) {
+		case 6:
+			if (!n--) break;
+			*args++ = regs->bp;
+		case 5:
+			if (!n--) break;
+			*args++ = regs->di;
+		case 4:
+			if (!n--) break;
+			*args++ = regs->si;
+		case 3:
+			if (!n--) break;
+			*args++ = regs->dx;
+		case 2:
+			if (!n--) break;
+			*args++ = regs->cx;
+		case 1:
+			if (!n--) break;
+			*args++ = regs->bx;
+		case 0:
+			if (!n--) break;
+		default:
+			BUG();
+			break;
+		}
+	else
+# endif
+		switch (i + n) {
+		case 6:
+			if (!n--) break;
+			*args++ = regs->r9;
+		case 5:
+			if (!n--) break;
+			*args++ = regs->r8;
+		case 4:
+			if (!n--) break;
+			*args++ = regs->r10;
+		case 3:
+			if (!n--) break;
+			*args++ = regs->dx;
+		case 2:
+			if (!n--) break;
+			*args++ = regs->si;
+		case 1:
+			if (!n--) break;
+			*args++ = regs->di;
+		case 0:
+			if (!n--) break;
+		default:
+			BUG();
+			break;
+		}
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 const unsigned long *args)
+{
+# ifdef CONFIG_IA32_EMULATION
+	if (task_thread_info(task)->status & TS_COMPAT)
+		switch (i + n) {
+		case 6:
+			if (!n--) break;
+			regs->bp = *args++;
+		case 5:
+			if (!n--) break;
+			regs->di = *args++;
+		case 4:
+			if (!n--) break;
+			regs->si = *args++;
+		case 3:
+			if (!n--) break;
+			regs->dx = *args++;
+		case 2:
+			if (!n--) break;
+			regs->cx = *args++;
+		case 1:
+			if (!n--) break;
+			regs->bx = *args++;
+		case 0:
+			if (!n--) break;
+		default:
+			BUG();
+		}
+	else
+# endif
+		switch (i + n) {
+		case 6:
+			if (!n--) break;
+			regs->r9 = *args++;
+		case 5:
+			if (!n--) break;
+			regs->r8 = *args++;
+		case 4:
+			if (!n--) break;
+			regs->r10 = *args++;
+		case 3:
+			if (!n--) break;
+			regs->dx = *args++;
+		case 2:
+			if (!n--) break;
+			regs->si = *args++;
+		case 1:
+			if (!n--) break;
+			regs->di = *args++;
+		case 0:
+			if (!n--) break;
+		default:
+			BUG();
+		}
+}
+
+#endif	/* CONFIG_X86_32 */
+
+#endif	/* _ASM_SYSCALL_H */
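
The fall-through switches above copy one register per case, entering at case i+n and decrementing n at each step until the requested count is exhausted; the trailing default: BUG() catches out-of-range windows. A hedged sketch of a caller, e.g. in syscall-tracing code; the function name is illustrative, not from this patch:

/* Illustrative caller: report the current syscall, its first
 * argument, and any pending error return. */
static void report_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long arg0;
	long nr = syscall_get_nr(task, regs);

	if (nr < 0)
		return;		/* task is not inside a system call */

	/* fetch one argument: the window starting at index 0, length 1 */
	syscall_get_arguments(task, regs, 0, 1, &arg0);
	printk(KERN_DEBUG "syscall %ld(%lx, ...) error %ld\n",
	       nr, arg0, syscall_get_error(task, regs));
}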
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 30586f2ee558..3f4e52bb77f5 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -71,6 +71,7 @@ struct thread_info {
  * Warning: layout of LSW is hardcoded in entry.S
  */
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
@@ -93,6 +94,7 @@ struct thread_info {
 #define TIF_BTS_TRACE_TS	27	/* record scheduling event timestamps */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
@@ -133,7 +135,7 @@ struct thread_info {
 
 /* Only used for 64 bit */
 #define _TIF_DO_NOTIFY_MASK						\
-	(_TIF_SIGPENDING|_TIF_MCE_NOTIFY)
+	(_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
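
The new TIF_NOTIFY_RESUME bit requests a callback on the way back to user mode; adding it to _TIF_DO_NOTIFY_MASK makes the 64-bit exit path honor it. A brief sketch of the intended usage pattern, assuming the standard <linux/sched.h> flag helpers; the consumer shown (tracehook_notify_resume()) is an assumption here:

/* Sketch: raise the flag on a task to request the callback ... */
set_tsk_thread_flag(task, TIF_NOTIFY_RESUME);

/* ... and service it in the return-to-user path: */
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
	clear_thread_flag(TIF_NOTIFY_RESUME);
	tracehook_notify_resume(regs);
}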
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 5cfd2951c9e7..c96c1f5d07a2 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -7,6 +7,7 @@
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/prefetch.h>
+#include <linux/lockdep.h>
 #include <asm/page.h>
 
 /*