Diffstat (limited to 'arch/x86')
40 files changed, 226 insertions, 99 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e60c59b81bdd..ac22bb7719f7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -167,9 +167,12 @@ config GENERIC_PENDING_IRQ
 config X86_SMP
 	bool
 	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
-	select USE_GENERIC_SMP_HELPERS
 	default y
 
+config USE_GENERIC_SMP_HELPERS
+	def_bool y
+	depends on SMP
+
 config X86_32_SMP
 	def_bool y
 	depends on X86_32 && SMP
@@ -1494,7 +1497,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool X86_64
 	depends on NUMA
 
-menu "Power management options"
+menu "Power management and ACPI options"
 	depends on !X86_VOYAGER
 
 config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 8d676d8ecde9..9830681446ad 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -113,7 +113,6 @@ static inline void acpi_disable_pci(void)
 	acpi_pci_disabled = 1;
 	acpi_noirq_set();
 }
-extern int acpi_irq_balance_set(char *str);
 
 /* routines for saving/restoring kernel state */
 extern int acpi_save_state_mem(void);
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
new file mode 100644
index 000000000000..c1f06289b14b
--- /dev/null
+++ b/arch/x86/include/asm/iomap.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright © 2008 Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+void *
+iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void
+iounmap_atomic(void *kvaddr, enum km_type type);
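The new header only declares the mapping pair; a minimal usage sketch follows (illustrative only — KM_USER0 and PAGE_KERNEL_NOCACHE are assumptions of this sketch, not part of the patch):

/* Copy a buffer into one page of device memory via the new atomic kmap helpers. */
static void copy_to_device_page(unsigned long pfn, const void *src, size_t len)
{
	void *vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_NOCACHE);

	memcpy(vaddr, src, len);		/* mapping is per-CPU and atomic: no sleeping here */
	iounmap_atomic(vaddr, KM_USER0);	/* must be paired with the same km_type slot */
}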
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index e4a552d44465..0b500c5b6446 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,7 +6,6 @@ extern void no_iommu_init(void);
 extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
-extern int dmar_disabled;
 
 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
 
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index d843ed0e9b2e..0005adb0f941 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -101,30 +101,22 @@
 #define LAST_VM86_IRQ 15
 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
 
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
 # if NR_CPUS < MAX_IO_APICS
 #  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
 # else
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
 
-#elif !defined(CONFIG_X86_VOYAGER)
+#elif defined(CONFIG_X86_VOYAGER)
 
-# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
-
-#  define NR_IRQS 224
-
-# else /* IO_APIC || PARAVIRT */
-
-#  define NR_IRQS 16
-
-# endif
+# define NR_IRQS 224
 
-#else /* !VISWS && !VOYAGER */
+#else /* IO_APIC || VOYAGER */
 
-# define NR_IRQS 224
+# define NR_IRQS 16
 
-#endif /* VISWS */
+#endif
 
 /* Voyager specific defines */
 /* These define the CPIs we use in linux */
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 485bdf059ffb..07f1af494ca5 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -34,10 +34,14 @@ static inline void get_memcfg_numa(void)
 
 extern int early_pfn_to_nid(unsigned long pfn);
 
+extern void resume_map_numa_kva(pgd_t *pgd);
+
 #else /* !CONFIG_NUMA */
 
 #define get_memcfg_numa get_memcfg_numa_flat
 
+static inline void resume_map_numa_kva(pgd_t *pgd) {}
+
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_DISCONTIGMEM
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 46be2fa7ac26..c2a812ebde89 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -108,9 +108,7 @@ static __always_inline unsigned long long __native_read_tsc(void)
 {
 	DECLARE_ARGS(val, low, high);
 
-	rdtsc_barrier();
 	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
-	rdtsc_barrier();
 
 	return EAX_EDX_VAL(val, low, high);
 }
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 90ac7718469a..4850e4b02b61 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -154,7 +154,7 @@ extern unsigned long node_remap_size[];
 
 #endif
 
-/* sched_domains SD_NODE_INIT for NUMAQ machines */
+/* sched_domains SD_NODE_INIT for NUMA machines */
 #define SD_NODE_INIT (struct sched_domain) { \
 	.min_interval = 8, \
 	.max_interval = 32, \
@@ -169,8 +169,9 @@ extern unsigned long node_remap_size[];
 	.flags = SD_LOAD_BALANCE \
 		| SD_BALANCE_EXEC \
 		| SD_BALANCE_FORK \
-		| SD_SERIALIZE \
-		| SD_WAKE_BALANCE, \
+		| SD_WAKE_AFFINE \
+		| SD_WAKE_BALANCE \
+		| SD_SERIALIZE, \
 	.last_balance = jiffies, \
 	.balance_interval = 1, \
 }
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 38ae163cc91b..9cd83a8e40d5 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -34,6 +34,8 @@ static inline cycles_t get_cycles(void)
 
 static __always_inline cycles_t vget_cycles(void)
 {
+	cycles_t cycles;
+
 	/*
 	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
 	 * access boot_cpu_data (which is not VDSO-safe):
@@ -42,7 +44,11 @@ static __always_inline cycles_t vget_cycles(void)
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	return (cycles_t)__native_read_tsc();
+	rdtsc_barrier();
+	cycles = (cycles_t)__native_read_tsc();
+	rdtsc_barrier();
+
+	return cycles;
 }
 
 extern void tsc_init(void);
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 664f15280f14..f8cfd00db450 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -46,7 +46,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 		return ret;
 	case 10:
 		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
-			       ret, "q", "", "=r", 16);
+			       ret, "q", "", "=r", 10);
 		if (unlikely(ret))
 			return ret;
 		__get_user_asm(*(u16 *)(8 + (char *)dst),
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 834b2c1d89fb..d2e415e6666f 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -639,8 +639,8 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
 __SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
 #define __NR_timerfd_gettime 287
 __SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
-#define __NR_paccept 288
-__SYSCALL(__NR_paccept, sys_paccept)
+#define __NR_accept4 288
+__SYSCALL(__NR_accept4, sys_accept4)
 #define __NR_signalfd4 289
 __SYSCALL(__NR_signalfd4, sys_signalfd4)
 #define __NR_eventfd2 290
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
index 9c811d2e6f91..b3e647307625 100644
--- a/arch/x86/include/asm/voyager.h
+++ b/arch/x86/include/asm/voyager.h
@@ -520,6 +520,7 @@ extern void voyager_restart(void);
 extern void voyager_cat_power_off(void);
 extern void voyager_cat_do_common_interrupt(void);
 extern void voyager_handle_nmi(void);
+extern void voyager_smp_intr_init(void);
 /* Commands for the following are */
 #define VOYAGER_PSI_READ 0
 #define VOYAGER_PSI_WRITE 1
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8c1f76abae9e..4c51a2f8fd31 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1343,7 +1343,6 @@ static void __init acpi_process_madt(void)
 		error = acpi_parse_madt_ioapic_entries();
 		if (!error) {
 			acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
-			acpi_irq_balance_set(NULL);
 			acpi_ioapic = 1;
 
 			smp_found_config = 1;
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a8fd9ebdc8e2..e4899e0e8787 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -50,7 +50,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 /* returns !0 if the IOMMU is caching non-present entries in its TLB */
 static int iommu_has_npcache(struct amd_iommu *iommu)
 {
-	return iommu->cap & IOMMU_CAP_NPCACHE;
+	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
 }
 
 /****************************************************************************
@@ -536,6 +536,9 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 {
 	address >>= PAGE_SHIFT;
 	iommu_area_free(dom->bitmap, address, pages);
+
+	if (address >= dom->next_bit)
+		dom->need_flush = true;
 }
 
 /****************************************************************************
@@ -992,8 +995,10 @@ static void __unmap_single(struct amd_iommu *iommu,
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
-	if (amd_iommu_unmap_flush)
+	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
 		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+		dma_dom->need_flush = false;
+	}
 }
 
 /*
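The first hunk fixes a classic flag mix-up: IOMMU_CAP_NPCACHE names a bit position in the capability register, so the old code and-ed the position rather than a mask. A minimal sketch of the distinction (the value 26 is illustrative, not taken from this patch):

#define IOMMU_CAP_NPCACHE 26				/* bit position, not a mask */

static int has_npcache(u32 cap)
{
	return (cap & (1UL << IOMMU_CAP_NPCACHE)) != 0;	/* correct: build the mask from the position */
	/* "cap & IOMMU_CAP_NPCACHE" would test bits 1, 3 and 4 instead */
}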
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 0cdcda35a05f..30ae2701b3df 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -121,7 +121,7 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
 LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
 				   we find in ACPI */
 unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
-int amd_iommu_isolate; /* if 1, device isolation is enabled */
+int amd_iommu_isolate = 1; /* if 1, device isolation is enabled */
 bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
@@ -1213,7 +1213,9 @@ static int __init parse_amd_iommu_options(char *str)
 	for (; *str; ++str) {
 		if (strncmp(str, "isolate", 7) == 0)
 			amd_iommu_isolate = 1;
-		if (strncmp(str, "fullflush", 11) == 0)
+		if (strncmp(str, "share", 5) == 0)
+			amd_iommu_isolate = 0;
+		if (strncmp(str, "fullflush", 9) == 0)
 			amd_iommu_unmap_flush = true;
 	}
 
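The second hunk also corrects the strncmp() length for "fullflush": the literal is 9 characters, and comparing 11 means a match only succeeds when nothing (such as another comma-separated option) follows it. A small helper in that spirit, illustrative only:

/* Sketch: compare option prefixes with a length equal to the literal's strlen(). */
static int opt_is(const char *str, const char *opt)
{
	return strncmp(str, opt, strlen(opt)) == 0;	/* a longer length also compares past the literal's NUL */
}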
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 2b69994fd3a8..d1a121443bde 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -236,17 +236,33 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 	struct ds_context *context = *p_context;
 
 	if (!context) {
+		spin_unlock(&ds_lock);
+
 		context = kzalloc(sizeof(*context), GFP_KERNEL);
 
-		if (!context)
+		if (!context) {
+			spin_lock(&ds_lock);
 			return NULL;
+		}
 
 		context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
 		if (!context->ds) {
 			kfree(context);
+			spin_lock(&ds_lock);
 			return NULL;
 		}
 
+		spin_lock(&ds_lock);
+		/*
+		 * Check for race - another CPU could have allocated
+		 * it meanwhile:
+		 */
+		if (*p_context) {
+			kfree(context->ds);
+			kfree(context);
+			return *p_context;
+		}
+
 		*p_context = context;
 
 		context->this = p_context;
@@ -384,14 +400,15 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 
 	spin_lock(&ds_lock);
 
-	if (!check_tracer(task))
-		return -EPERM;
-
 	error = -ENOMEM;
 	context = ds_alloc_context(task);
 	if (!context)
 		goto out_unlock;
 
+	error = -EPERM;
+	if (!check_tracer(task))
+		goto out_unlock;
+
 	error = -EALREADY;
 	if (context->owner[qual] == current)
 		goto out_unlock;
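ds_alloc_context() is entered with ds_lock held, so the change drops the lock around the sleeping GFP_KERNEL allocations and re-checks the slot after re-taking it. A minimal sketch of that allocate-outside-the-lock pattern (names are illustrative, not the kernel's):

static struct thing *get_or_alloc_thing(struct thing **slot, spinlock_t *lock)
{
	struct thing *new;

	spin_unlock(lock);			/* never call kzalloc(GFP_KERNEL) with a spinlock held */
	new = kzalloc(sizeof(*new), GFP_KERNEL);
	spin_lock(lock);

	if (!new)
		return NULL;			/* allocation failed; caller still holds the lock */

	if (*slot) {				/* somebody else filled the slot while we slept */
		kfree(new);
		return *slot;
	}

	*slot = new;
	return new;
}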
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 3ce029ffaa55..1b894b72c0f5 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -188,20 +188,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif
 
-#ifdef CONFIG_DMAR
-static void __init intel_g33_dmar(int num, int slot, int func)
-{
-	struct acpi_table_header *dmar_tbl;
-	acpi_status status;
-
-	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
-	if (ACPI_SUCCESS(status)) {
-		printk(KERN_INFO "BIOS BUG: DMAR advertised on Intel G31/G33 chipset -- ignoring\n");
-		dmar_disabled = 1;
-	}
-}
-#endif
-
 #define QFLAG_APPLY_ONCE 0x1
 #define QFLAG_APPLIED 0x2
 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -225,10 +211,6 @@ static struct chipset early_qrk[] __initdata = {
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
 	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
-#ifdef CONFIG_DMAR
-	{ PCI_VENDOR_ID_INTEL, 0x29c0,
-	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, intel_g33_dmar },
-#endif
 	{}
 };
 
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
index f454c78fcef6..0aa2c443d600 100644
--- a/arch/x86/kernel/es7000_32.c
+++ b/arch/x86/kernel/es7000_32.c
@@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
 	struct acpi_table_header *header = NULL;
 	int i = 0;
-	acpi_size tbl_size;
 
-	while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
+	while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
 		if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
 			struct oem_table *t = (struct oem_table *)header;
 
 			oem_addrX = t->OEMTableAddr;
 			oem_size = t->OEMTableSize;
-			early_acpi_os_unmap_memory(header, tbl_size);
 
 			*oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
 								    oem_size);
 			return 0;
 		}
-		early_acpi_os_unmap_memory(header, tbl_size);
 	}
 	return -1;
 }
 
 void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
-	if (!oem_addr)
-		return;
-
-	__acpi_unmap_table((char *)oem_addr, oem_size);
 }
 #endif
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 77017e834cf7..067d8de913f6 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -322,7 +322,7 @@ static int hpet_next_event(unsigned long delta,
 	 * what we wrote hit the chip before we compare it to the
 	 * counter.
 	 */
-	WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
+	WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
 
 	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
@@ -445,7 +445,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 {
 
 	if (request_irq(dev->irq, hpet_interrupt_handler,
-			IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
+			IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
 		return -1;
 
 	disable_irq(dev->irq);
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index b764d7429c61..c9513e1ff28d 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -1140,6 +1140,20 @@ static void __clear_irq_vector(int irq)
 
 	cfg->vector = 0;
 	cpus_clear(cfg->domain);
+
+	if (likely(!cfg->move_in_progress))
+		return;
+	cpus_and(mask, cfg->old_domain, cpu_online_map);
+	for_each_cpu_mask_nr(cpu, mask) {
+		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+								vector++) {
+			if (per_cpu(vector_irq, cpu)[vector] != irq)
+				continue;
+			per_cpu(vector_irq, cpu)[vector] = -1;
+			break;
+		}
+	}
+	cfg->move_in_progress = 0;
 }
 
 void __setup_vector_irq(int cpu)
@@ -3611,6 +3625,8 @@ int __init probe_nr_irqs(void)
 	/* something wrong ? */
 	if (nr < nr_min)
 		nr = nr_min;
+	if (WARN_ON(nr > NR_IRQS))
+		nr = NR_IRQS;
 
 	return nr;
 }
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 774ac4991568..1c9cc431ea4f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static void kvm_setup_secondary_clock(void)
+static void __devinit kvm_setup_secondary_clock(void)
 {
 	/*
 	 * Now that the first cpu already had this clocksource initialized,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f4c93f1cfc19..cc5a2545dd41 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -29,11 +29,7 @@ EXPORT_SYMBOL(pm_power_off);
 
 static const struct desc_ptr no_idt = {};
 static int reboot_mode;
-/*
- * Keyboard reset and triple fault may result in INIT, not RESET, which
- * doesn't work when we're in vmx root mode. Try ACPI first.
- */
-enum reboot_type reboot_type = BOOT_ACPI;
+enum reboot_type reboot_type = BOOT_KBD;
 int reboot_force;
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
@@ -173,6 +169,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
 		},
 	},
+	{ /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
+		.callback = set_bios_reboot,
+		.ident = "Dell OptiPlex 330",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
+			DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
+		},
+	},
 	{ /* Handle problems with rebooting on Dell 2400's */
 		.callback = set_bios_reboot,
 		.ident = "Dell PowerEdge 2400",
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0fa6790c1dd3..9d5674f7b6cc 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -764,7 +764,7 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
 		.callback = dmi_low_memory_corruption,
 		.ident = "Phoenix BIOS",
 		.matches = {
-			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
 		},
 	},
 #endif
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index e00534b33534..f4049f3513b6 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -154,6 +154,12 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	flush_mm = mm;
 	flush_va = va;
 	cpus_or(flush_cpumask, cpumask, flush_cpumask);
+
+	/*
+	 * Make the above memory operations globally visible before
+	 * sending the IPI.
+	 */
+	smp_mb();
 	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index dcbf7a1159ea..8f919ca69494 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -183,6 +183,11 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
 
 	/*
+	 * Make the above memory operations globally visible before
+	 * sending the IPI.
+	 */
+	smp_mb();
+	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
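Both the 32-bit and 64-bit TLB-flush paths now publish the flush request and then issue an explicit smp_mb() so those stores are globally visible before the IPI fires. A condensed sketch of the ordering rule (the structure and helper names here are illustrative, not the kernel's):

struct flush_request {
	struct mm_struct *mm;
	cpumask_t cpus;
};

static void publish_then_kick(struct flush_request *req, cpumask_t mask, int vector)
{
	req->mm = current->mm;			/* publish what should be flushed... */
	cpus_or(req->cpus, mask, req->cpus);

	smp_mb();				/* ...make those stores globally visible... */

	send_IPI_mask(mask, vector);		/* ...and only then interrupt the other CPUs */
}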
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 62348e4fd8d1..424093b157d3 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -55,7 +55,7 @@ u64 native_sched_clock(void)
 	rdtscll(this_offset);
 
 	/* return the value in ns */
-	return cycles_2_ns(this_offset);
+	return __cycles_2_ns(this_offset);
 }
 
 /* We need to define a real function for sched_clock, to override the
@@ -813,10 +813,6 @@ void __init tsc_init(void)
 	cpu_khz = calibrate_cpu();
 #endif
 
-	lpj = ((u64)tsc_khz * 1000);
-	do_div(lpj, HZ);
-	lpj_fine = lpj;
-
 	printk("Detected %lu.%03lu MHz processor.\n",
 			(unsigned long)cpu_khz / 1000,
 			(unsigned long)cpu_khz % 1000);
@@ -836,6 +832,10 @@ void __init tsc_init(void)
 	/* now allow native_sched_clock() to use rdtsc */
 	tsc_disabled = 0;
 
+	lpj = ((u64)tsc_khz * 1000);
+	do_div(lpj, HZ);
+	lpj_fine = lpj;
+
 	use_tsc_delay();
 	/* Check and install the TSC clocksource */
 	dmi_check_system(bad_tsc_dmi_table);
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9ffb01c31c40..1c0dfbca87c1 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
 	cycles_t start, now, prev, end;
 	int i;
 
+	rdtsc_barrier();
 	start = get_cycles();
+	rdtsc_barrier();
 	/*
 	 * The measurement runs for 20 msecs:
 	 */
@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
 		 */
 		__raw_spin_lock(&sync_lock);
 		prev = last_tsc;
+		rdtsc_barrier();
 		now = get_cycles();
+		rdtsc_barrier();
 		last_tsc = now;
 		__raw_spin_unlock(&sync_lock);
 
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ce3251ce5504..b81125f0bdee 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -20,6 +20,8 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM
+	# for device assignment:
+	depends on PCI
 	select PREEMPT_NOTIFIERS
 	select MMU_NOTIFIER
 	select ANON_INODES
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 8772dc946823..59ebd37ad79e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -548,8 +548,10 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 	mutex_lock(&kvm->lock);
 	pit->irq_source_id = kvm_request_irq_source_id(kvm);
 	mutex_unlock(&kvm->lock);
-	if (pit->irq_source_id < 0)
+	if (pit->irq_source_id < 0) {
+		kfree(pit);
 		return NULL;
+	}
 
 	mutex_init(&pit->pit_state.lock);
 	mutex_lock(&pit->pit_state.lock);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a5e64881d9b..f1983d9477cd 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -314,7 +314,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
-				   rmap_desc_cache, 1);
+				   rmap_desc_cache, 4);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2643b430d83a..d06b4dc0e2ea 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3564,7 +3564,8 @@ static int __init vmx_init(void)
 		bypass_guest_pf = 0;
 		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
 			VMX_EPT_WRITABLE_MASK |
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
+			VMX_EPT_IGMT_BIT);
 		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
 				VMX_EPT_EXECUTABLE_MASK);
 		kvm_enable_tdp();
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index 3e010d21fdd7..ec5edc339da6 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -352,6 +352,7 @@ enum vmcs_field {
 #define VMX_EPT_READABLE_MASK 0x1ull
 #define VMX_EPT_WRITABLE_MASK 0x2ull
 #define VMX_EPT_EXECUTABLE_MASK 0x4ull
+#define VMX_EPT_IGMT_BIT (1ull << 6)
 
 #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
 
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index 6bbdd633864c..a580b9562e76 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -27,7 +27,7 @@ static struct irqaction irq2 = {
 void __init intr_init_hook(void)
 {
 #ifdef CONFIG_SMP
-	smp_intr_init();
+	voyager_smp_intr_init();
 #endif
 
 	setup_irq(2, &irq2);
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 7f4c6af14351..52145007bd7e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -7,6 +7,7 @@
  * This file provides all the same external entries as smp.c but uses
  * the voyager hal to provide the functionality
  */
+#include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
@@ -1258,7 +1259,7 @@ static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
 #define QIC_SET_GATE(cpi, vector) \
 	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
 
-void __init smp_intr_init(void)
+void __init voyager_smp_intr_init(void)
 {
 	int i;
 
@@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void)
 	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
+static void voyager_send_call_func(cpumask_t callmask)
+{
+	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
+}
+
+static void voyager_send_call_func_single(int cpu)
+{
+	send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
+}
+
 struct smp_ops smp_ops = {
 	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = voyager_smp_prepare_cpus,
@@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = {
 	.smp_send_stop = voyager_smp_send_stop,
 	.smp_send_reschedule = voyager_smp_send_reschedule,
 
-	.send_call_func_ipi = native_send_call_func_ipi,
-	.send_call_func_single_ipi = native_send_call_func_single_ipi,
+	.send_call_func_ipi = voyager_send_call_func,
+	.send_call_func_single_ipi = voyager_send_call_func_single,
 };
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 847c164725f4..8518c678d83f 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -222,6 +222,41 @@ static void __init remap_numa_kva(void)
 	}
 }
 
+#ifdef CONFIG_HIBERNATION
+/**
+ * resume_map_numa_kva - add KVA mapping to the temporary page tables created
+ * during resume from hibernation
+ * @pgd_base - temporary resume page directory
+ */
+void resume_map_numa_kva(pgd_t *pgd_base)
+{
+	int node;
+
+	for_each_online_node(node) {
+		unsigned long start_va, start_pfn, size, pfn;
+
+		start_va = (unsigned long)node_remap_start_vaddr[node];
+		start_pfn = node_remap_start_pfn[node];
+		size = node_remap_size[node];
+
+		printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
+
+		for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
+			unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
+			pgd_t *pgd = pgd_base + pgd_index(vaddr);
+			pud_t *pud = pud_offset(pgd, vaddr);
+			pmd_t *pmd = pmd_offset(pud, vaddr);
+
+			set_pmd(pmd, pfn_pmd(start_pfn + pfn,
+						PAGE_KERNEL_LARGE_EXEC));
+
+			printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
+				__FUNCTION__, vaddr, start_pfn + pfn);
+		}
+	}
+}
+#endif
+
 static unsigned long calculate_numa_remap_pages(void)
 {
 	int nid;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index f1dc1b75d166..e89d24815f26 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -67,18 +67,18 @@ static void split_page_count(int level)
 
 void arch_report_meminfo(struct seq_file *m)
 {
 	seq_printf(m, "DirectMap4k: %8lu kB\n",
 			direct_pages_count[PG_LEVEL_4K] << 2);
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 	seq_printf(m, "DirectMap2M: %8lu kB\n",
 			direct_pages_count[PG_LEVEL_2M] << 11);
 #else
 	seq_printf(m, "DirectMap4M: %8lu kB\n",
 			direct_pages_count[PG_LEVEL_2M] << 12);
 #endif
 #ifdef CONFIG_X86_64
 	if (direct_gbpages)
 		seq_printf(m, "DirectMap1G: %8lu kB\n",
 			direct_pages_count[PG_LEVEL_1G] << 20);
 #endif
 }
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 0620d6d45f7d..3f1b81a83e2e 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -27,8 +27,7 @@ static int num_counters = 2;
 static int counter_width = 32;
 
 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1))))
+#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
 
 #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
 #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
@@ -124,14 +123,14 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 static int ppro_check_ctrs(struct pt_regs * const regs,
 			   struct op_msrs const * const msrs)
 {
-	unsigned int low, high;
+	u64 val;
 	int i;
 
	for (i = 0 ; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
-		CTR_READ(low, high, msrs, i);
-		if (CTR_OVERFLOWED(low)) {
+		rdmsrl(msrs->counters[i].addr, val);
+		if (CTR_OVERFLOWED(val)) {
 			oprofile_add_sample(regs, i);
 			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 		}
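The counters can be wider than 32 bits here (counter_width is adjusted at init), so the overflow test now reads the whole MSR with rdmsrl() and checks the top implemented bit of the 64-bit value. A compact sketch of that test, assuming counter_width <= 64:

static int counter_overflowed(unsigned int msr, int counter_width)
{
	u64 val;

	rdmsrl(msr, val);				/* read all 64 architectural bits at once */
	return !(val & (1ULL << (counter_width - 1)));	/* overflowed once the top implemented bit clears */
}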
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index f2b6e3f11bfc..81197c62d5b3 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -12,6 +12,7 @@
 #include <asm/system.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/mmzone.h>
 
 /* Defined in hibernate_asm_32.S */
 extern int restore_image(void);
@@ -127,6 +128,9 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
 			}
 		}
 	}
+
+	resume_map_numa_kva(pgd_base);
+
 	return 0;
 }
 
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b61534c7a4c4..5e4686d70f62 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -863,15 +863,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
 	if (PagePinned(virt_to_page(mm->pgd))) {
 		SetPagePinned(page);
 
+		vm_unmap_aliases();
 		if (!PageHighMem(page)) {
 			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
 				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-		} else
+		} else {
 			/* make sure there are no stray mappings of
 			   this page */
 			kmap_flush_unused();
-			vm_unmap_aliases();
+		}
 	}
 }
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index aba77b2b7d18..688936044dc9 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -850,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+	vm_unmap_aliases();
+
 	xen_mc_batch();
 
 	if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
-		/* re-enable interrupts for kmap_flush_unused */
+		/* re-enable interrupts for flushing */
 		xen_mc_issue(0);
+
 		kmap_flush_unused();
-		vm_unmap_aliases();
+
 		xen_mc_batch();
 	}
 
@@ -874,7 +877,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 #else /* CONFIG_X86_32 */
 #ifdef CONFIG_X86_PAE
 	/* Need to make sure unshared kernel PMD is pinnable */
-	xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
 		     PT_PMD);
 #endif
 	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
@@ -991,7 +994,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
 	/* Need to make sure unshared kernel PMD is unpinned */
-	xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
 		       PT_PMD);
 #endif
 