Diffstat (limited to 'arch')
 175 files changed, 2625 insertions(+), 2094 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 7f8f281f2585..97fb7d0365d1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -76,6 +76,15 @@ config OPTPROBES
 	depends on KPROBES && HAVE_OPTPROBES
 	depends on !PREEMPT
 
+config KPROBES_ON_FTRACE
+	def_bool y
+	depends on KPROBES && HAVE_KPROBES_ON_FTRACE
+	depends on DYNAMIC_FTRACE_WITH_REGS
+	help
+	 If function tracer is enabled and the arch supports full
+	 passing of pt_regs to function tracing, then kprobes can
+	 optimize on top of function tracing.
+
 config UPROBES
 	bool "Transparent user-space probes (EXPERIMENTAL)"
 	depends on UPROBE_EVENT && PERF_EVENTS
@@ -158,6 +167,9 @@ config HAVE_KRETPROBES
 config HAVE_OPTPROBES
 	bool
 
+config HAVE_KPROBES_ON_FTRACE
+	bool
+
 config HAVE_NMI_WATCHDOG
 	bool
 #
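For context on what the new KPROBES_ON_FTRACE option buys: when a probe address coincides with a function's ftrace call site, register_kprobe() can arm it through the ftrace trampoline instead of a breakpoint instruction. A minimal module sketch, assuming the option is enabled and using "do_sys_open" as an arbitrary example target (not part of this patch):

#include <linux/module.h>
#include <linux/kprobes.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("%s hit, ip=%lx\n", p->symbol_name, instruction_pointer(regs));
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_sys_open",	/* hypothetical probe target */
	.pre_handler	= my_pre_handler,
};

static int __init kp_init(void)
{
	/* With KPROBES_ON_FTRACE, a probe at the function's ftrace entry
	 * is serviced via the ftrace trampoline rather than a break insn. */
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");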
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 9d5904cc7712..9b504af2e966 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -5,7 +5,6 @@ config ALPHA
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
-	select HAVE_IRQ_WORK
 	select HAVE_PCSPKR_PLATFORM
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 14db93e4c8a8..dbc1760f418b 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1139,6 +1139,7 @@ struct rusage32 {
 SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 {
 	struct rusage32 r;
+	cputime_t utime, stime;
 
 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
 		return -EINVAL;
@@ -1146,8 +1147,9 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 	memset(&r, 0, sizeof(r));
 	switch (who) {
 	case RUSAGE_SELF:
-		jiffies_to_timeval32(current->utime, &r.ru_utime);
-		jiffies_to_timeval32(current->stime, &r.ru_stime);
+		task_cputime(current, &utime, &stime);
+		jiffies_to_timeval32(utime, &r.ru_utime);
+		jiffies_to_timeval32(stime, &r.ru_stime);
 		r.ru_minflt = current->min_flt;
 		r.ru_majflt = current->maj_flt;
 		break;
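The conversion pattern here recurs throughout this series: readers of t->utime/t->stime switch to the task_cputime() accessor, because with full dynticks cputime accounting those values are computed at read time rather than stored. As a rough sketch, the trivial variant of the accessor amounts to (simplified, not the exact kernel definition):

static inline void task_cputime(struct task_struct *t,
				cputime_t *utime, cputime_t *stime)
{
	*utime = t->utime;	/* the dynticks variant adds the pending delta */
	*stime = t->stime;
}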
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b82a4ed..9bbe760f2352 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -36,7 +36,6 @@ config ARM
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_IDE if PCI || ISA || PCMCIA
-	select HAVE_IRQ_WORK
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 36ae03a3f5d1..87dfa9026c5b 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
 	irq_set_chained_handler(irq, gic_handle_cascade_irq);
 }
 
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+	void __iomem *base = gic_data_dist_base(gic);
+	u32 mask, i;
+
+	for (i = mask = 0; i < 32; i += 4) {
+		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+		mask |= mask >> 16;
+		mask |= mask >> 8;
+		if (mask)
+			break;
+	}
+
+	if (!mask)
+		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+	return mask;
+}
+
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
 	unsigned int i;
@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	/*
 	 * Set all global interrupts to this CPU only.
 	 */
-	cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
+	cpumask = gic_get_cpumask(gic);
+	cpumask |= cpumask << 8;
+	cpumask |= cpumask << 16;
 	for (i = 32; i < gic_irqs; i += 4)
 		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 	 * Get what the GIC says our CPU mask is.
 	 */
 	BUG_ON(cpu >= NR_GIC_CPU_IF);
-	cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+	cpu_mask = gic_get_cpumask(gic);
 	gic_cpu_map[cpu] = cpu_mask;
 
 	/*
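To make the folding in gic_get_cpumask() concrete: each GIC_DIST_TARGET word packs four one-byte CPU target fields, and the two shifted ORs collapse all four bytes onto the low byte (the u8 return type discards the rest). A standalone illustration, not kernel code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Four banked per-interrupt target fields, all naming CPU0. */
	uint32_t reg = 0x01010101;
	uint32_t mask = reg;

	mask |= mask >> 16;		/* OR the high half onto the low half */
	mask |= mask >> 8;		/* OR the remaining byte down */

	assert((uint8_t)mask == 0x01);	/* low byte = union of all four fields */
	return 0;
}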
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 73cf03aa981e..1c4df27f9332 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -37,7 +37,7 @@
  */
 #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
-#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3)
+#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
  * The maximum size of a 26-bit user space task.
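The old definition placed the mmap base at PAGE_OFFSET / 3, which is not even page aligned for every memory split; rounding TASK_SIZE / 3 up to a 16 MiB boundary fixes that. A worked check, assuming a hypothetical CONFIG_PAGE_OFFSET of 0x80000000 (2G/2G split):

#include <assert.h>
#include <stdint.h>

#define SZ_16M		0x01000000u
#define ALIGN_16M(x)	(((x) + SZ_16M - 1) & ~(SZ_16M - 1))

int main(void)
{
	uint32_t task_size = 0x80000000u - 0x01000000u;	/* 0x7f000000 */

	/* old formula: 0x80000000 / 3 = 0x2aaaaaaa -- not 16M aligned */
	assert(ALIGN_16M(task_size / 3) == 0x2b000000u);
	return 0;
}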
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 4eb6d005ffaa..86dff32a0737 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -7,8 +7,14 @@
 
 #ifndef __ASSEMBLER__
 unsigned int scu_get_core_count(void __iomem *);
-void scu_enable(void __iomem *);
 int scu_power_mode(void __iomem *, unsigned int);
+
+#ifdef CONFIG_SMP
+void scu_enable(void __iomem *scu_base);
+#else
+static inline void scu_enable(void __iomem *scu_base) {}
+#endif
+
 #endif
 
 #endif
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index b9f015e843d8..45eac87ed66a 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
 int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 {
 	unsigned int val;
-	int cpu = cpu_logical_map(smp_processor_id());
+	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 
 	if (mode > 3 || mode == 1 || cpu > 3)
 		return -EINVAL;
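This and the highbank changes below fix the same assumption: cpu_logical_map() returns the full MPIDR value, and on multi-cluster systems that is not a small per-cluster core index; only affinity level 0 is. The extractor is essentially a shift-and-mask, sketched here along the lines of the arm definition of this era:

#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)

/* Affinity level N is byte N of the MPIDR value. */
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

/* e.g. MPIDR 0x100 (cluster 1, core 0): level 0 yields 0, not 0x100,
 * so the "cpu > 3" sanity check above keeps working. */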
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e103c290bc9e..85afb031b676 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
 	select CPU_EXYNOS4210
 	select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
 	select PINCTRL
-	select PINCTRL_EXYNOS4
+	select PINCTRL_EXYNOS
 	select USE_OF
 	help
 	  Machine support for Samsung Exynos4 machine with device tree enabled.
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 981dc1e1da51..e6c061282939 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -28,6 +28,7 @@
 
 #include <asm/arch_timer.h>
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/hardware/arm_timer.h>
@@ -59,7 +60,7 @@ static void __init highbank_scu_map_io(void)
 
 void highbank_set_cpu_jump(int cpu, void *jump_addr)
 {
-	cpu = cpu_logical_map(cpu);
+	cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
 	writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
 	__cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
 	outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
diff --git a/arch/arm/mach-highbank/sysregs.h b/arch/arm/mach-highbank/sysregs.h
index 70af9d13fcef..5995df7f2622 100644
--- a/arch/arm/mach-highbank/sysregs.h
+++ b/arch/arm/mach-highbank/sysregs.h
@@ -37,7 +37,7 @@ extern void __iomem *sregs_base;
 
 static inline void highbank_set_core_pwr(void)
 {
-	int cpu = cpu_logical_map(smp_processor_id());
+	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 	if (scu_base_addr)
 		scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
 	else
@@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void)
 
 static inline void highbank_clear_core_pwr(void)
 {
-	int cpu = cpu_logical_map(smp_processor_id());
+	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 	if (scu_base_addr)
 		scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
 	else
diff --git a/arch/arm/mach-realview/include/mach/irqs-eb.h b/arch/arm/mach-realview/include/mach/irqs-eb.h
index d6b5073692d2..44754230fdcc 100644
--- a/arch/arm/mach-realview/include/mach/irqs-eb.h
+++ b/arch/arm/mach-realview/include/mach/irqs-eb.h
@@ -115,7 +115,7 @@
 /*
  * Only define NR_IRQS if less than NR_IRQS_EB
  */
-#define NR_IRQS_EB		(IRQ_EB_GIC_START + 96)
+#define NR_IRQS_EB		(IRQ_EB_GIC_START + 128)
 
 #if defined(CONFIG_MACH_REALVIEW_EB) \
 	&& (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 076c26d43864..dda3904dc64c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	if (is_coherent || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (gfp & GFP_ATOMIC)
+	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
 	else if (!IS_ENABLED(CONFIG_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
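The old test was subtly wrong because GFP_ATOMIC is not a dedicated bit: in kernels of this era it expands to __GFP_HIGH, and atomicity is expressed by the absence of __GFP_WAIT. So (gfp & GFP_ATOMIC) really asked "is this a high-priority allocation?" and missed other non-sleeping ones. A sketch of the intended predicate:

/* "May this allocation sleep?" must test __GFP_WAIT directly:
 * true for GFP_KERNEL, false for GFP_ATOMIC and GFP_NOWAIT. */
static inline bool allocation_may_sleep(gfp_t gfp)
{
	return gfp & __GFP_WAIT;
}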
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f8f362aafee9..75e915b72471 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -21,7 +21,6 @@ config ARM64
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
-	select HAVE_IRQ_WORK
 	select HAVE_MEMBLOCK
 	select HAVE_PERF_EVENTS
 	select IRQ_DOMAIN
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index aaf5199d8fcb..b3d18f9f3e8d 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* __ASM_AVR32_DMA_MAPPING_H */
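The same ten-line block is repeated for several architectures below (blackfin, cris, m68k), with inline -EINVAL stubs on c6x and frv, so that dma_mmap_coherent() and dma_get_sgtable() exist everywhere. A sketch of the driver-side use this enables; the device structure and buffer fields are hypothetical, not from this patch:

struct mydrv {				/* assumed driver state */
	struct device *dev;
	void *buf_cpu;			/* from dma_alloc_coherent() */
	dma_addr_t buf_dma;
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv *drv = file->private_data;

	/* Map the coherent buffer into user space in one call. */
	return dma_mmap_coherent(drv->dev, vma, drv->buf_cpu, drv->buf_dma,
				 vma->vm_end - vma->vm_start);
}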
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index b6f3ad5441c5..67e4aaad78f5 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -24,7 +24,6 @@ config BLACKFIN
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_IDE
-	select HAVE_IRQ_WORK
 	select HAVE_KERNEL_GZIP if RAMKERNEL
 	select HAVE_KERNEL_BZIP2 if RAMKERNEL
 	select HAVE_KERNEL_LZMA if RAMKERNEL
@@ -38,7 +37,6 @@ config BLACKFIN
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_PROBE
-	select IRQ_PER_CPU if SMP
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
 	select GENERIC_SMP_IDLE_THREAD
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index bbf461076a0a..054d9ec57d9d 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	_dma_sync((dma_addr_t)vaddr, size, dir);
 }
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* _BLACKFIN_DMA_MAPPING_H */
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index 3c694065030f..88bd0d899bdb 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma, void *cpu_addr,
+				    dma_addr_t dma_addr, size_t size)
+{
+	return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size)
+{
+	return -EINVAL;
+}
+
 #endif /* _ASM_C6X_DMA_MAPPING_H */
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index 8588b2ccf854..2f0f654f1b44 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 
 #endif
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 9d262645f667..17df48fc8f44 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -3,7 +3,6 @@ config FRV
 	default y
 	select HAVE_IDE
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_UID16
 	select HAVE_GENERIC_HARDIRQS
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
index dfb811002c64..1746a2b8e6e7 100644
--- a/arch/frv/include/asm/dma-mapping.h
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	flush_write_buffers();
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma, void *cpu_addr,
+				    dma_addr_t dma_addr, size_t size)
+{
+	return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size)
+{
+	return -EINVAL;
+}
+
 #endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 0744f7d7b1fd..e4decc6b8947 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -12,9 +12,7 @@ config HEXAGON
 	# select ARCH_WANT_OPTIONAL_GPIOLIB
 	# select ARCH_REQUIRE_GPIOLIB
 	# select HAVE_CLK
-	# select IRQ_PER_CPU
 	# select GENERIC_PENDING_IRQ if SMP
-	select HAVE_IRQ_WORK
 	select GENERIC_ATOMIC64
 	select HAVE_PERF_EVENTS
 	select HAVE_GENERIC_HARDIRQS
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3279646120e3..00c2e88f7755 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -29,7 +29,6 @@ config IA64
 	select ARCH_DISCARD_MEMBLOCK
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
-	select IRQ_PER_CPU
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 7fcf7f08ab06..e2d3f5baf265 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -11,99 +11,19 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec.
  * Otherwise we measure cpu time in jiffies using the generic definitions.
  */
 
 #ifndef __IA64_CPUTIME_H
 #define __IA64_CPUTIME_H
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-#include <asm-generic/cputime.h>
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+# include <asm-generic/cputime.h>
 #else
-
-#include <linux/time.h>
-#include <linux/jiffies.h>
-#include <asm/processor.h>
-
-typedef u64 __nocast cputime_t;
-typedef u64 __nocast cputime64_t;
-
-#define cputime_one_jiffy		jiffies_to_cputime(1)
-
-/*
- * Convert cputime <-> jiffies (HZ)
- */
-#define cputime_to_jiffies(__ct)	\
-	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies_to_cputime(__jif)	\
-	(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct)	\
-	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies64_to_cputime64(__jif)	\
-	(__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
-
-/*
- * Convert cputime <-> microseconds
- */
-#define cputime_to_usecs(__ct)		\
-	((__force u64)(__ct) / NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs)	\
-	(__force cputime_t)((__usecs) * NSEC_PER_USEC)
-#define usecs_to_cputime64(__usecs)	\
-	(__force cputime64_t)((__usecs) * NSEC_PER_USEC)
-
-/*
- * Convert cputime <-> seconds
- */
-#define cputime_to_secs(__ct)		\
-	((__force u64)(__ct) / NSEC_PER_SEC)
-#define secs_to_cputime(__secs)		\
-	(__force cputime_t)((__secs) * NSEC_PER_SEC)
-
-/*
- * Convert cputime <-> timespec (nsec)
- */
-static inline cputime_t timespec_to_cputime(const struct timespec *val)
-{
-	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
-	return (__force cputime_t) ret;
-}
-static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
-{
-	val->tv_sec  = (__force u64) ct / NSEC_PER_SEC;
-	val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
-}
-
-/*
- * Convert cputime <-> timeval (msec)
- */
-static inline cputime_t timeval_to_cputime(struct timeval *val)
-{
-	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
-	return (__force cputime_t) ret;
-}
-static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
-{
-	val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
-	val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
-}
-
-/*
- * Convert cputime <-> clock (USER_HZ)
- */
-#define cputime_to_clock_t(__ct)	\
-	((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x)		\
-	(__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
-
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct)	\
-	cputime_to_clock_t((__force cputime_t)__ct)
-
+# include <asm/processor.h>
+# include <asm-generic/cputime_nsecs.h>
 extern void arch_vtime_task_switch(struct task_struct *tsk);
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 #endif /* __IA64_CPUTIME_H */
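All of the removed lines are unit conversions over a nanosecond-based cputime_t, now shared through <asm-generic/cputime_nsecs.h>; the rename to CONFIG_VIRT_CPU_ACCOUNTING_NATIVE distinguishes this arch-maintained variant from the new generic accounting option. The arithmetic is plain ratio scaling; a worked check under assumed HZ = 1000 and USER_HZ = 100:

#include <assert.h>

int main(void)
{
	unsigned long long nsec_per_sec = 1000000000ULL;
	unsigned long long hz = 1000, user_hz = 100;

	/* 5 jiffies as nsec-based cputime: 5 * (NSEC_PER_SEC / HZ) */
	unsigned long long ct = 5 * (nsec_per_sec / hz);

	assert(ct == 5000000ULL);			/* 5 ms */
	assert(ct / (nsec_per_sec / user_hz) == 0);	/* < one USER_HZ tick */
	return 0;
}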
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff2ae4136584..020d655ed082 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -31,7 +31,7 @@ struct thread_info {
 	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 	struct restart_block restart_block;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	__u64 ac_stamp;
 	__u64 ac_leave;
 	__u64 ac_stime;
@@ -69,7 +69,7 @@ struct thread_info {
 #define task_stack_page(tsk)	((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #define setup_thread_stack(p, org)			\
 	*task_thread_info(p) = *task_thread_info(org);	\
 	task_thread_info(p)->ac_stime = 0;		\
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
index c57fa910f2c9..00cf03e0cb82 100644
--- a/arch/ia64/include/asm/xen/minstate.h
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -1,5 +1,5 @@
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /* read ar.itc in advance, and use it before leaving bank 0 */
 #define XEN_ACCOUNT_GET_STAMP		\
 	MOV_FROM_ITC(pUStk, p6, r20, r2);
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index a48bd9a9927b..46c9e3007315 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -41,7 +41,7 @@ void foo(void)
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
 	DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
 	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 6bfd8429ee0f..7a53530f22c2 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -724,7 +724,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
 #endif
 .global __paravirt_work_processed_syscall;
 __paravirt_work_processed_syscall:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	adds r2=PT(LOADRS)+16,r12
 	MOV_FROM_ITC(pUStk, p9, r22, r19)	// fetch time at leave
 	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -762,7 +762,7 @@ __paravirt_work_processed_syscall:
 
 	ld8 r29=[r2],16		// M0|1 load cr.ipsr
 	ld8 r28=[r3],16		// M0|1 load cr.iip
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
 	;;
 	ld8 r30=[r2],16		// M0|1 load cr.ifs
@@ -793,7 +793,7 @@ __paravirt_work_processed_syscall:
 	ld8.fill r1=[r3],16	// M0|1 load r1
 (pUStk) mov r17=1		// A
 	;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk) st1 [r15]=r17		// M2|3
 #else
 (pUStk) st1 [r14]=r17		// M2|3
@@ -813,7 +813,7 @@ __paravirt_work_processed_syscall:
 	shr.u r18=r19,16	// I0|1 get byte size of existing "dirty" partition
 	COVER			// B add current frame into dirty partition & set cr.ifs
 	;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	mov r19=ar.bsp		// M2 get new backing store pointer
 	st8 [r14]=r22		// M  save time at leave
 	mov f10=f0		// F  clear f10
@@ -948,7 +948,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
 	adds r16=PT(CR_IPSR)+16,r12
 	adds r17=PT(CR_IIP)+16,r12
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	.pred.rel.mutex pUStk,pKStk
 	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
 	MOV_FROM_ITC(pUStk, p9, r22, r29)	// M  fetch time at leave
@@ -981,7 +981,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
 	;;
 	ld8.fill r12=[r16],16
 	ld8.fill r13=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk)	adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
 #else
 (pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
@@ -989,7 +989,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
 	;;
 	ld8 r20=[r16],16	// ar.fpsr
 	ld8.fill r15=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18	// deferred
 #endif
 	;;
@@ -997,7 +997,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
 	ld8.fill r2=[r17]
 (pUStk)	mov r17=1
 	;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	// mmi_ : ld8 st1 shr;;	mmi_ : st8 st1 shr;;
 	// mib  : mov add br	->	mib : ld8 add br
 	// bbb_ : br  nop cover;;	mbb_ : mov br  cover;;
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index e662f178b990..c4cd45d97749 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -529,7 +529,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	nop.i 0
 	;;
 	mov ar.rsc=0		// M2 set enforced lazy mode, pl 0, LE, loadrs=0
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	MOV_FROM_ITC(p0, p6, r30, r23)	// M  get cycle for accounting
 #else
 	nop.m 0
@@ -555,7 +555,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	cmp.ne pKStk,pUStk=r0,r0	// A  set pKStk <- 0, pUStk <- 1
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 	;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	// mov.m r30=ar.itc is called in advance
 	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
 	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 4738ff7bd66a..9be4e497f3d3 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1073,7 +1073,7 @@ END(ia64_native_sched_clock)
 sched_clock = ia64_native_sched_clock
 #endif
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 GLOBAL_ENTRY(cycle_to_cputime)
 	alloc r16=ar.pfs,1,0,0,0
 	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
@@ -1091,7 +1091,7 @@ GLOBAL_ENTRY(cycle_to_cputime)
 	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
 	br.ret.sptk.many rp
 END(cycle_to_cputime)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_IA64_BRL_EMU
 
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index fa25689fc453..689ffcaa284e 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -784,7 +784,7 @@ ENTRY(break_fault)
 
 (p8)	adds r28=16,r28		// A  switch cr.iip to next bundle
 (p9)	adds r8=1,r8		// A  increment ei to next slot
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	;;
 	mov b6=r30		// I0 setup syscall handler branch reg early
 #else
@@ -801,7 +801,7 @@ ENTRY(break_fault)
 	//
 	///////////////////////////////////////////////////////////////////////
 	st1 [r16]=r0		// M2|3 clear current->thread.on_ustack flag
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	MOV_FROM_ITC(p0, p14, r30, r18)	// M  get cycle for accounting
 #else
 	mov b6=r30		// I0 setup syscall handler branch reg early
@@ -817,7 +817,7 @@ ENTRY(break_fault)
 	cmp.eq p14,p0=r9,r0	// A  are syscalls being traced/audited?
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 1:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	// mov.m r30=ar.itc is called in advance, and r13 is current
 	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// A
 	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13	// A
@@ -1043,7 +1043,7 @@ END(ia64_syscall_setup)
 	DBG_FAULT(16)
 	FAULT(16)
 
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
 	/*
 	 * There is no particular reason for this code to be here, other than
 	 * that there happens to be space here that would go unused otherwise.
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index d56753a11636..cc82a7d744c9 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -4,7 +4,7 @@
 #include "entry.h"
 #include "paravirt_inst.h"
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /* read ar.itc in advance, and use it before leaving bank 0 */
 #define ACCOUNT_GET_STAMP				\
 (pUStk) mov.m r20=ar.itc;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 88a794536bc0..fbaac1afb844 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -77,7 +77,7 @@ static struct clocksource clocksource_itc = {
 };
 static struct clocksource *itc_clocksource;
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 
 #include <linux/kernel_stat.h>
 
@@ -136,13 +136,14 @@ void vtime_account_system(struct task_struct *tsk)
 
 	account_system_time(tsk, 0, delta, delta);
 }
+EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
 	account_idle_time(vtime_delta(tsk));
 }
 
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
index 3e6b8445af6a..292805f0762e 100644
--- a/arch/m68k/include/asm/dma-mapping.h
+++ b/arch/m68k/include/asm/dma-mapping.h
@@ -115,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
 #include <asm-generic/dma-mapping-broken.h>
 #endif
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index ae700f49e51d..b0768a657920 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
 #define start_thread(_regs, _pc, _usp)			\
 	do {						\
 		(_regs)->pc = (_pc);			\
-		((struct switch_stack *)(_regs))[-1].a6 = 0; \
 		setframeformat(_regs);			\
 		if (current->mm)			\
 			(_regs)->d5 = current->mm->start_data; \
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2ac626ab9d43..9becc44d9d7a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,7 +4,6 @@ config MIPS
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
 	select HAVE_OPROFILE
-	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_ARCH_KGDB
@@ -2161,7 +2160,6 @@ source "mm/Kconfig"
 config SMP
 	bool "Multi-Processing support"
 	depends on SYS_SUPPORTS_SMP
-	select IRQ_PER_CPU
 	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index d7af29f1fcf0..ba611927749b 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -8,8 +8,10 @@ config BCM47XX_SSB
 	select SSB_DRIVER_EXTIF
 	select SSB_EMBEDDED
 	select SSB_B43_PCI_BRIDGE if PCI
+	select SSB_DRIVER_PCICORE if PCI
 	select SSB_PCICORE_HOSTMODE if PCI
 	select SSB_DRIVER_GPIO
+	select GPIOLIB
 	default y
 	help
 	  Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
@@ -25,6 +27,7 @@ config BCM47XX_BCMA
 	select BCMA_HOST_PCI if PCI
 	select BCMA_DRIVER_PCI_HOSTMODE if PCI
 	select BCMA_DRIVER_GPIO
+	select GPIOLIB
 	default y
 	help
 	  Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 9f883bf76953..33b72144db31 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -30,6 +30,7 @@
  * measurement, and debugging facilities.
  */
 
+#include <linux/compiler.h>
 #include <linux/irqflags.h>
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-l2c.h>
@@ -285,22 +286,22 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
  */
 static void fault_in(uint64_t addr, int len)
 {
-	volatile char *ptr;
-	volatile char dummy;
+	char *ptr;
+
 	/*
 	 * Adjust addr and length so we get all cache lines even for
 	 * small ranges spanning two cache lines.
 	 */
 	len += addr & CVMX_CACHE_LINE_MASK;
 	addr &= ~CVMX_CACHE_LINE_MASK;
-	ptr = (volatile char *)cvmx_phys_to_ptr(addr);
+	ptr = cvmx_phys_to_ptr(addr);
 	/*
 	 * Invalidate L1 cache to make sure all loads result in data
 	 * being in L2.
 	 */
 	CVMX_DCACHE_INVALIDATE;
 	while (len > 0) {
-		dummy += *ptr;
+		ACCESS_ONCE(*ptr);
 		len -= CVMX_CACHE_LINE_SIZE;
 		ptr += CVMX_CACHE_LINE_SIZE;
 	}
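The fault_in() rewrite drops the volatile accumulator in favour of ACCESS_ONCE(), which forces exactly one real load per cache line without needing a dummy variable to accumulate into. The macro of this era is just a volatile-qualified dereference, as in <linux/compiler.h>:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* In the loop above, ACCESS_ONCE(*ptr) performs the load and discards
 * the result, which is all a cache-line-touching loop needs. */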
diff --git a/arch/mips/include/asm/dsp.h b/arch/mips/include/asm/dsp.h
index e9bfc0813c72..7bfad0520e25 100644
--- a/arch/mips/include/asm/dsp.h
+++ b/arch/mips/include/asm/dsp.h
@@ -16,7 +16,7 @@
 #include <asm/mipsregs.h>
 
 #define DSP_DEFAULT	0x00000000
-#define DSP_MASK	0x3ff
+#define DSP_MASK	0x3f
 
 #define __enable_dsp_hazard()						\
 	do {								\
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index ab84064283db..33c34adbecfa 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -353,6 +353,7 @@ union mips_instruction {
 	struct u_format u_format;
 	struct c_format c_format;
 	struct r_format r_format;
+	struct p_format p_format;
 	struct f_format f_format;
 	struct ma_format ma_format;
 	struct b_format b_format;
diff --git a/arch/mips/include/asm/mach-pnx833x/war.h b/arch/mips/include/asm/mach-pnx833x/war.h
index edaa06d9d492..e410df4e1b3a 100644
--- a/arch/mips/include/asm/mach-pnx833x/war.h
+++ b/arch/mips/include/asm/mach-pnx833x/war.h
@@ -21,4 +21,4 @@
 #define R10000_LLSC_WAR			0
 #define MIPS34K_MISSED_ITLB_WAR		0
 
-#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */
+#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index c63191055e69..013d5f781263 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -230,6 +230,7 @@ static inline void pud_clear(pud_t *pudp)
 #else
 #define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
 #define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
 #endif
 
 #define __pgd_offset(address)	pgd_index(address)
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index a1a0452ac185..77d4fb33f75a 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 header-y += auxvec.h
 header-y += bitsperlong.h
+header-y += break.h
 header-y += byteorder.h
 header-y += cachectl.h
 header-y += errno.h
diff --git a/arch/mips/include/asm/break.h b/arch/mips/include/uapi/asm/break.h
index 9161e684cb4c..9161e684cb4c 100644
--- a/arch/mips/include/asm/break.h
+++ b/arch/mips/include/uapi/asm/break.h
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 6a2d758dd8e9..83fa1460e294 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -25,6 +25,12 @@
 #define MCOUNT_OFFSET_INSNS 4
 #endif
 
+/* Arch override because MIPS doesn't need to run this from stop_machine() */
+void arch_ftrace_update_code(int command)
+{
+	ftrace_modify_all_code(command);
+}
+
 /*
  * Check if the address is in kernel space
  *
@@ -89,6 +95,24 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 	return 0;
 }
 
+#ifndef CONFIG_64BIT
+static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
+				unsigned int new_code2)
+{
+	int faulted;
+
+	safe_store_code(new_code1, ip, faulted);
+	if (unlikely(faulted))
+		return -EFAULT;
+	ip += 4;
+	safe_store_code(new_code2, ip, faulted);
+	if (unlikely(faulted))
+		return -EFAULT;
+	flush_icache_range(ip, ip + 8);	/* original ip + 12 */
+	return 0;
+}
+#endif
+
 /*
  * The details about the calling site of mcount on MIPS
  *
@@ -131,8 +155,18 @@ int ftrace_make_nop(struct module *mod,
 	 * needed.
 	 */
 	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
-
+#ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
+#else
+	/*
+	 * On 32 bit MIPS platforms, gcc adds a stack adjust
+	 * instruction in the delay slot after the branch to
+	 * mcount and expects mcount to restore the sp on return.
+	 * This is based on a legacy API and does nothing but
+	 * waste instructions so it's being removed at runtime.
+	 */
+	return ftrace_modify_code_2(ip, new, INSN_NOP);
+#endif
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 4c968e7efb74..165867673357 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -46,9 +46,8 @@
 	PTR_L	a5, PT_R9(sp)
 	PTR_L	a6, PT_R10(sp)
 	PTR_L	a7, PT_R11(sp)
-	PTR_ADDIU	sp, PT_SIZE
 #else
-	PTR_ADDIU	sp, (PT_SIZE + 8)
+	PTR_ADDIU	sp, PT_SIZE
 #endif
 	.endm
 
@@ -69,7 +68,9 @@ NESTED(ftrace_caller, PT_SIZE, ra)
 	.globl _mcount
 _mcount:
 	b	ftrace_stub
-	nop
+	 addiu sp,sp,8
+
+	/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
 	lw	t1, function_trace_stop
 	bnez	t1, ftrace_stub
 	nop
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index eec690af6581..147cec19621d 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -705,7 +705,7 @@ static int vpe_run(struct vpe * v) | |||
705 | 705 | ||
706 | printk(KERN_WARNING | 706 | printk(KERN_WARNING |
707 | "VPE loader: TC %d is already in use.\n", | 707 | "VPE loader: TC %d is already in use.\n", |
708 | t->index); | 708 | v->tc->index); |
709 | return -ENOEXEC; | 709 | return -ENOEXEC; |
710 | } | 710 | } |
711 | } else { | 711 | } else { |
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index f36acd1b3808..a7935bf0fecb 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c | |||
@@ -408,7 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) | |||
408 | #endif | 408 | #endif |
409 | 409 | ||
410 | /* tell oprofile which irq to use */ | 410 | /* tell oprofile which irq to use */ |
411 | cp0_perfcount_irq = LTQ_PERF_IRQ; | 411 | cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); |
412 | 412 | ||
413 | /* | 413 | /* |
414 | * if the timer irq is not one of the mips irqs we need to | 414 | * if the timer irq is not one of the mips irqs we need to |
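Note: with the ICU now behind an irq_domain, a raw hardware interrupt number is no longer a valid Linux irq number; it must be mapped first. A kernel-context sketch of the pattern (LTQ_PERF_IRQ and cp0_perfcount_irq as in the hunk above):

    #include <linux/irqdomain.h>

    extern int cp0_perfcount_irq;  /* MIPS global read by oprofile */

    /* Translate the hardware irq into the Linux virq space before
     * publishing it; handing out the raw number would point consumers
     * at an unrelated (or nonexistent) Linux irq. */
    static void publish_perfcount_irq(struct irq_domain *domain)
    {
        cp0_perfcount_irq = irq_create_mapping(domain, LTQ_PERF_IRQ);
    }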
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index dc81ca8dc0dd..288f7954988d 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c | |||
@@ -21,7 +21,7 @@ void __delay(unsigned long loops) | |||
21 | " .set noreorder \n" | 21 | " .set noreorder \n" |
22 | " .align 3 \n" | 22 | " .align 3 \n" |
23 | "1: bnez %0, 1b \n" | 23 | "1: bnez %0, 1b \n" |
24 | #if __SIZEOF_LONG__ == 4 | 24 | #if BITS_PER_LONG == 32 |
25 | " subu %0, 1 \n" | 25 | " subu %0, 1 \n" |
26 | #else | 26 | #else |
27 | " dsubu %0, 1 \n" | 27 | " dsubu %0, 1 \n" |
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index 7657fd21cd3f..cacfd31e8ec9 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c | |||
@@ -190,9 +190,3 @@ void __iounmap(const volatile void __iomem *addr) | |||
190 | 190 | ||
191 | EXPORT_SYMBOL(__ioremap); | 191 | EXPORT_SYMBOL(__ioremap); |
192 | EXPORT_SYMBOL(__iounmap); | 192 | EXPORT_SYMBOL(__iounmap); |
193 | |||
194 | int __virt_addr_valid(const volatile void *kaddr) | ||
195 | { | ||
196 | return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); | ||
197 | } | ||
198 | EXPORT_SYMBOL_GPL(__virt_addr_valid); | ||
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index d9be7540a6be..7e5fe2790d8a 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c | |||
@@ -192,3 +192,9 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) | |||
192 | 192 | ||
193 | return ret; | 193 | return ret; |
194 | } | 194 | } |
195 | |||
196 | int __virt_addr_valid(const volatile void *kaddr) | ||
197 | { | ||
198 | return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); | ||
199 | } | ||
200 | EXPORT_SYMBOL_GPL(__virt_addr_valid); | ||
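Note: the helper itself is unchanged; the two hunks above only move it from ioremap.c to mmap.c. For context, a hypothetical caller sketch: virt_addr_valid() is the usual entry point, and on MIPS it is backed by this __virt_addr_valid(), which checks that the address translates to an existing page frame:

    /* Kernel-context sketch; page_backing is a hypothetical helper name. */
    static struct page *page_backing(void *kaddr)
    {
        if (!virt_addr_valid(kaddr))  /* uses __virt_addr_valid() on MIPS */
            return NULL;
        return virt_to_page(kaddr);
    }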
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 4e7f49d3d5a8..c5ce6992ac4c 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ b/arch/mips/netlogic/xlr/setup.c | |||
@@ -193,8 +193,11 @@ static void nlm_init_node(void) | |||
193 | 193 | ||
194 | void __init prom_init(void) | 194 | void __init prom_init(void) |
195 | { | 195 | { |
196 | int i, *argv, *envp; /* passed as 32 bit ptrs */ | 196 | int *argv, *envp; /* passed as 32 bit ptrs */ |
197 | struct psb_info *prom_infop; | 197 | struct psb_info *prom_infop; |
198 | #ifdef CONFIG_SMP | ||
199 | int i; | ||
200 | #endif | ||
198 | 201 | ||
199 | /* truncate to 32 bit and sign extend all args */ | 202 | /* truncate to 32 bit and sign extend all args */ |
200 | argv = (int *)(long)(int)fw_arg1; | 203 | argv = (int *)(long)(int)fw_arg1; |
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c index 1552522b8718..6eaa4f2d0e38 100644 --- a/arch/mips/pci/pci-ar71xx.c +++ b/arch/mips/pci/pci-ar71xx.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <asm/mach-ath79/pci.h> | 24 | #include <asm/mach-ath79/pci.h> |
25 | 25 | ||
26 | #define AR71XX_PCI_MEM_BASE 0x10000000 | 26 | #define AR71XX_PCI_MEM_BASE 0x10000000 |
27 | #define AR71XX_PCI_MEM_SIZE 0x08000000 | 27 | #define AR71XX_PCI_MEM_SIZE 0x07000000 |
28 | 28 | ||
29 | #define AR71XX_PCI_WIN0_OFFS 0x10000000 | 29 | #define AR71XX_PCI_WIN0_OFFS 0x10000000 |
30 | #define AR71XX_PCI_WIN1_OFFS 0x11000000 | 30 | #define AR71XX_PCI_WIN1_OFFS 0x11000000 |
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c index 86d77a666458..c11c75be2d7e 100644 --- a/arch/mips/pci/pci-ar724x.c +++ b/arch/mips/pci/pci-ar724x.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #define AR724X_PCI_CTRL_SIZE 0x100 | 21 | #define AR724X_PCI_CTRL_SIZE 0x100 |
22 | 22 | ||
23 | #define AR724X_PCI_MEM_BASE 0x10000000 | 23 | #define AR724X_PCI_MEM_BASE 0x10000000 |
24 | #define AR724X_PCI_MEM_SIZE 0x08000000 | 24 | #define AR724X_PCI_MEM_SIZE 0x04000000 |
25 | 25 | ||
26 | #define AR724X_PCI_REG_RESET 0x18 | 26 | #define AR724X_PCI_REG_RESET 0x18 |
27 | #define AR724X_PCI_REG_INT_STATUS 0x4c | 27 | #define AR724X_PCI_REG_INT_STATUS 0x4c |
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index c1be4397b1ed..a18abfc558eb 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h | |||
@@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size, | |||
168 | mn10300_dcache_flush_inv(); | 168 | mn10300_dcache_flush_inv(); |
169 | } | 169 | } |
170 | 170 | ||
171 | /* Not supported for now */ | ||
172 | static inline int dma_mmap_coherent(struct device *dev, | ||
173 | struct vm_area_struct *vma, void *cpu_addr, | ||
174 | dma_addr_t dma_addr, size_t size) | ||
175 | { | ||
176 | return -EINVAL; | ||
177 | } | ||
178 | |||
179 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
180 | void *cpu_addr, dma_addr_t dma_addr, | ||
181 | size_t size) | ||
182 | { | ||
183 | return -EINVAL; | ||
184 | } | ||
185 | |||
171 | #endif | 186 | #endif |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index b77feffbadea..a32e34ecda9e 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -9,14 +9,12 @@ config PARISC | |||
9 | select RTC_DRV_GENERIC | 9 | select RTC_DRV_GENERIC |
10 | select INIT_ALL_POSSIBLE | 10 | select INIT_ALL_POSSIBLE |
11 | select BUG | 11 | select BUG |
12 | select HAVE_IRQ_WORK | ||
13 | select HAVE_PERF_EVENTS | 12 | select HAVE_PERF_EVENTS |
14 | select GENERIC_ATOMIC64 if !64BIT | 13 | select GENERIC_ATOMIC64 if !64BIT |
15 | select HAVE_GENERIC_HARDIRQS | 14 | select HAVE_GENERIC_HARDIRQS |
16 | select BROKEN_RODATA | 15 | select BROKEN_RODATA |
17 | select GENERIC_IRQ_PROBE | 16 | select GENERIC_IRQ_PROBE |
18 | select GENERIC_PCI_IOMAP | 17 | select GENERIC_PCI_IOMAP |
19 | select IRQ_PER_CPU | ||
20 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 18 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
21 | select GENERIC_SMP_IDLE_THREAD | 19 | select GENERIC_SMP_IDLE_THREAD |
22 | select GENERIC_STRNCPY_FROM_USER | 20 | select GENERIC_STRNCPY_FROM_USER |
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 467bbd510eac..106b395688e1 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h | |||
@@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev); | |||
238 | /* At the moment, we panic on error for IOMMU resource exhaustion */ | 238 | /* At the moment, we panic on error for IOMMU resource exhaustion */ |
239 | #define dma_mapping_error(dev, x) 0 | 239 | #define dma_mapping_error(dev, x) 0 |
240 | 240 | ||
241 | /* This API cannot be supported on PA-RISC */ | ||
242 | static inline int dma_mmap_coherent(struct device *dev, | ||
243 | struct vm_area_struct *vma, void *cpu_addr, | ||
244 | dma_addr_t dma_addr, size_t size) | ||
245 | { | ||
246 | return -EINVAL; | ||
247 | } | ||
248 | |||
249 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
250 | void *cpu_addr, dma_addr_t dma_addr, | ||
251 | size_t size) | ||
252 | { | ||
253 | return -EINVAL; | ||
254 | } | ||
255 | |||
241 | #endif | 256 | #endif |
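Note: mn10300 and parisc gain identical stubs so generic code calling dma_mmap_coherent()/dma_get_sgtable() still compiles and links; callers are expected to treat -EINVAL as "unsupported on this architecture". A hypothetical caller sketch:

    /* Kernel-context sketch: attempt to hand coherent DMA memory to
     * user space; on architectures with the stubs above this fails. */
    static int try_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                                 void *cpu_addr, dma_addr_t handle, size_t size)
    {
        int ret = dma_mmap_coherent(dev, vma, cpu_addr, handle, size);
        /* -EINVAL means the arch cannot map coherent memory into user
         * space at all; the caller must offer a read()/write() path. */
        return ret;
    }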
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 17903f1f356b..561ccca7b1a7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -118,14 +118,12 @@ config PPC | |||
118 | select HAVE_SYSCALL_WRAPPERS if PPC64 | 118 | select HAVE_SYSCALL_WRAPPERS if PPC64 |
119 | select GENERIC_ATOMIC64 if PPC32 | 119 | select GENERIC_ATOMIC64 if PPC32 |
120 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 120 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
121 | select HAVE_IRQ_WORK | ||
122 | select HAVE_PERF_EVENTS | 121 | select HAVE_PERF_EVENTS |
123 | select HAVE_REGS_AND_STACK_ACCESS_API | 122 | select HAVE_REGS_AND_STACK_ACCESS_API |
124 | select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 | 123 | select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 |
125 | select HAVE_GENERIC_HARDIRQS | 124 | select HAVE_GENERIC_HARDIRQS |
126 | select ARCH_WANT_IPC_PARSE_VERSION | 125 | select ARCH_WANT_IPC_PARSE_VERSION |
127 | select SPARSE_IRQ | 126 | select SPARSE_IRQ |
128 | select IRQ_PER_CPU | ||
129 | select IRQ_DOMAIN | 127 | select IRQ_DOMAIN |
130 | select GENERIC_IRQ_SHOW | 128 | select GENERIC_IRQ_SHOW |
131 | select GENERIC_IRQ_SHOW_LEVEL | 129 | select GENERIC_IRQ_SHOW_LEVEL |
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig index 29bb11ec6c64..4f35fc462385 100644 --- a/arch/powerpc/configs/chroma_defconfig +++ b/arch/powerpc/configs/chroma_defconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | CONFIG_PPC64=y | 1 | CONFIG_PPC64=y |
2 | CONFIG_PPC_BOOK3E_64=y | 2 | CONFIG_PPC_BOOK3E_64=y |
3 | # CONFIG_VIRT_CPU_ACCOUNTING is not set | 3 | # CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set |
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_NR_CPUS=256 | 5 | CONFIG_NR_CPUS=256 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index 88fa5c46f66f..f7df8362911f 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | CONFIG_PPC64=y | 1 | CONFIG_PPC64=y |
2 | CONFIG_PPC_BOOK3E_64=y | 2 | CONFIG_PPC_BOOK3E_64=y |
3 | # CONFIG_VIRT_CPU_ACCOUNTING is not set | 3 | # CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set |
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_NR_CPUS=2 | 5 | CONFIG_NR_CPUS=2 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 840a2c2d0430..bcedeea0df89 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | CONFIG_PPC64=y | 1 | CONFIG_PPC64=y |
2 | CONFIG_ALTIVEC=y | 2 | CONFIG_ALTIVEC=y |
3 | # CONFIG_VIRT_CPU_ACCOUNTING is not set | 3 | # CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set |
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_NR_CPUS=2 | 5 | CONFIG_NR_CPUS=2 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 483733bd06d4..607559ab271f 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | * | 10 | * |
11 | * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in | 11 | * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in |
12 | * the same units as the timebase. Otherwise we measure cpu time | 12 | * the same units as the timebase. Otherwise we measure cpu time |
13 | * in jiffies using the generic definitions. | 13 | * in jiffies using the generic definitions. |
14 | */ | 14 | */ |
@@ -16,7 +16,7 @@ | |||
16 | #ifndef __POWERPC_CPUTIME_H | 16 | #ifndef __POWERPC_CPUTIME_H |
17 | #define __POWERPC_CPUTIME_H | 17 | #define __POWERPC_CPUTIME_H |
18 | 18 | ||
19 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 19 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
20 | #include <asm-generic/cputime.h> | 20 | #include <asm-generic/cputime.h> |
21 | #ifdef __KERNEL__ | 21 | #ifdef __KERNEL__ |
22 | static inline void setup_cputime_one_jiffy(void) { } | 22 | static inline void setup_cputime_one_jiffy(void) { } |
@@ -231,5 +231,5 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk) | |||
231 | static inline void arch_vtime_task_switch(struct task_struct *tsk) { } | 231 | static inline void arch_vtime_task_switch(struct task_struct *tsk) { } |
232 | 232 | ||
233 | #endif /* __KERNEL__ */ | 233 | #endif /* __KERNEL__ */ |
234 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 234 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
235 | #endif /* __POWERPC_CPUTIME_H */ | 235 | #endif /* __POWERPC_CPUTIME_H */ |
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 531fe0c3108f..b1e7f2af1016 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h | |||
@@ -145,7 +145,7 @@ struct dtl_entry { | |||
145 | extern struct kmem_cache *dtl_cache; | 145 | extern struct kmem_cache *dtl_cache; |
146 | 146 | ||
147 | /* | 147 | /* |
148 | * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls | 148 | * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls |
149 | * reading from the dispatch trace log. If other code wants to consume | 149 | * reading from the dispatch trace log. If other code wants to consume |
150 | * DTL entries, it can set this pointer to a function that will get | 150 | * DTL entries, it can set this pointer to a function that will get |
151 | * called once for each DTL entry that gets processed. | 151 | * called once for each DTL entry that gets processed. |
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 9710be3a2d17..136bba62efa4 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <asm/hw_irq.h> | 13 | #include <asm/hw_irq.h> |
14 | #include <linux/device.h> | ||
14 | 15 | ||
15 | #define MAX_HWEVENTS 8 | 16 | #define MAX_HWEVENTS 8 |
16 | #define MAX_EVENT_ALTERNATIVES 8 | 17 | #define MAX_EVENT_ALTERNATIVES 8 |
@@ -35,6 +36,7 @@ struct power_pmu { | |||
35 | void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); | 36 | void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); |
36 | int (*limited_pmc_event)(u64 event_id); | 37 | int (*limited_pmc_event)(u64 event_id); |
37 | u32 flags; | 38 | u32 flags; |
39 | const struct attribute_group **attr_groups; | ||
38 | int n_generic; | 40 | int n_generic; |
39 | int *generic_events; | 41 | int *generic_events; |
40 | int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] | 42 | int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] |
@@ -109,3 +111,27 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | |||
109 | * If an event_id is not subject to the constraint expressed by a particular | 111 | * If an event_id is not subject to the constraint expressed by a particular |
110 | * field, then it will have 0 in both the mask and value for that field. | 112 | * field, then it will have 0 in both the mask and value for that field. |
111 | */ | 113 | */ |
114 | |||
115 | extern ssize_t power_events_sysfs_show(struct device *dev, | ||
116 | struct device_attribute *attr, char *page); | ||
117 | |||
118 | /* | ||
119 | * EVENT_VAR() is same as PMU_EVENT_VAR with a suffix. | ||
120 | * | ||
121 | * Having a suffix allows us to have aliases in sysfs - eg: the generic | ||
122 | * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and | ||
123 | * 'PM_CYC' where the latter is the name by which the event is known in | ||
124 | * POWER CPU specification. | ||
125 | */ | ||
126 | #define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix | ||
127 | #define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr | ||
128 | |||
129 | #define EVENT_ATTR(_name, _id, _suffix) \ | ||
130 | PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \ | ||
131 | power_events_sysfs_show) | ||
132 | |||
133 | #define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g) | ||
134 | #define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g) | ||
135 | |||
136 | #define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p) | ||
137 | #define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p) | ||
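Note: roughly what one of these macros expands to, tracing the definitions above (PMU_EVENT_ATTR is the generic helper from <linux/perf_event.h>). GENERIC_EVENT_ATTR(cpu-cycles, CYC) becomes:

    PMU_EVENT_ATTR(cpu-cycles, event_attr_CYC_g, PME_PM_CYC,
                   power_events_sysfs_show);
    /* i.e. a struct perf_pmu_events_attr named event_attr_CYC_g whose id
     * is PME_PM_CYC and whose show routine is power_events_sysfs_show;
     * GENERIC_EVENT_PTR(CYC) then yields &event_attr_CYC_g.attr.attr. */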
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index ea2a86e8ff95..2d0e1f5d8339 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -24,7 +24,7 @@ | |||
24 | * user_time and system_time fields in the paca. | 24 | * user_time and system_time fields in the paca. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 27 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
28 | #define ACCOUNT_CPU_USER_ENTRY(ra, rb) | 28 | #define ACCOUNT_CPU_USER_ENTRY(ra, rb) |
29 | #define ACCOUNT_CPU_USER_EXIT(ra, rb) | 29 | #define ACCOUNT_CPU_USER_EXIT(ra, rb) |
30 | #define ACCOUNT_STOLEN_TIME | 30 | #define ACCOUNT_STOLEN_TIME |
@@ -70,7 +70,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | |||
70 | 70 | ||
71 | #endif /* CONFIG_PPC_SPLPAR */ | 71 | #endif /* CONFIG_PPC_SPLPAR */ |
72 | 72 | ||
73 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 73 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * Macros for storing registers into and loading registers from | 76 | * Macros for storing registers into and loading registers from |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index d22e73e4618b..e514de57a125 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
@@ -439,6 +439,8 @@ ret_from_fork: | |||
439 | ret_from_kernel_thread: | 439 | ret_from_kernel_thread: |
440 | REST_NVGPRS(r1) | 440 | REST_NVGPRS(r1) |
441 | bl schedule_tail | 441 | bl schedule_tail |
442 | li r3,0 | ||
443 | stw r3,0(r1) | ||
442 | mtlr r14 | 444 | mtlr r14 |
443 | mr r3,r15 | 445 | mr r3,r15 |
444 | PPC440EP_ERR42 | 446 | PPC440EP_ERR42 |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index b310a0573625..ac057013f9fd 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -94,7 +94,7 @@ system_call_common: | |||
94 | addi r9,r1,STACK_FRAME_OVERHEAD | 94 | addi r9,r1,STACK_FRAME_OVERHEAD |
95 | ld r11,exception_marker@toc(r2) | 95 | ld r11,exception_marker@toc(r2) |
96 | std r11,-16(r9) /* "regshere" marker */ | 96 | std r11,-16(r9) /* "regshere" marker */ |
97 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR) | 97 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR) |
98 | BEGIN_FW_FTR_SECTION | 98 | BEGIN_FW_FTR_SECTION |
99 | beq 33f | 99 | beq 33f |
100 | /* if from user, see if there are any DTL entries to process */ | 100 | /* if from user, see if there are any DTL entries to process */ |
@@ -110,7 +110,7 @@ BEGIN_FW_FTR_SECTION | |||
110 | addi r9,r1,STACK_FRAME_OVERHEAD | 110 | addi r9,r1,STACK_FRAME_OVERHEAD |
111 | 33: | 111 | 33: |
112 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | 112 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) |
113 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */ | 113 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */ |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * A syscall should always be called with interrupts enabled | 116 | * A syscall should always be called with interrupts enabled |
@@ -664,6 +664,19 @@ resume_kernel: | |||
664 | ld r4,TI_FLAGS(r9) | 664 | ld r4,TI_FLAGS(r9) |
665 | andi. r0,r4,_TIF_NEED_RESCHED | 665 | andi. r0,r4,_TIF_NEED_RESCHED |
666 | bne 1b | 666 | bne 1b |
667 | |||
668 | /* | ||
669 | * arch_local_irq_restore() from preempt_schedule_irq above may | ||
670 | * enable hard interrupts, but we really should disable interrupts | ||
671 | * when we return from the interrupt so that we don't get | ||
672 | * interrupted after loading SRR0/1. | ||
673 | */ | ||
674 | #ifdef CONFIG_PPC_BOOK3E | ||
675 | wrteei 0 | ||
676 | #else | ||
677 | ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ | ||
678 | mtmsrd r10,1 /* Update machine state */ | ||
679 | #endif /* CONFIG_PPC_BOOK3E */ | ||
667 | #endif /* CONFIG_PREEMPT */ | 680 | #endif /* CONFIG_PREEMPT */ |
668 | 681 | ||
669 | .globl fast_exc_return_irq | 682 | .globl fast_exc_return_irq |
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index c470a40b29f5..a7bc7521c064 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c | |||
@@ -154,12 +154,12 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs) | |||
154 | static int kgdb_singlestep(struct pt_regs *regs) | 154 | static int kgdb_singlestep(struct pt_regs *regs) |
155 | { | 155 | { |
156 | struct thread_info *thread_info, *exception_thread_info; | 156 | struct thread_info *thread_info, *exception_thread_info; |
157 | struct thread_info *backup_current_thread_info = \ | 157 | struct thread_info *backup_current_thread_info; |
158 | (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); | ||
159 | 158 | ||
160 | if (user_mode(regs)) | 159 | if (user_mode(regs)) |
161 | return 0; | 160 | return 0; |
162 | 161 | ||
162 | backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); | ||
163 | /* | 163 | /* |
164 | * On Book E and perhaps other processors, singlestep is handled on | 164 | * On Book E and perhaps other processors, singlestep is handled on |
165 | * the critical exception stack. This causes current_thread_info() | 165 | * the critical exception stack. This causes current_thread_info() |
@@ -185,6 +185,7 @@ static int kgdb_singlestep(struct pt_regs *regs) | |||
185 | /* Restore current_thread_info lastly. */ | 185 | /* Restore current_thread_info lastly. */ |
186 | memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); | 186 | memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); |
187 | 187 | ||
188 | kfree(backup_current_thread_info); | ||
188 | return 1; | 189 | return 1; |
189 | } | 190 | } |
190 | 191 | ||
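Note: the fix above is a leak fix twice over: the buffer used to be allocated before the user_mode() early return (and never freed on that path), and it was never freed on the normal path either. The general shape, as a kernel-context sketch (the real code does not check the kmalloc() result; that is noted here as a comment, not endorsed):

    #include <linux/slab.h>
    #include <linux/string.h>

    static int singlestep_with_backup(struct pt_regs *regs,
                                      void *live, size_t len)
    {
        void *backup;

        if (user_mode(regs))
            return 0;                /* nothing allocated yet: no leak */

        backup = kmalloc(len, GFP_KERNEL);
        if (!backup)
            return -ENOMEM;          /* sketch only; kgdb omits this check */
        memcpy(backup, live, len);
        /* ... single-step machinery that may clobber *live ... */
        memcpy(live, backup, len);
        kfree(backup);               /* the kfree() added by the patch */
        return 1;
    }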
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 6f6b1cccc916..f77fa22754bc 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -143,7 +143,7 @@ EXPORT_SYMBOL_GPL(ppc_proc_freq); | |||
143 | unsigned long ppc_tb_freq; | 143 | unsigned long ppc_tb_freq; |
144 | EXPORT_SYMBOL_GPL(ppc_tb_freq); | 144 | EXPORT_SYMBOL_GPL(ppc_tb_freq); |
145 | 145 | ||
146 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 146 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
147 | /* | 147 | /* |
148 | * Factors for converting from cputime_t (timebase ticks) to | 148 | * Factors for converting from cputime_t (timebase ticks) to |
149 | * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). | 149 | * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). |
@@ -347,6 +347,7 @@ void vtime_account_system(struct task_struct *tsk) | |||
347 | if (stolen) | 347 | if (stolen) |
348 | account_steal_time(stolen); | 348 | account_steal_time(stolen); |
349 | } | 349 | } |
350 | EXPORT_SYMBOL_GPL(vtime_account_system); | ||
350 | 351 | ||
351 | void vtime_account_idle(struct task_struct *tsk) | 352 | void vtime_account_idle(struct task_struct *tsk) |
352 | { | 353 | { |
@@ -377,7 +378,7 @@ void vtime_account_user(struct task_struct *tsk) | |||
377 | account_user_time(tsk, utime, utimescaled); | 378 | account_user_time(tsk, utime, utimescaled); |
378 | } | 379 | } |
379 | 380 | ||
380 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ | 381 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
381 | #define calc_cputime_factors() | 382 | #define calc_cputime_factors() |
382 | #endif | 383 | #endif |
383 | 384 | ||
@@ -494,10 +495,15 @@ void timer_interrupt(struct pt_regs * regs) | |||
494 | set_dec(DECREMENTER_MAX); | 495 | set_dec(DECREMENTER_MAX); |
495 | 496 | ||
496 | /* Some implementations of hotplug will get timer interrupts while | 497 | /* Some implementations of hotplug will get timer interrupts while |
497 | * offline, just ignore these | 498 | * offline, just ignore these. We also need to set
499 | * decrementers_next_tb to the maximum so that __check_irq_replay | ||
500 | * doesn't replay the timer interrupt on return; otherwise we'll | ||
501 | * trap here infinitely :( | ||
498 | */ | 502 | */ |
499 | if (!cpu_online(smp_processor_id())) | 503 | if (!cpu_online(smp_processor_id())) { |
504 | *next_tb = ~(u64)0; | ||
500 | return; | 505 | return; |
506 | } | ||
501 | 507 | ||
502 | /* Conditionally hard-enable interrupts now that the DEC has been | 508 | /* Conditionally hard-enable interrupts now that the DEC has been |
503 | * bumped to its maximum value | 509 | * bumped to its maximum value |
@@ -663,7 +669,7 @@ int update_persistent_clock(struct timespec now) | |||
663 | struct rtc_time tm; | 669 | struct rtc_time tm; |
664 | 670 | ||
665 | if (!ppc_md.set_rtc_time) | 671 | if (!ppc_md.set_rtc_time) |
666 | return 0; | 672 | return -ENODEV; |
667 | 673 | ||
668 | to_tm(now.tv_sec + 1 + timezone_offset, &tm); | 674 | to_tm(now.tv_sec + 1 + timezone_offset, &tm); |
669 | tm.tm_year -= 1900; | 675 | tm.tm_year -= 1900; |
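Note: two independent fixes above. The offline-CPU guard parks the per-cpu next expiry at the maximum timebase value so that __check_irq_replay cannot re-raise the tick in an endless loop, and update_persistent_clock() now reports a missing RTC as -ENODEV instead of pretending success. The sentinel, as a standalone sketch:

    #include <stdint.h>

    /* ~(u64)0 is the largest timebase value, i.e. "never expires"; with
     * decrementers_next_tb parked here, the replay check (next_tb <= now)
     * can no longer fire for this CPU. */
    static void park_decrementer(uint64_t *next_tb)
    {
        *next_tb = ~(uint64_t)0;
    }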
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 56585086413a..7443481a315c 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S | |||
@@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
115 | sldi r29,r5,SID_SHIFT - VPN_SHIFT | 115 | sldi r29,r5,SID_SHIFT - VPN_SHIFT |
116 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) | 116 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) |
117 | or r29,r28,r29 | 117 | or r29,r28,r29 |
118 | 118 | /* | |
119 | /* Calculate hash value for primary slot and store it in r28 */ | 119 | * Calculate hash value for primary slot and store it in r28 |
120 | rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ | 120 | * r3 = va, r5 = vsid |
121 | rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ | 121 | * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) |
122 | xor r28,r5,r0 | 122 | */ |
123 | rldicl r0,r3,64-12,48 | ||
124 | xor r28,r5,r0 /* hash */ | ||
123 | b 4f | 125 | b 4f |
124 | 126 | ||
125 | 3: /* Calc vpn and put it in r29 */ | 127 | 3: /* Calc vpn and put it in r29 */ |
@@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
130 | /* | 132 | /* |
131 | * calculate hash value for primary slot and | 133 | * calculate hash value for primary slot and |
132 | * store it in r28 for 1T segment | 134 | * store it in r28 for 1T segment |
135 | * r3 = va, r5 = vsid | ||
133 | */ | 136 | */ |
134 | rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ | 137 | sldi r28,r5,25 /* vsid << 25 */ |
135 | clrldi r5,r5,40 /* vsid & 0xffffff */ | 138 | /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ |
136 | rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ | 139 | rldicl r0,r3,64-12,36 |
137 | xor r28,r28,r5 | 140 | xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ |
138 | xor r28,r28,r0 /* hash */ | 141 | xor r28,r28,r0 /* hash */ |
139 | 142 | ||
140 | /* Convert linux PTE bits into HW equivalents */ | 143 | /* Convert linux PTE bits into HW equivalents */ |
@@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
407 | */ | 410 | */ |
408 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) | 411 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) |
409 | or r29,r28,r29 | 412 | or r29,r28,r29 |
410 | 413 | /* | |
411 | /* Calculate hash value for primary slot and store it in r28 */ | 414 | * Calculate hash value for primary slot and store it in r28 |
412 | rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ | 415 | * r3 = va, r5 = vsid |
413 | rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ | 416 | * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) |
414 | xor r28,r5,r0 | 417 | */ |
418 | rldicl r0,r3,64-12,48 | ||
419 | xor r28,r5,r0 /* hash */ | ||
415 | b 4f | 420 | b 4f |
416 | 421 | ||
417 | 3: /* Calc vpn and put it in r29 */ | 422 | 3: /* Calc vpn and put it in r29 */ |
@@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
426 | /* | 431 | /* |
427 | * Calculate hash value for primary slot and | 432 | * Calculate hash value for primary slot and |
428 | * store it in r28 for 1T segment | 433 | * store it in r28 for 1T segment |
434 | * r3 = va, r5 = vsid | ||
429 | */ | 435 | */ |
430 | rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ | 436 | sldi r28,r5,25 /* vsid << 25 */ |
431 | clrldi r5,r5,40 /* vsid & 0xffffff */ | 437 | /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ |
432 | rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ | 438 | rldicl r0,r3,64-12,36 |
433 | xor r28,r28,r5 | 439 | xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ |
434 | xor r28,r28,r0 /* hash */ | 440 | xor r28,r28,r0 /* hash */ |
435 | 441 | ||
436 | /* Convert linux PTE bits into HW equivalents */ | 442 | /* Convert linux PTE bits into HW equivalents */ |
@@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
752 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) | 758 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) |
753 | or r29,r28,r29 | 759 | or r29,r28,r29 |
754 | 760 | ||
755 | /* Calculate hash value for primary slot and store it in r28 */ | 761 | /* Calculate hash value for primary slot and store it in r28 |
756 | rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ | 762 | * r3 = va, r5 = vsid |
757 | rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */ | 763 | * r0 = (va >> 16) & ((1ul << (28 - 16)) -1) |
758 | xor r28,r5,r0 | 764 | */ |
765 | rldicl r0,r3,64-16,52 | ||
766 | xor r28,r5,r0 /* hash */ | ||
759 | b 4f | 767 | b 4f |
760 | 768 | ||
761 | 3: /* Calc vpn and put it in r29 */ | 769 | 3: /* Calc vpn and put it in r29 */ |
762 | sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT | 770 | sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT |
763 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) | 771 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) |
764 | or r29,r28,r29 | 772 | or r29,r28,r29 |
765 | |||
766 | /* | 773 | /* |
767 | * calculate hash value for primary slot and | 774 | * calculate hash value for primary slot and |
768 | * store it in r28 for 1T segment | 775 | * store it in r28 for 1T segment |
776 | * r3 = va, r5 = vsid | ||
769 | */ | 777 | */ |
770 | rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ | 778 | sldi r28,r5,25 /* vsid << 25 */ |
771 | clrldi r5,r5,40 /* vsid & 0xffffff */ | 779 | /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */ |
772 | rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ | 780 | rldicl r0,r3,64-16,40 |
773 | xor r28,r28,r5 | 781 | xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ |
774 | xor r28,r28,r0 /* hash */ | 782 | xor r28,r28,r0 /* hash */ |
775 | 783 | ||
776 | /* Convert linux PTE bits into HW equivalents */ | 784 | /* Convert linux PTE bits into HW equivalents */ |
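Note: the rewritten comments document the primary hash this assembly computes. In standalone C, for the 4K-base-page hunks (constants taken directly from the comments above; the result is later folded by the hash-table mask):

    #include <stdint.h>

    /* 256MB segment: hash = vsid ^ ((va >> 12) & 0xffff) */
    static uint64_t hash_256m_4k(uint64_t va, uint64_t vsid)
    {
        return vsid ^ ((va >> 12) & ((1ul << (28 - 12)) - 1));
    }

    /* 1TB segment: hash = vsid ^ (vsid << 25) ^ ((va >> 12) & 0xfffffff) */
    static uint64_t hash_1t_4k(uint64_t va, uint64_t vsid)
    {
        return vsid ^ (vsid << 25) ^ ((va >> 12) & ((1ul << (40 - 12)) - 1));
    }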
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 315f9495e9b2..f444b94935f5 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c | |||
@@ -52,7 +52,7 @@ static int power7_marked_instr_event(u64 mmcr1) | |||
52 | for (pmc = 0; pmc < 4; pmc++) { | 52 | for (pmc = 0; pmc < 4; pmc++) { |
53 | psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK | 53 | psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK |
54 | << (OPROFILE_MAX_PMC_NUM - pmc) | 54 | << (OPROFILE_MAX_PMC_NUM - pmc) |
55 | * OPROFILE_MAX_PMC_NUM); | 55 | * OPROFILE_PMSEL_FIELD_WIDTH); |
56 | psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc) | 56 | psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc) |
57 | * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL; | 57 | * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL; |
58 | unit = mmcr1 & (OPROFILE_PM_UNIT_MSK | 58 | unit = mmcr1 & (OPROFILE_PM_UNIT_MSK |
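Note: the one-word fix above aligns the mask's shift with the value's shift: both must move by (OPROFILE_MAX_PMC_NUM - pmc) * OPROFILE_PMSEL_FIELD_WIDTH, otherwise the mask selects the wrong bits of MMCR1. An equivalent shift-then-mask sketch (names as used in the file):

    /* Extract the PMCSEL field for counter pmc from mmcr1. */
    static unsigned long pmcsel_of(unsigned long mmcr1, int pmc)
    {
        int shift = (OPROFILE_MAX_PMC_NUM - pmc) * OPROFILE_PMSEL_FIELD_WIDTH;
        return (mmcr1 >> shift) & OPROFILE_PM_PMCSEL_MSK;
    }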
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index aa2465e21f1a..fa476d50791f 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -1305,6 +1305,16 @@ static int power_pmu_event_idx(struct perf_event *event) | |||
1305 | return event->hw.idx; | 1305 | return event->hw.idx; |
1306 | } | 1306 | } |
1307 | 1307 | ||
1308 | ssize_t power_events_sysfs_show(struct device *dev, | ||
1309 | struct device_attribute *attr, char *page) | ||
1310 | { | ||
1311 | struct perf_pmu_events_attr *pmu_attr; | ||
1312 | |||
1313 | pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); | ||
1314 | |||
1315 | return sprintf(page, "event=0x%02llx\n", pmu_attr->id); | ||
1316 | } | ||
1317 | |||
1308 | struct pmu power_pmu = { | 1318 | struct pmu power_pmu = { |
1309 | .pmu_enable = power_pmu_enable, | 1319 | .pmu_enable = power_pmu_enable, |
1310 | .pmu_disable = power_pmu_disable, | 1320 | .pmu_disable = power_pmu_disable, |
@@ -1537,6 +1547,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu) | |||
1537 | pr_info("%s performance monitor hardware support registered\n", | 1547 | pr_info("%s performance monitor hardware support registered\n", |
1538 | pmu->name); | 1548 | pmu->name); |
1539 | 1549 | ||
1550 | power_pmu.attr_groups = ppmu->attr_groups; | ||
1551 | |||
1540 | #ifdef MSR_HV | 1552 | #ifdef MSR_HV |
1541 | /* | 1553 | /* |
1542 | * Use FCHV to ignore kernel events if MSR.HV is set. | 1554 | * Use FCHV to ignore kernel events if MSR.HV is set. |
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index 2ee01e38d5e2..b554879bd31e 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c | |||
@@ -51,6 +51,18 @@ | |||
51 | #define MMCR1_PMCSEL_MSK 0xff | 51 | #define MMCR1_PMCSEL_MSK 0xff |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Power7 event codes. | ||
55 | */ | ||
56 | #define PME_PM_CYC 0x1e | ||
57 | #define PME_PM_GCT_NOSLOT_CYC 0x100f8 | ||
58 | #define PME_PM_CMPLU_STALL 0x4000a | ||
59 | #define PME_PM_INST_CMPL 0x2 | ||
60 | #define PME_PM_LD_REF_L1 0xc880 | ||
61 | #define PME_PM_LD_MISS_L1 0x400f0 | ||
62 | #define PME_PM_BRU_FIN 0x10068 | ||
63 | #define PME_PM_BRU_MPRED 0x400f6 | ||
64 | |||
65 | /* | ||
54 | * Layout of constraint bits: | 66 | * Layout of constraint bits: |
55 | * 6666555555555544444444443333333333222222222211111111110000000000 | 67 | * 6666555555555544444444443333333333222222222211111111110000000000 |
56 | * 3210987654321098765432109876543210987654321098765432109876543210 | 68 | * 3210987654321098765432109876543210987654321098765432109876543210 |
@@ -307,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[]) | |||
307 | } | 319 | } |
308 | 320 | ||
309 | static int power7_generic_events[] = { | 321 | static int power7_generic_events[] = { |
310 | [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, | 322 | [PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC, |
311 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */ | 323 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC, |
312 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */ | 324 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL, |
313 | [PERF_COUNT_HW_INSTRUCTIONS] = 2, | 325 | [PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL, |
314 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/ | 326 | [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1, |
315 | [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ | 327 | [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1, |
316 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */ | 328 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN, |
317 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ | 329 | [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED, |
318 | }; | 330 | }; |
319 | 331 | ||
320 | #define C(x) PERF_COUNT_HW_CACHE_##x | 332 | #define C(x) PERF_COUNT_HW_CACHE_##x |
@@ -362,6 +374,57 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
362 | }, | 374 | }, |
363 | }; | 375 | }; |
364 | 376 | ||
377 | |||
378 | GENERIC_EVENT_ATTR(cpu-cycles, CYC); | ||
379 | GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC); | ||
380 | GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL); | ||
381 | GENERIC_EVENT_ATTR(instructions, INST_CMPL); | ||
382 | GENERIC_EVENT_ATTR(cache-references, LD_REF_L1); | ||
383 | GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1); | ||
384 | GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN); | ||
385 | GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED); | ||
386 | |||
387 | POWER_EVENT_ATTR(CYC, CYC); | ||
388 | POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC); | ||
389 | POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL); | ||
390 | POWER_EVENT_ATTR(INST_CMPL, INST_CMPL); | ||
391 | POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1); | ||
392 | POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1); | ||
393 | POWER_EVENT_ATTR(BRU_FIN, BRU_FIN); | ||
394 | POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED); | ||
395 | |||
396 | static struct attribute *power7_events_attr[] = { | ||
397 | GENERIC_EVENT_PTR(CYC), | ||
398 | GENERIC_EVENT_PTR(GCT_NOSLOT_CYC), | ||
399 | GENERIC_EVENT_PTR(CMPLU_STALL), | ||
400 | GENERIC_EVENT_PTR(INST_CMPL), | ||
401 | GENERIC_EVENT_PTR(LD_REF_L1), | ||
402 | GENERIC_EVENT_PTR(LD_MISS_L1), | ||
403 | GENERIC_EVENT_PTR(BRU_FIN), | ||
404 | GENERIC_EVENT_PTR(BRU_MPRED), | ||
405 | |||
406 | POWER_EVENT_PTR(CYC), | ||
407 | POWER_EVENT_PTR(GCT_NOSLOT_CYC), | ||
408 | POWER_EVENT_PTR(CMPLU_STALL), | ||
409 | POWER_EVENT_PTR(INST_CMPL), | ||
410 | POWER_EVENT_PTR(LD_REF_L1), | ||
411 | POWER_EVENT_PTR(LD_MISS_L1), | ||
412 | POWER_EVENT_PTR(BRU_FIN), | ||
413 | POWER_EVENT_PTR(BRU_MPRED), | ||
414 | NULL | ||
415 | }; | ||
416 | |||
417 | |||
418 | static struct attribute_group power7_pmu_events_group = { | ||
419 | .name = "events", | ||
420 | .attrs = power7_events_attr, | ||
421 | }; | ||
422 | |||
423 | static const struct attribute_group *power7_pmu_attr_groups[] = { | ||
424 | &power7_pmu_events_group, | ||
425 | NULL, | ||
426 | }; | ||
427 | |||
365 | static struct power_pmu power7_pmu = { | 428 | static struct power_pmu power7_pmu = { |
366 | .name = "POWER7", | 429 | .name = "POWER7", |
367 | .n_counter = 6, | 430 | .n_counter = 6, |
@@ -373,6 +436,7 @@ static struct power_pmu power7_pmu = { | |||
373 | .get_alternatives = power7_get_alternatives, | 436 | .get_alternatives = power7_get_alternatives, |
374 | .disable_pmc = power7_disable_pmc, | 437 | .disable_pmc = power7_disable_pmc, |
375 | .flags = PPMU_ALT_SIPR, | 438 | .flags = PPMU_ALT_SIPR, |
439 | .attr_groups = power7_pmu_attr_groups, | ||
376 | .n_generic = ARRAY_SIZE(power7_generic_events), | 440 | .n_generic = ARRAY_SIZE(power7_generic_events), |
377 | .generic_events = power7_generic_events, | 441 | .generic_events = power7_generic_events, |
378 | .cache_events = &power7_cache_events, | 442 | .cache_events = &power7_cache_events, |
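Note: once attr_groups is wired through register_power_pmu(), the perf core should publish each attribute above as a sysfs file whose contents come from power_events_sysfs_show(). A user-space sketch (the "cpu" PMU name and the exact path are assumptions):

    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        FILE *f = fopen("/sys/bus/event_source/devices/cpu/events/cpu-cycles",
                        "r");
        if (!f)
            return 1;
        if (fgets(buf, sizeof(buf), f))
            printf("%s", buf);   /* expected: event=0x1e */
        fclose(f);
        return 0;
    }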
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 25db92a8e1cf..49318385d4fa 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <linux/sched/rt.h> | ||
27 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
28 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c index 95d00173029f..890f30e70f98 100644 --- a/arch/powerpc/platforms/pasemi/cpufreq.c +++ b/arch/powerpc/platforms/pasemi/cpufreq.c | |||
@@ -236,6 +236,13 @@ out: | |||
236 | 236 | ||
237 | static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) | 237 | static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) |
238 | { | 238 | { |
239 | /* | ||
240 | * We don't support CPU hotplug. Don't unmap after the system | ||
241 | * has already made it to a running state. | ||
242 | */ | ||
243 | if (system_state != SYSTEM_BOOTING) | ||
244 | return 0; | ||
245 | |||
239 | if (sdcasr_mapbase) | 246 | if (sdcasr_mapbase) |
240 | iounmap(sdcasr_mapbase); | 247 | iounmap(sdcasr_mapbase); |
241 | if (sdcpwr_mapbase) | 248 | if (sdcpwr_mapbase) |
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index a7648543c59e..0cc0ac07a55d 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c | |||
@@ -57,7 +57,7 @@ static u8 dtl_event_mask = 0x7; | |||
57 | */ | 57 | */ |
58 | static int dtl_buf_entries = N_DISPATCH_LOG; | 58 | static int dtl_buf_entries = N_DISPATCH_LOG; |
59 | 59 | ||
60 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 60 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
61 | struct dtl_ring { | 61 | struct dtl_ring { |
62 | u64 write_index; | 62 | u64 write_index; |
63 | struct dtl_entry *write_ptr; | 63 | struct dtl_entry *write_ptr; |
@@ -142,7 +142,7 @@ static u64 dtl_current_index(struct dtl *dtl) | |||
142 | return per_cpu(dtl_rings, dtl->cpu).write_index; | 142 | return per_cpu(dtl_rings, dtl->cpu).write_index; |
143 | } | 143 | } |
144 | 144 | ||
145 | #else /* CONFIG_VIRT_CPU_ACCOUNTING */ | 145 | #else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
146 | 146 | ||
147 | static int dtl_start(struct dtl *dtl) | 147 | static int dtl_start(struct dtl *dtl) |
148 | { | 148 | { |
@@ -188,7 +188,7 @@ static u64 dtl_current_index(struct dtl *dtl) | |||
188 | { | 188 | { |
189 | return lppaca_of(dtl->cpu).dtl_idx; | 189 | return lppaca_of(dtl->cpu).dtl_idx; |
190 | } | 190 | } |
191 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 191 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
192 | 192 | ||
193 | static int dtl_enable(struct dtl *dtl) | 193 | static int dtl_enable(struct dtl *dtl) |
194 | { | 194 | { |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index ca55882465d6..527e12c9573b 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -281,7 +281,7 @@ static struct notifier_block pci_dn_reconfig_nb = { | |||
281 | 281 | ||
282 | struct kmem_cache *dtl_cache; | 282 | struct kmem_cache *dtl_cache; |
283 | 283 | ||
284 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 284 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
285 | /* | 285 | /* |
286 | * Allocate space for the dispatch trace log for all possible cpus | 286 | * Allocate space for the dispatch trace log for all possible cpus |
287 | * and register the buffers with the hypervisor. This is used for | 287 | * and register the buffers with the hypervisor. This is used for |
@@ -332,12 +332,12 @@ static int alloc_dispatch_logs(void) | |||
332 | 332 | ||
333 | return 0; | 333 | return 0; |
334 | } | 334 | } |
335 | #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ | 335 | #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
336 | static inline int alloc_dispatch_logs(void) | 336 | static inline int alloc_dispatch_logs(void) |
337 | { | 337 | { |
338 | return 0; | 338 | return 0; |
339 | } | 339 | } |
340 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 340 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
341 | 341 | ||
342 | static int alloc_dispatch_log_kmem_cache(void) | 342 | static int alloc_dispatch_log_kmem_cache(void) |
343 | { | 343 | { |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b5ea38c25647..c15ba7d1be64 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -78,7 +78,6 @@ config S390 | |||
78 | select HAVE_KVM if 64BIT | 78 | select HAVE_KVM if 64BIT |
79 | select HAVE_ARCH_TRACEHOOK | 79 | select HAVE_ARCH_TRACEHOOK |
80 | select INIT_ALL_POSSIBLE | 80 | select INIT_ALL_POSSIBLE |
81 | select HAVE_IRQ_WORK | ||
82 | select HAVE_PERF_EVENTS | 81 | select HAVE_PERF_EVENTS |
83 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 82 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
84 | select HAVE_DEBUG_KMEMLEAK | 83 | select HAVE_DEBUG_KMEMLEAK |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index c1d7930a82f4..098adbb62660 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -1365,6 +1365,18 @@ static inline void pmdp_invalidate(struct vm_area_struct *vma, | |||
1365 | __pmd_idte(address, pmdp); | 1365 | __pmd_idte(address, pmdp); |
1366 | } | 1366 | } |
1367 | 1367 | ||
1368 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT | ||
1369 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, | ||
1370 | unsigned long address, pmd_t *pmdp) | ||
1371 | { | ||
1372 | pmd_t pmd = *pmdp; | ||
1373 | |||
1374 | if (pmd_write(pmd)) { | ||
1375 | __pmd_idte(address, pmdp); | ||
1376 | set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); | ||
1377 | } | ||
1378 | } | ||
1379 | |||
1368 | static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) | 1380 | static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) |
1369 | { | 1381 | { |
1370 | pmd_t __pmd; | 1382 | pmd_t __pmd; |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index a5f4f5a1d24b..0aa98db8a80d 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires, | |||
120 | nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); | 120 | nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); |
121 | do_div(nsecs, 125); | 121 | do_div(nsecs, 125); |
122 | S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); | 122 | S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); |
123 | /* Program the maximum value if we have an overflow (== year 2042) */ | ||
124 | if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc)) | ||
125 | S390_lowcore.clock_comparator = -1ULL; | ||
123 | set_clock_comparator(S390_lowcore.clock_comparator); | 126 | set_clock_comparator(S390_lowcore.clock_comparator); |
124 | return 0; | 127 | return 0; |
125 | } | 128 | } |
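Note: the added check is the standard unsigned-wraparound guard: if base + offset wrapped, the sum compares below the base, and the code clamps to the all-ones "maximum time" value. Standalone sketch:

    #include <stdint.h>

    static uint64_t clamp_add_u64(uint64_t base, uint64_t offset)
    {
        uint64_t sum = base + offset;
        return (sum < base) ? ~0ULL : sum;  /* -1ULL == latest possible time */
    }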
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index e84b8b68444a..ce9cc5aa2033 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -127,7 +127,7 @@ void vtime_account_user(struct task_struct *tsk) | |||
127 | * Update process times based on virtual cpu times stored by entry.S | 127 | * Update process times based on virtual cpu times stored by entry.S |
128 | * to the lowcore fields user_timer, system_timer & steal_clock. | 128 | * to the lowcore fields user_timer, system_timer & steal_clock. |
129 | */ | 129 | */ |
130 | void vtime_account(struct task_struct *tsk) | 130 | void vtime_account_irq_enter(struct task_struct *tsk) |
131 | { | 131 | { |
132 | struct thread_info *ti = task_thread_info(tsk); | 132 | struct thread_info *ti = task_thread_info(tsk); |
133 | u64 timer, system; | 133 | u64 timer, system; |
@@ -145,10 +145,10 @@ void vtime_account(struct task_struct *tsk) | |||
145 | 145 | ||
146 | virt_timer_forward(system); | 146 | virt_timer_forward(system); |
147 | } | 147 | } |
148 | EXPORT_SYMBOL_GPL(vtime_account); | 148 | EXPORT_SYMBOL_GPL(vtime_account_irq_enter); |
149 | 149 | ||
150 | void vtime_account_system(struct task_struct *tsk) | 150 | void vtime_account_system(struct task_struct *tsk) |
151 | __attribute__((alias("vtime_account"))); | 151 | __attribute__((alias("vtime_account_irq_enter"))); |
152 | EXPORT_SYMBOL_GPL(vtime_account_system); | 152 | EXPORT_SYMBOL_GPL(vtime_account_system); |
153 | 153 | ||
154 | void __kprobes vtime_stop_cpu(void) | 154 | void __kprobes vtime_stop_cpu(void) |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index babc2b826c5c..9c833c585871 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -11,7 +11,6 @@ config SUPERH | |||
11 | select HAVE_ARCH_TRACEHOOK | 11 | select HAVE_ARCH_TRACEHOOK |
12 | select HAVE_DMA_API_DEBUG | 12 | select HAVE_DMA_API_DEBUG |
13 | select HAVE_DMA_ATTRS | 13 | select HAVE_DMA_ATTRS |
14 | select HAVE_IRQ_WORK | ||
15 | select HAVE_PERF_EVENTS | 14 | select HAVE_PERF_EVENTS |
16 | select HAVE_DEBUG_BUGVERBOSE | 15 | select HAVE_DEBUG_BUGVERBOSE |
17 | select ARCH_HAVE_CUSTOM_GPIO_H | 16 | select ARCH_HAVE_CUSTOM_GPIO_H |
@@ -91,9 +90,6 @@ config GENERIC_CSUM | |||
91 | config GENERIC_HWEIGHT | 90 | config GENERIC_HWEIGHT |
92 | def_bool y | 91 | def_bool y |
93 | 92 | ||
94 | config IRQ_PER_CPU | ||
95 | def_bool y | ||
96 | |||
97 | config GENERIC_GPIO | 93 | config GENERIC_GPIO |
98 | def_bool n | 94 | def_bool n |
99 | 95 | ||
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 9f2edb5c5551..9bff3db17c8c 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -23,7 +23,6 @@ config SPARC | |||
23 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 23 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
24 | select RTC_CLASS | 24 | select RTC_CLASS |
25 | select RTC_DRV_M48T59 | 25 | select RTC_DRV_M48T59 |
26 | select HAVE_IRQ_WORK | ||
27 | select HAVE_DMA_ATTRS | 26 | select HAVE_DMA_ATTRS |
28 | select HAVE_DMA_API_DEBUG | 27 | select HAVE_DMA_API_DEBUG |
29 | select HAVE_ARCH_JUMP_LABEL | 28 | select HAVE_ARCH_JUMP_LABEL |
@@ -61,6 +60,7 @@ config SPARC64 | |||
61 | select HAVE_MEMBLOCK | 60 | select HAVE_MEMBLOCK |
62 | select HAVE_MEMBLOCK_NODE_MAP | 61 | select HAVE_MEMBLOCK_NODE_MAP |
63 | select HAVE_SYSCALL_WRAPPERS | 62 | select HAVE_SYSCALL_WRAPPERS |
63 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE | ||
64 | select HAVE_DYNAMIC_FTRACE | 64 | select HAVE_DYNAMIC_FTRACE |
65 | select HAVE_FTRACE_MCOUNT_RECORD | 65 | select HAVE_FTRACE_MCOUNT_RECORD |
66 | select HAVE_SYSCALL_TRACEPOINTS | 66 | select HAVE_SYSCALL_TRACEPOINTS |
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 7870be0f5adc..08fcce90316b 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
@@ -71,7 +71,6 @@ | |||
71 | #define PMD_PADDR _AC(0xfffffffe,UL) | 71 | #define PMD_PADDR _AC(0xfffffffe,UL) |
72 | #define PMD_PADDR_SHIFT _AC(11,UL) | 72 | #define PMD_PADDR_SHIFT _AC(11,UL) |
73 | 73 | ||
74 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
75 | #define PMD_ISHUGE _AC(0x00000001,UL) | 74 | #define PMD_ISHUGE _AC(0x00000001,UL) |
76 | 75 | ||
77 | /* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge | 76 | /* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge |
@@ -86,7 +85,6 @@ | |||
86 | #define PMD_HUGE_ACCESSED _AC(0x00000080,UL) | 85 | #define PMD_HUGE_ACCESSED _AC(0x00000080,UL) |
87 | #define PMD_HUGE_EXEC _AC(0x00000040,UL) | 86 | #define PMD_HUGE_EXEC _AC(0x00000040,UL) |
88 | #define PMD_HUGE_SPLITTING _AC(0x00000020,UL) | 87 | #define PMD_HUGE_SPLITTING _AC(0x00000020,UL) |
89 | #endif | ||
90 | 88 | ||
91 | /* PGDs point to PMD tables which are 8K aligned. */ | 89 | /* PGDs point to PMD tables which are 8K aligned. */ |
92 | #define PGD_PADDR _AC(0xfffffffc,UL) | 90 | #define PGD_PADDR _AC(0xfffffffc,UL) |
@@ -628,6 +626,12 @@ static inline unsigned long pte_special(pte_t pte) | |||
628 | return pte_val(pte) & _PAGE_SPECIAL; | 626 | return pte_val(pte) & _PAGE_SPECIAL; |
629 | } | 627 | } |
630 | 628 | ||
629 | static inline int pmd_large(pmd_t pmd) | ||
630 | { | ||
631 | return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) == | ||
632 | (PMD_ISHUGE | PMD_HUGE_PRESENT); | ||
633 | } | ||
634 | |||
631 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 635 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
632 | static inline int pmd_young(pmd_t pmd) | 636 | static inline int pmd_young(pmd_t pmd) |
633 | { | 637 | { |
@@ -646,12 +650,6 @@ static inline unsigned long pmd_pfn(pmd_t pmd) | |||
646 | return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT); | 650 | return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT); |
647 | } | 651 | } |
648 | 652 | ||
649 | static inline int pmd_large(pmd_t pmd) | ||
650 | { | ||
651 | return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) == | ||
652 | (PMD_ISHUGE | PMD_HUGE_PRESENT); | ||
653 | } | ||
654 | |||
655 | static inline int pmd_trans_splitting(pmd_t pmd) | 653 | static inline int pmd_trans_splitting(pmd_t pmd) |
656 | { | 654 | { |
657 | return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) == | 655 | return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) == |
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c index 1271b3a27d4e..be5bdf93c767 100644 --- a/arch/sparc/kernel/sbus.c +++ b/arch/sparc/kernel/sbus.c | |||
@@ -554,10 +554,8 @@ static void __init sbus_iommu_init(struct platform_device *op) | |||
554 | regs = pr->phys_addr; | 554 | regs = pr->phys_addr; |
555 | 555 | ||
556 | iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); | 556 | iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); |
557 | if (!iommu) | ||
558 | goto fatal_memory_error; | ||
559 | strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC); | 557 | strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC); |
560 | if (!strbuf) | 558 | if (!iommu || !strbuf) |
561 | goto fatal_memory_error; | 559 | goto fatal_memory_error; |
562 | 560 | ||
563 | op->dev.archdata.iommu = iommu; | 561 | op->dev.archdata.iommu = iommu; |
@@ -656,6 +654,8 @@ static void __init sbus_iommu_init(struct platform_device *op) | |||
656 | return; | 654 | return; |
657 | 655 | ||
658 | fatal_memory_error: | 656 | fatal_memory_error: |
657 | kfree(iommu); | ||
658 | kfree(strbuf); | ||
659 | prom_printf("sbus_iommu_init: Fatal memory allocation error.\n"); | 659 | prom_printf("sbus_iommu_init: Fatal memory allocation error.\n"); |
660 | } | 660 | } |
661 | 661 | ||
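Note: the restructured error path works because kfree(NULL) is defined to be a no-op, so a single label can release both buffers regardless of which allocation failed. The pattern, as a kernel-context sketch:

    #include <linux/slab.h>

    static int alloc_pair(void **a, void **b, size_t sa, size_t sb)
    {
        *a = kzalloc(sa, GFP_ATOMIC);
        *b = kzalloc(sb, GFP_ATOMIC);
        if (*a && *b)
            return 0;
        kfree(*a);               /* no-op if this allocation failed */
        kfree(*b);
        *a = *b = NULL;
        return -ENOMEM;
    }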
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index 42c55df3aec3..01ee23dd724d 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c | |||
@@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
66 | return 1; | 66 | return 1; |
67 | } | 67 | } |
68 | 68 | ||
69 | static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | ||
70 | unsigned long end, int write, struct page **pages, | ||
71 | int *nr) | ||
72 | { | ||
73 | struct page *head, *page, *tail; | ||
74 | u32 mask; | ||
75 | int refs; | ||
76 | |||
77 | mask = PMD_HUGE_PRESENT; | ||
78 | if (write) | ||
79 | mask |= PMD_HUGE_WRITE; | ||
80 | if ((pmd_val(pmd) & mask) != mask) | ||
81 | return 0; | ||
82 | |||
83 | refs = 0; | ||
84 | head = pmd_page(pmd); | ||
85 | page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); | ||
86 | tail = page; | ||
87 | do { | ||
88 | VM_BUG_ON(compound_head(page) != head); | ||
89 | pages[*nr] = page; | ||
90 | (*nr)++; | ||
91 | page++; | ||
92 | refs++; | ||
93 | } while (addr += PAGE_SIZE, addr != end); | ||
94 | |||
95 | if (!page_cache_add_speculative(head, refs)) { | ||
96 | *nr -= refs; | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) { | ||
101 | *nr -= refs; | ||
102 | while (refs--) | ||
103 | put_page(head); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | /* Any tail pages need their mapcount reference taken before we | ||
108 | * return. | ||
109 | */ | ||
110 | while (refs--) { | ||
111 | if (PageTail(tail)) | ||
112 | get_huge_page_tail(tail); | ||
113 | tail++; | ||
114 | } | ||
115 | |||
116 | return 1; | ||
117 | } | ||
118 | |||
69 | static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | 119 | static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, |
70 | int write, struct page **pages, int *nr) | 120 | int write, struct page **pages, int *nr) |
71 | { | 121 | { |
@@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | |||
77 | pmd_t pmd = *pmdp; | 127 | pmd_t pmd = *pmdp; |
78 | 128 | ||
79 | next = pmd_addr_end(addr, end); | 129 | next = pmd_addr_end(addr, end); |
80 | if (pmd_none(pmd)) | 130 | if (pmd_none(pmd) || pmd_trans_splitting(pmd)) |
81 | return 0; | 131 | return 0; |
82 | if (!gup_pte_range(pmd, addr, next, write, pages, nr)) | 132 | if (unlikely(pmd_large(pmd))) { |
133 | if (!gup_huge_pmd(pmdp, pmd, addr, next, | ||
134 | write, pages, nr)) | ||
135 | return 0; | ||
136 | } else if (!gup_pte_range(pmd, addr, next, write, | ||
137 | pages, nr)) | ||
83 | return 0; | 138 | return 0; |
84 | } while (pmdp++, addr = next, addr != end); | 139 | } while (pmdp++, addr = next, addr != end); |
85 | 140 | ||
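Note: gup_huge_pmd() above follows the lockless fast-GUP discipline: take speculative page references first, then recheck that the pmd has not changed underneath, dropping everything on a race. A hypothetical caller of the fast path it serves:

    /* Kernel-context sketch: pin one writable user page via the fast path. */
    static int pin_one_page(unsigned long addr, struct page **pagep)
    {
        int got = __get_user_pages_fast(addr, 1, 1 /* write */, pagep);
        if (got != 1)
            return -EFAULT;  /* real callers fall back to get_user_pages() */
        /* ... use *pagep ... */
        put_page(*pagep);
        return 0;
    }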
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 875d008828b8..1bb7ad4aeff4 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig | |||
@@ -140,6 +140,8 @@ config ARCH_DEFCONFIG | |||
140 | 140 | ||
141 | source "init/Kconfig" | 141 | source "init/Kconfig" |
142 | 142 | ||
143 | source "kernel/Kconfig.freezer" | ||
144 | |||
143 | menu "Tilera-specific configuration" | 145 | menu "Tilera-specific configuration" |
144 | 146 | ||
145 | config NR_CPUS | 147 | config NR_CPUS |
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index 2a9b293fece6..31672918064c 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h | |||
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr) | |||
250 | #define iowrite32 writel | 250 | #define iowrite32 writel |
251 | #define iowrite64 writeq | 251 | #define iowrite64 writeq |
252 | 252 | ||
253 | static inline void memset_io(void *dst, int val, size_t len) | 253 | #if CHIP_HAS_MMIO() || defined(CONFIG_PCI) |
254 | |||
255 | static inline void memset_io(volatile void *dst, int val, size_t len) | ||
254 | { | 256 | { |
255 | int x; | 257 | int x; |
256 | BUG_ON((unsigned long)dst & 0x3); | 258 | BUG_ON((unsigned long)dst & 0x3); |
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, | |||
277 | writel(*(u32 *)(src + x), dst + x); | 279 | writel(*(u32 *)(src + x), dst + x); |
278 | } | 280 | } |
279 | 281 | ||
282 | #endif | ||
283 | |||
280 | /* | 284 | /* |
281 | * The Tile architecture does not support IOPORT, even with PCI. | 285 | * The Tile architecture does not support IOPORT, even with PCI. |
282 | * Unfortunately we can't yet simply not declare these methods, | 286 | * Unfortunately we can't yet simply not declare these methods, |
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index b4e96fef2cf8..241c0bb60b12 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h | |||
@@ -18,32 +18,20 @@ | |||
18 | #include <arch/interrupts.h> | 18 | #include <arch/interrupts.h> |
19 | #include <arch/chip.h> | 19 | #include <arch/chip.h> |
20 | 20 | ||
21 | #if !defined(__tilegx__) && defined(__ASSEMBLY__) | ||
22 | |||
23 | /* | 21 | /* |
24 | * The set of interrupts we want to allow when interrupts are nominally | 22 | * The set of interrupts we want to allow when interrupts are nominally |
25 | * disabled. The remainder are effectively "NMI" interrupts from | 23 | * disabled. The remainder are effectively "NMI" interrupts from |
26 | * the point of view of the generic Linux code. Note that synchronous | 24 | * the point of view of the generic Linux code. Note that synchronous |
27 | * interrupts (aka "non-queued") are not blocked by the mask in any case. | 25 | * interrupts (aka "non-queued") are not blocked by the mask in any case. |
28 | */ | 26 | */ |
29 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
30 | #define LINUX_MASKABLE_INTERRUPTS_HI \ | ||
31 | (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT))) | ||
32 | #else | ||
33 | #define LINUX_MASKABLE_INTERRUPTS_HI \ | ||
34 | (~(INT_MASK_HI(INT_PERF_COUNT))) | ||
35 | #endif | ||
36 | |||
37 | #else | ||
38 | |||
39 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
40 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
41 | (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) | ||
42 | #else | ||
43 | #define LINUX_MASKABLE_INTERRUPTS \ | 27 | #define LINUX_MASKABLE_INTERRUPTS \ |
44 | (~(INT_MASK(INT_PERF_COUNT))) | 28 | (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT))) |
45 | #endif | ||
46 | 29 | ||
30 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
31 | /* The same macro, but for the two 32-bit SPRs separately. */ | ||
32 | #define LINUX_MASKABLE_INTERRUPTS_LO (-1) | ||
33 | #define LINUX_MASKABLE_INTERRUPTS_HI \ | ||
34 | (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32)))) | ||
47 | #endif | 35 | #endif |
48 | 36 | ||
49 | #ifndef __ASSEMBLY__ | 37 | #ifndef __ASSEMBLY__ |
@@ -126,7 +114,7 @@ | |||
126 | * to know our current state. | 114 | * to know our current state. |
127 | */ | 115 | */ |
128 | DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | 116 | DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); |
129 | #define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) | 117 | #define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR) |
130 | 118 | ||
131 | /* Disable interrupts. */ | 119 | /* Disable interrupts. */ |
132 | #define arch_local_irq_disable() \ | 120 | #define arch_local_irq_disable() \ |
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | |||
165 | 153 | ||
166 | /* Prevent the given interrupt from being enabled next time we enable irqs. */ | 154 | /* Prevent the given interrupt from being enabled next time we enable irqs. */ |
167 | #define arch_local_irq_mask(interrupt) \ | 155 | #define arch_local_irq_mask(interrupt) \ |
168 | (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) | 156 | (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt))) |
169 | 157 | ||
170 | /* Prevent the given interrupt from being enabled immediately. */ | 158 | /* Prevent the given interrupt from being enabled immediately. */ |
171 | #define arch_local_irq_mask_now(interrupt) do { \ | 159 | #define arch_local_irq_mask_now(interrupt) do { \ |
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | |||
175 | 163 | ||
176 | /* Allow the given interrupt to be enabled next time we enable irqs. */ | 164 | /* Allow the given interrupt to be enabled next time we enable irqs. */ |
177 | #define arch_local_irq_unmask(interrupt) \ | 165 | #define arch_local_irq_unmask(interrupt) \ |
178 | (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) | 166 | (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt))) |
179 | 167 | ||
180 | /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ | 168 | /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ |
181 | #define arch_local_irq_unmask_now(interrupt) do { \ | 169 | #define arch_local_irq_unmask_now(interrupt) do { \ |
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | |||
250 | /* Disable interrupts. */ | 238 | /* Disable interrupts. */ |
251 | #define IRQ_DISABLE(tmp0, tmp1) \ | 239 | #define IRQ_DISABLE(tmp0, tmp1) \ |
252 | { \ | 240 | { \ |
253 | movei tmp0, -1; \ | 241 | movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \ |
254 | moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \ | 242 | moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \ |
255 | }; \ | 243 | }; \ |
256 | { \ | 244 | { \ |
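The rewritten constants make the lo/hi split for chips with two 32-bit interrupt-mask SPRs explicit: the low word masks nothing, and the high word clears the perf-count bits with the interrupt numbers rebased by 32. A quick user-space check of that arithmetic (the interrupt numbers below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define INT_PERF_COUNT     38   /* illustrative values, not the real SPR numbers */
#define INT_AUX_PERF_COUNT 39

int main(void)
{
	uint64_t mask = ~((1ULL << INT_PERF_COUNT) | (1ULL << INT_AUX_PERF_COUNT));
	uint32_t lo = (uint32_t)mask;          /* 0xffffffff: nothing masked low */
	uint32_t hi = (uint32_t)(mask >> 32);  /* matches the rebased expression */

	printf("lo=%#x hi=%#x\n", lo, hi);
	printf("hi check: %#x\n", ~((1u << (INT_PERF_COUNT - 32)) |
				    (1u << (INT_AUX_PERF_COUNT - 32))));
	return 0;
}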
diff --git a/arch/tile/include/uapi/arch/interrupts_32.h b/arch/tile/include/uapi/arch/interrupts_32.h index 96b5710505b6..2efe3f68b2d6 100644 --- a/arch/tile/include/uapi/arch/interrupts_32.h +++ b/arch/tile/include/uapi/arch/interrupts_32.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #ifndef __ARCH_INTERRUPTS_H__ | 15 | #ifndef __ARCH_INTERRUPTS_H__ |
16 | #define __ARCH_INTERRUPTS_H__ | 16 | #define __ARCH_INTERRUPTS_H__ |
17 | 17 | ||
18 | #ifndef __KERNEL__ | ||
18 | /** Mask for an interrupt. */ | 19 | /** Mask for an interrupt. */ |
19 | /* Note: must handle breaking interrupts into high and low words manually. */ | 20 | /* Note: must handle breaking interrupts into high and low words manually. */ |
20 | #define INT_MASK_LO(intno) (1 << (intno)) | 21 | #define INT_MASK_LO(intno) (1 << (intno)) |
@@ -23,6 +24,7 @@ | |||
23 | #ifndef __ASSEMBLER__ | 24 | #ifndef __ASSEMBLER__ |
24 | #define INT_MASK(intno) (1ULL << (intno)) | 25 | #define INT_MASK(intno) (1ULL << (intno)) |
25 | #endif | 26 | #endif |
27 | #endif | ||
26 | 28 | ||
27 | 29 | ||
28 | /** Where a given interrupt executes */ | 30 | /** Where a given interrupt executes */ |
@@ -92,216 +94,216 @@ | |||
92 | 94 | ||
93 | #ifndef __ASSEMBLER__ | 95 | #ifndef __ASSEMBLER__ |
94 | #define QUEUED_INTERRUPTS ( \ | 96 | #define QUEUED_INTERRUPTS ( \ |
95 | INT_MASK(INT_MEM_ERROR) | \ | 97 | (1ULL << INT_MEM_ERROR) | \ |
96 | INT_MASK(INT_DMATLB_MISS) | \ | 98 | (1ULL << INT_DMATLB_MISS) | \ |
97 | INT_MASK(INT_DMATLB_ACCESS) | \ | 99 | (1ULL << INT_DMATLB_ACCESS) | \ |
98 | INT_MASK(INT_SNITLB_MISS) | \ | 100 | (1ULL << INT_SNITLB_MISS) | \ |
99 | INT_MASK(INT_SN_NOTIFY) | \ | 101 | (1ULL << INT_SN_NOTIFY) | \ |
100 | INT_MASK(INT_SN_FIREWALL) | \ | 102 | (1ULL << INT_SN_FIREWALL) | \ |
101 | INT_MASK(INT_IDN_FIREWALL) | \ | 103 | (1ULL << INT_IDN_FIREWALL) | \ |
102 | INT_MASK(INT_UDN_FIREWALL) | \ | 104 | (1ULL << INT_UDN_FIREWALL) | \ |
103 | INT_MASK(INT_TILE_TIMER) | \ | 105 | (1ULL << INT_TILE_TIMER) | \ |
104 | INT_MASK(INT_IDN_TIMER) | \ | 106 | (1ULL << INT_IDN_TIMER) | \ |
105 | INT_MASK(INT_UDN_TIMER) | \ | 107 | (1ULL << INT_UDN_TIMER) | \ |
106 | INT_MASK(INT_DMA_NOTIFY) | \ | 108 | (1ULL << INT_DMA_NOTIFY) | \ |
107 | INT_MASK(INT_IDN_CA) | \ | 109 | (1ULL << INT_IDN_CA) | \ |
108 | INT_MASK(INT_UDN_CA) | \ | 110 | (1ULL << INT_UDN_CA) | \ |
109 | INT_MASK(INT_IDN_AVAIL) | \ | 111 | (1ULL << INT_IDN_AVAIL) | \ |
110 | INT_MASK(INT_UDN_AVAIL) | \ | 112 | (1ULL << INT_UDN_AVAIL) | \ |
111 | INT_MASK(INT_PERF_COUNT) | \ | 113 | (1ULL << INT_PERF_COUNT) | \ |
112 | INT_MASK(INT_INTCTRL_3) | \ | 114 | (1ULL << INT_INTCTRL_3) | \ |
113 | INT_MASK(INT_INTCTRL_2) | \ | 115 | (1ULL << INT_INTCTRL_2) | \ |
114 | INT_MASK(INT_INTCTRL_1) | \ | 116 | (1ULL << INT_INTCTRL_1) | \ |
115 | INT_MASK(INT_INTCTRL_0) | \ | 117 | (1ULL << INT_INTCTRL_0) | \ |
116 | INT_MASK(INT_BOOT_ACCESS) | \ | 118 | (1ULL << INT_BOOT_ACCESS) | \ |
117 | INT_MASK(INT_WORLD_ACCESS) | \ | 119 | (1ULL << INT_WORLD_ACCESS) | \ |
118 | INT_MASK(INT_I_ASID) | \ | 120 | (1ULL << INT_I_ASID) | \ |
119 | INT_MASK(INT_D_ASID) | \ | 121 | (1ULL << INT_D_ASID) | \ |
120 | INT_MASK(INT_DMA_ASID) | \ | 122 | (1ULL << INT_DMA_ASID) | \ |
121 | INT_MASK(INT_SNI_ASID) | \ | 123 | (1ULL << INT_SNI_ASID) | \ |
122 | INT_MASK(INT_DMA_CPL) | \ | 124 | (1ULL << INT_DMA_CPL) | \ |
123 | INT_MASK(INT_SN_CPL) | \ | 125 | (1ULL << INT_SN_CPL) | \ |
124 | INT_MASK(INT_DOUBLE_FAULT) | \ | 126 | (1ULL << INT_DOUBLE_FAULT) | \ |
125 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 127 | (1ULL << INT_AUX_PERF_COUNT) | \ |
126 | 0) | 128 | 0) |
127 | #define NONQUEUED_INTERRUPTS ( \ | 129 | #define NONQUEUED_INTERRUPTS ( \ |
128 | INT_MASK(INT_ITLB_MISS) | \ | 130 | (1ULL << INT_ITLB_MISS) | \ |
129 | INT_MASK(INT_ILL) | \ | 131 | (1ULL << INT_ILL) | \ |
130 | INT_MASK(INT_GPV) | \ | 132 | (1ULL << INT_GPV) | \ |
131 | INT_MASK(INT_SN_ACCESS) | \ | 133 | (1ULL << INT_SN_ACCESS) | \ |
132 | INT_MASK(INT_IDN_ACCESS) | \ | 134 | (1ULL << INT_IDN_ACCESS) | \ |
133 | INT_MASK(INT_UDN_ACCESS) | \ | 135 | (1ULL << INT_UDN_ACCESS) | \ |
134 | INT_MASK(INT_IDN_REFILL) | \ | 136 | (1ULL << INT_IDN_REFILL) | \ |
135 | INT_MASK(INT_UDN_REFILL) | \ | 137 | (1ULL << INT_UDN_REFILL) | \ |
136 | INT_MASK(INT_IDN_COMPLETE) | \ | 138 | (1ULL << INT_IDN_COMPLETE) | \ |
137 | INT_MASK(INT_UDN_COMPLETE) | \ | 139 | (1ULL << INT_UDN_COMPLETE) | \ |
138 | INT_MASK(INT_SWINT_3) | \ | 140 | (1ULL << INT_SWINT_3) | \ |
139 | INT_MASK(INT_SWINT_2) | \ | 141 | (1ULL << INT_SWINT_2) | \ |
140 | INT_MASK(INT_SWINT_1) | \ | 142 | (1ULL << INT_SWINT_1) | \ |
141 | INT_MASK(INT_SWINT_0) | \ | 143 | (1ULL << INT_SWINT_0) | \ |
142 | INT_MASK(INT_UNALIGN_DATA) | \ | 144 | (1ULL << INT_UNALIGN_DATA) | \ |
143 | INT_MASK(INT_DTLB_MISS) | \ | 145 | (1ULL << INT_DTLB_MISS) | \ |
144 | INT_MASK(INT_DTLB_ACCESS) | \ | 146 | (1ULL << INT_DTLB_ACCESS) | \ |
145 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 147 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
146 | 0) | 148 | 0) |
147 | #define CRITICAL_MASKED_INTERRUPTS ( \ | 149 | #define CRITICAL_MASKED_INTERRUPTS ( \ |
148 | INT_MASK(INT_MEM_ERROR) | \ | 150 | (1ULL << INT_MEM_ERROR) | \ |
149 | INT_MASK(INT_DMATLB_MISS) | \ | 151 | (1ULL << INT_DMATLB_MISS) | \ |
150 | INT_MASK(INT_DMATLB_ACCESS) | \ | 152 | (1ULL << INT_DMATLB_ACCESS) | \ |
151 | INT_MASK(INT_SNITLB_MISS) | \ | 153 | (1ULL << INT_SNITLB_MISS) | \ |
152 | INT_MASK(INT_SN_NOTIFY) | \ | 154 | (1ULL << INT_SN_NOTIFY) | \ |
153 | INT_MASK(INT_SN_FIREWALL) | \ | 155 | (1ULL << INT_SN_FIREWALL) | \ |
154 | INT_MASK(INT_IDN_FIREWALL) | \ | 156 | (1ULL << INT_IDN_FIREWALL) | \ |
155 | INT_MASK(INT_UDN_FIREWALL) | \ | 157 | (1ULL << INT_UDN_FIREWALL) | \ |
156 | INT_MASK(INT_TILE_TIMER) | \ | 158 | (1ULL << INT_TILE_TIMER) | \ |
157 | INT_MASK(INT_IDN_TIMER) | \ | 159 | (1ULL << INT_IDN_TIMER) | \ |
158 | INT_MASK(INT_UDN_TIMER) | \ | 160 | (1ULL << INT_UDN_TIMER) | \ |
159 | INT_MASK(INT_DMA_NOTIFY) | \ | 161 | (1ULL << INT_DMA_NOTIFY) | \ |
160 | INT_MASK(INT_IDN_CA) | \ | 162 | (1ULL << INT_IDN_CA) | \ |
161 | INT_MASK(INT_UDN_CA) | \ | 163 | (1ULL << INT_UDN_CA) | \ |
162 | INT_MASK(INT_IDN_AVAIL) | \ | 164 | (1ULL << INT_IDN_AVAIL) | \ |
163 | INT_MASK(INT_UDN_AVAIL) | \ | 165 | (1ULL << INT_UDN_AVAIL) | \ |
164 | INT_MASK(INT_PERF_COUNT) | \ | 166 | (1ULL << INT_PERF_COUNT) | \ |
165 | INT_MASK(INT_INTCTRL_3) | \ | 167 | (1ULL << INT_INTCTRL_3) | \ |
166 | INT_MASK(INT_INTCTRL_2) | \ | 168 | (1ULL << INT_INTCTRL_2) | \ |
167 | INT_MASK(INT_INTCTRL_1) | \ | 169 | (1ULL << INT_INTCTRL_1) | \ |
168 | INT_MASK(INT_INTCTRL_0) | \ | 170 | (1ULL << INT_INTCTRL_0) | \ |
169 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 171 | (1ULL << INT_AUX_PERF_COUNT) | \ |
170 | 0) | 172 | 0) |
171 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | 173 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ |
172 | INT_MASK(INT_ITLB_MISS) | \ | 174 | (1ULL << INT_ITLB_MISS) | \ |
173 | INT_MASK(INT_ILL) | \ | 175 | (1ULL << INT_ILL) | \ |
174 | INT_MASK(INT_GPV) | \ | 176 | (1ULL << INT_GPV) | \ |
175 | INT_MASK(INT_SN_ACCESS) | \ | 177 | (1ULL << INT_SN_ACCESS) | \ |
176 | INT_MASK(INT_IDN_ACCESS) | \ | 178 | (1ULL << INT_IDN_ACCESS) | \ |
177 | INT_MASK(INT_UDN_ACCESS) | \ | 179 | (1ULL << INT_UDN_ACCESS) | \ |
178 | INT_MASK(INT_IDN_REFILL) | \ | 180 | (1ULL << INT_IDN_REFILL) | \ |
179 | INT_MASK(INT_UDN_REFILL) | \ | 181 | (1ULL << INT_UDN_REFILL) | \ |
180 | INT_MASK(INT_IDN_COMPLETE) | \ | 182 | (1ULL << INT_IDN_COMPLETE) | \ |
181 | INT_MASK(INT_UDN_COMPLETE) | \ | 183 | (1ULL << INT_UDN_COMPLETE) | \ |
182 | INT_MASK(INT_SWINT_3) | \ | 184 | (1ULL << INT_SWINT_3) | \ |
183 | INT_MASK(INT_SWINT_2) | \ | 185 | (1ULL << INT_SWINT_2) | \ |
184 | INT_MASK(INT_SWINT_1) | \ | 186 | (1ULL << INT_SWINT_1) | \ |
185 | INT_MASK(INT_SWINT_0) | \ | 187 | (1ULL << INT_SWINT_0) | \ |
186 | INT_MASK(INT_UNALIGN_DATA) | \ | 188 | (1ULL << INT_UNALIGN_DATA) | \ |
187 | INT_MASK(INT_DTLB_MISS) | \ | 189 | (1ULL << INT_DTLB_MISS) | \ |
188 | INT_MASK(INT_DTLB_ACCESS) | \ | 190 | (1ULL << INT_DTLB_ACCESS) | \ |
189 | INT_MASK(INT_BOOT_ACCESS) | \ | 191 | (1ULL << INT_BOOT_ACCESS) | \ |
190 | INT_MASK(INT_WORLD_ACCESS) | \ | 192 | (1ULL << INT_WORLD_ACCESS) | \ |
191 | INT_MASK(INT_I_ASID) | \ | 193 | (1ULL << INT_I_ASID) | \ |
192 | INT_MASK(INT_D_ASID) | \ | 194 | (1ULL << INT_D_ASID) | \ |
193 | INT_MASK(INT_DMA_ASID) | \ | 195 | (1ULL << INT_DMA_ASID) | \ |
194 | INT_MASK(INT_SNI_ASID) | \ | 196 | (1ULL << INT_SNI_ASID) | \ |
195 | INT_MASK(INT_DMA_CPL) | \ | 197 | (1ULL << INT_DMA_CPL) | \ |
196 | INT_MASK(INT_SN_CPL) | \ | 198 | (1ULL << INT_SN_CPL) | \ |
197 | INT_MASK(INT_DOUBLE_FAULT) | \ | 199 | (1ULL << INT_DOUBLE_FAULT) | \ |
198 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 200 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
199 | 0) | 201 | 0) |
200 | #define MASKABLE_INTERRUPTS ( \ | 202 | #define MASKABLE_INTERRUPTS ( \ |
201 | INT_MASK(INT_MEM_ERROR) | \ | 203 | (1ULL << INT_MEM_ERROR) | \ |
202 | INT_MASK(INT_IDN_REFILL) | \ | 204 | (1ULL << INT_IDN_REFILL) | \ |
203 | INT_MASK(INT_UDN_REFILL) | \ | 205 | (1ULL << INT_UDN_REFILL) | \ |
204 | INT_MASK(INT_IDN_COMPLETE) | \ | 206 | (1ULL << INT_IDN_COMPLETE) | \ |
205 | INT_MASK(INT_UDN_COMPLETE) | \ | 207 | (1ULL << INT_UDN_COMPLETE) | \ |
206 | INT_MASK(INT_DMATLB_MISS) | \ | 208 | (1ULL << INT_DMATLB_MISS) | \ |
207 | INT_MASK(INT_DMATLB_ACCESS) | \ | 209 | (1ULL << INT_DMATLB_ACCESS) | \ |
208 | INT_MASK(INT_SNITLB_MISS) | \ | 210 | (1ULL << INT_SNITLB_MISS) | \ |
209 | INT_MASK(INT_SN_NOTIFY) | \ | 211 | (1ULL << INT_SN_NOTIFY) | \ |
210 | INT_MASK(INT_SN_FIREWALL) | \ | 212 | (1ULL << INT_SN_FIREWALL) | \ |
211 | INT_MASK(INT_IDN_FIREWALL) | \ | 213 | (1ULL << INT_IDN_FIREWALL) | \ |
212 | INT_MASK(INT_UDN_FIREWALL) | \ | 214 | (1ULL << INT_UDN_FIREWALL) | \ |
213 | INT_MASK(INT_TILE_TIMER) | \ | 215 | (1ULL << INT_TILE_TIMER) | \ |
214 | INT_MASK(INT_IDN_TIMER) | \ | 216 | (1ULL << INT_IDN_TIMER) | \ |
215 | INT_MASK(INT_UDN_TIMER) | \ | 217 | (1ULL << INT_UDN_TIMER) | \ |
216 | INT_MASK(INT_DMA_NOTIFY) | \ | 218 | (1ULL << INT_DMA_NOTIFY) | \ |
217 | INT_MASK(INT_IDN_CA) | \ | 219 | (1ULL << INT_IDN_CA) | \ |
218 | INT_MASK(INT_UDN_CA) | \ | 220 | (1ULL << INT_UDN_CA) | \ |
219 | INT_MASK(INT_IDN_AVAIL) | \ | 221 | (1ULL << INT_IDN_AVAIL) | \ |
220 | INT_MASK(INT_UDN_AVAIL) | \ | 222 | (1ULL << INT_UDN_AVAIL) | \ |
221 | INT_MASK(INT_PERF_COUNT) | \ | 223 | (1ULL << INT_PERF_COUNT) | \ |
222 | INT_MASK(INT_INTCTRL_3) | \ | 224 | (1ULL << INT_INTCTRL_3) | \ |
223 | INT_MASK(INT_INTCTRL_2) | \ | 225 | (1ULL << INT_INTCTRL_2) | \ |
224 | INT_MASK(INT_INTCTRL_1) | \ | 226 | (1ULL << INT_INTCTRL_1) | \ |
225 | INT_MASK(INT_INTCTRL_0) | \ | 227 | (1ULL << INT_INTCTRL_0) | \ |
226 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 228 | (1ULL << INT_AUX_PERF_COUNT) | \ |
227 | 0) | 229 | 0) |
228 | #define UNMASKABLE_INTERRUPTS ( \ | 230 | #define UNMASKABLE_INTERRUPTS ( \ |
229 | INT_MASK(INT_ITLB_MISS) | \ | 231 | (1ULL << INT_ITLB_MISS) | \ |
230 | INT_MASK(INT_ILL) | \ | 232 | (1ULL << INT_ILL) | \ |
231 | INT_MASK(INT_GPV) | \ | 233 | (1ULL << INT_GPV) | \ |
232 | INT_MASK(INT_SN_ACCESS) | \ | 234 | (1ULL << INT_SN_ACCESS) | \ |
233 | INT_MASK(INT_IDN_ACCESS) | \ | 235 | (1ULL << INT_IDN_ACCESS) | \ |
234 | INT_MASK(INT_UDN_ACCESS) | \ | 236 | (1ULL << INT_UDN_ACCESS) | \ |
235 | INT_MASK(INT_SWINT_3) | \ | 237 | (1ULL << INT_SWINT_3) | \ |
236 | INT_MASK(INT_SWINT_2) | \ | 238 | (1ULL << INT_SWINT_2) | \ |
237 | INT_MASK(INT_SWINT_1) | \ | 239 | (1ULL << INT_SWINT_1) | \ |
238 | INT_MASK(INT_SWINT_0) | \ | 240 | (1ULL << INT_SWINT_0) | \ |
239 | INT_MASK(INT_UNALIGN_DATA) | \ | 241 | (1ULL << INT_UNALIGN_DATA) | \ |
240 | INT_MASK(INT_DTLB_MISS) | \ | 242 | (1ULL << INT_DTLB_MISS) | \ |
241 | INT_MASK(INT_DTLB_ACCESS) | \ | 243 | (1ULL << INT_DTLB_ACCESS) | \ |
242 | INT_MASK(INT_BOOT_ACCESS) | \ | 244 | (1ULL << INT_BOOT_ACCESS) | \ |
243 | INT_MASK(INT_WORLD_ACCESS) | \ | 245 | (1ULL << INT_WORLD_ACCESS) | \ |
244 | INT_MASK(INT_I_ASID) | \ | 246 | (1ULL << INT_I_ASID) | \ |
245 | INT_MASK(INT_D_ASID) | \ | 247 | (1ULL << INT_D_ASID) | \ |
246 | INT_MASK(INT_DMA_ASID) | \ | 248 | (1ULL << INT_DMA_ASID) | \ |
247 | INT_MASK(INT_SNI_ASID) | \ | 249 | (1ULL << INT_SNI_ASID) | \ |
248 | INT_MASK(INT_DMA_CPL) | \ | 250 | (1ULL << INT_DMA_CPL) | \ |
249 | INT_MASK(INT_SN_CPL) | \ | 251 | (1ULL << INT_SN_CPL) | \ |
250 | INT_MASK(INT_DOUBLE_FAULT) | \ | 252 | (1ULL << INT_DOUBLE_FAULT) | \ |
251 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 253 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
252 | 0) | 254 | 0) |
253 | #define SYNC_INTERRUPTS ( \ | 255 | #define SYNC_INTERRUPTS ( \ |
254 | INT_MASK(INT_ITLB_MISS) | \ | 256 | (1ULL << INT_ITLB_MISS) | \ |
255 | INT_MASK(INT_ILL) | \ | 257 | (1ULL << INT_ILL) | \ |
256 | INT_MASK(INT_GPV) | \ | 258 | (1ULL << INT_GPV) | \ |
257 | INT_MASK(INT_SN_ACCESS) | \ | 259 | (1ULL << INT_SN_ACCESS) | \ |
258 | INT_MASK(INT_IDN_ACCESS) | \ | 260 | (1ULL << INT_IDN_ACCESS) | \ |
259 | INT_MASK(INT_UDN_ACCESS) | \ | 261 | (1ULL << INT_UDN_ACCESS) | \ |
260 | INT_MASK(INT_IDN_REFILL) | \ | 262 | (1ULL << INT_IDN_REFILL) | \ |
261 | INT_MASK(INT_UDN_REFILL) | \ | 263 | (1ULL << INT_UDN_REFILL) | \ |
262 | INT_MASK(INT_IDN_COMPLETE) | \ | 264 | (1ULL << INT_IDN_COMPLETE) | \ |
263 | INT_MASK(INT_UDN_COMPLETE) | \ | 265 | (1ULL << INT_UDN_COMPLETE) | \ |
264 | INT_MASK(INT_SWINT_3) | \ | 266 | (1ULL << INT_SWINT_3) | \ |
265 | INT_MASK(INT_SWINT_2) | \ | 267 | (1ULL << INT_SWINT_2) | \ |
266 | INT_MASK(INT_SWINT_1) | \ | 268 | (1ULL << INT_SWINT_1) | \ |
267 | INT_MASK(INT_SWINT_0) | \ | 269 | (1ULL << INT_SWINT_0) | \ |
268 | INT_MASK(INT_UNALIGN_DATA) | \ | 270 | (1ULL << INT_UNALIGN_DATA) | \ |
269 | INT_MASK(INT_DTLB_MISS) | \ | 271 | (1ULL << INT_DTLB_MISS) | \ |
270 | INT_MASK(INT_DTLB_ACCESS) | \ | 272 | (1ULL << INT_DTLB_ACCESS) | \ |
271 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 273 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
272 | 0) | 274 | 0) |
273 | #define NON_SYNC_INTERRUPTS ( \ | 275 | #define NON_SYNC_INTERRUPTS ( \ |
274 | INT_MASK(INT_MEM_ERROR) | \ | 276 | (1ULL << INT_MEM_ERROR) | \ |
275 | INT_MASK(INT_DMATLB_MISS) | \ | 277 | (1ULL << INT_DMATLB_MISS) | \ |
276 | INT_MASK(INT_DMATLB_ACCESS) | \ | 278 | (1ULL << INT_DMATLB_ACCESS) | \ |
277 | INT_MASK(INT_SNITLB_MISS) | \ | 279 | (1ULL << INT_SNITLB_MISS) | \ |
278 | INT_MASK(INT_SN_NOTIFY) | \ | 280 | (1ULL << INT_SN_NOTIFY) | \ |
279 | INT_MASK(INT_SN_FIREWALL) | \ | 281 | (1ULL << INT_SN_FIREWALL) | \ |
280 | INT_MASK(INT_IDN_FIREWALL) | \ | 282 | (1ULL << INT_IDN_FIREWALL) | \ |
281 | INT_MASK(INT_UDN_FIREWALL) | \ | 283 | (1ULL << INT_UDN_FIREWALL) | \ |
282 | INT_MASK(INT_TILE_TIMER) | \ | 284 | (1ULL << INT_TILE_TIMER) | \ |
283 | INT_MASK(INT_IDN_TIMER) | \ | 285 | (1ULL << INT_IDN_TIMER) | \ |
284 | INT_MASK(INT_UDN_TIMER) | \ | 286 | (1ULL << INT_UDN_TIMER) | \ |
285 | INT_MASK(INT_DMA_NOTIFY) | \ | 287 | (1ULL << INT_DMA_NOTIFY) | \ |
286 | INT_MASK(INT_IDN_CA) | \ | 288 | (1ULL << INT_IDN_CA) | \ |
287 | INT_MASK(INT_UDN_CA) | \ | 289 | (1ULL << INT_UDN_CA) | \ |
288 | INT_MASK(INT_IDN_AVAIL) | \ | 290 | (1ULL << INT_IDN_AVAIL) | \ |
289 | INT_MASK(INT_UDN_AVAIL) | \ | 291 | (1ULL << INT_UDN_AVAIL) | \ |
290 | INT_MASK(INT_PERF_COUNT) | \ | 292 | (1ULL << INT_PERF_COUNT) | \ |
291 | INT_MASK(INT_INTCTRL_3) | \ | 293 | (1ULL << INT_INTCTRL_3) | \ |
292 | INT_MASK(INT_INTCTRL_2) | \ | 294 | (1ULL << INT_INTCTRL_2) | \ |
293 | INT_MASK(INT_INTCTRL_1) | \ | 295 | (1ULL << INT_INTCTRL_1) | \ |
294 | INT_MASK(INT_INTCTRL_0) | \ | 296 | (1ULL << INT_INTCTRL_0) | \ |
295 | INT_MASK(INT_BOOT_ACCESS) | \ | 297 | (1ULL << INT_BOOT_ACCESS) | \ |
296 | INT_MASK(INT_WORLD_ACCESS) | \ | 298 | (1ULL << INT_WORLD_ACCESS) | \ |
297 | INT_MASK(INT_I_ASID) | \ | 299 | (1ULL << INT_I_ASID) | \ |
298 | INT_MASK(INT_D_ASID) | \ | 300 | (1ULL << INT_D_ASID) | \ |
299 | INT_MASK(INT_DMA_ASID) | \ | 301 | (1ULL << INT_DMA_ASID) | \ |
300 | INT_MASK(INT_SNI_ASID) | \ | 302 | (1ULL << INT_SNI_ASID) | \ |
301 | INT_MASK(INT_DMA_CPL) | \ | 303 | (1ULL << INT_DMA_CPL) | \ |
302 | INT_MASK(INT_SN_CPL) | \ | 304 | (1ULL << INT_SN_CPL) | \ |
303 | INT_MASK(INT_DOUBLE_FAULT) | \ | 305 | (1ULL << INT_DOUBLE_FAULT) | \ |
304 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 306 | (1ULL << INT_AUX_PERF_COUNT) | \ |
305 | 0) | 307 | 0) |
306 | #endif /* !__ASSEMBLER__ */ | 308 | #endif /* !__ASSEMBLER__ */ |
307 | #endif /* !__ARCH_INTERRUPTS_H__ */ | 309 | #endif /* !__ARCH_INTERRUPTS_H__ */ |
diff --git a/arch/tile/include/uapi/arch/interrupts_64.h b/arch/tile/include/uapi/arch/interrupts_64.h index 5bb58b2e4e6f..13c9f9182348 100644 --- a/arch/tile/include/uapi/arch/interrupts_64.h +++ b/arch/tile/include/uapi/arch/interrupts_64.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #ifndef __ARCH_INTERRUPTS_H__ | 15 | #ifndef __ARCH_INTERRUPTS_H__ |
16 | #define __ARCH_INTERRUPTS_H__ | 16 | #define __ARCH_INTERRUPTS_H__ |
17 | 17 | ||
18 | #ifndef __KERNEL__ | ||
18 | /** Mask for an interrupt. */ | 19 | /** Mask for an interrupt. */ |
19 | #ifdef __ASSEMBLER__ | 20 | #ifdef __ASSEMBLER__ |
20 | /* Note: must handle breaking interrupts into high and low words manually. */ | 21 | /* Note: must handle breaking interrupts into high and low words manually. */ |
@@ -22,6 +23,7 @@ | |||
22 | #else | 23 | #else |
23 | #define INT_MASK(intno) (1ULL << (intno)) | 24 | #define INT_MASK(intno) (1ULL << (intno)) |
24 | #endif | 25 | #endif |
26 | #endif | ||
25 | 27 | ||
26 | 28 | ||
27 | /** Where a given interrupt executes */ | 29 | /** Where a given interrupt executes */ |
@@ -85,192 +87,192 @@ | |||
85 | 87 | ||
86 | #ifndef __ASSEMBLER__ | 88 | #ifndef __ASSEMBLER__ |
87 | #define QUEUED_INTERRUPTS ( \ | 89 | #define QUEUED_INTERRUPTS ( \ |
88 | INT_MASK(INT_MEM_ERROR) | \ | 90 | (1ULL << INT_MEM_ERROR) | \ |
89 | INT_MASK(INT_IDN_COMPLETE) | \ | 91 | (1ULL << INT_IDN_COMPLETE) | \ |
90 | INT_MASK(INT_UDN_COMPLETE) | \ | 92 | (1ULL << INT_UDN_COMPLETE) | \ |
91 | INT_MASK(INT_IDN_FIREWALL) | \ | 93 | (1ULL << INT_IDN_FIREWALL) | \ |
92 | INT_MASK(INT_UDN_FIREWALL) | \ | 94 | (1ULL << INT_UDN_FIREWALL) | \ |
93 | INT_MASK(INT_TILE_TIMER) | \ | 95 | (1ULL << INT_TILE_TIMER) | \ |
94 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 96 | (1ULL << INT_AUX_TILE_TIMER) | \ |
95 | INT_MASK(INT_IDN_TIMER) | \ | 97 | (1ULL << INT_IDN_TIMER) | \ |
96 | INT_MASK(INT_UDN_TIMER) | \ | 98 | (1ULL << INT_UDN_TIMER) | \ |
97 | INT_MASK(INT_IDN_AVAIL) | \ | 99 | (1ULL << INT_IDN_AVAIL) | \ |
98 | INT_MASK(INT_UDN_AVAIL) | \ | 100 | (1ULL << INT_UDN_AVAIL) | \ |
99 | INT_MASK(INT_IPI_3) | \ | 101 | (1ULL << INT_IPI_3) | \ |
100 | INT_MASK(INT_IPI_2) | \ | 102 | (1ULL << INT_IPI_2) | \ |
101 | INT_MASK(INT_IPI_1) | \ | 103 | (1ULL << INT_IPI_1) | \ |
102 | INT_MASK(INT_IPI_0) | \ | 104 | (1ULL << INT_IPI_0) | \ |
103 | INT_MASK(INT_PERF_COUNT) | \ | 105 | (1ULL << INT_PERF_COUNT) | \ |
104 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 106 | (1ULL << INT_AUX_PERF_COUNT) | \ |
105 | INT_MASK(INT_INTCTRL_3) | \ | 107 | (1ULL << INT_INTCTRL_3) | \ |
106 | INT_MASK(INT_INTCTRL_2) | \ | 108 | (1ULL << INT_INTCTRL_2) | \ |
107 | INT_MASK(INT_INTCTRL_1) | \ | 109 | (1ULL << INT_INTCTRL_1) | \ |
108 | INT_MASK(INT_INTCTRL_0) | \ | 110 | (1ULL << INT_INTCTRL_0) | \ |
109 | INT_MASK(INT_BOOT_ACCESS) | \ | 111 | (1ULL << INT_BOOT_ACCESS) | \ |
110 | INT_MASK(INT_WORLD_ACCESS) | \ | 112 | (1ULL << INT_WORLD_ACCESS) | \ |
111 | INT_MASK(INT_I_ASID) | \ | 113 | (1ULL << INT_I_ASID) | \ |
112 | INT_MASK(INT_D_ASID) | \ | 114 | (1ULL << INT_D_ASID) | \ |
113 | INT_MASK(INT_DOUBLE_FAULT) | \ | 115 | (1ULL << INT_DOUBLE_FAULT) | \ |
114 | 0) | 116 | 0) |
115 | #define NONQUEUED_INTERRUPTS ( \ | 117 | #define NONQUEUED_INTERRUPTS ( \ |
116 | INT_MASK(INT_SINGLE_STEP_3) | \ | 118 | (1ULL << INT_SINGLE_STEP_3) | \ |
117 | INT_MASK(INT_SINGLE_STEP_2) | \ | 119 | (1ULL << INT_SINGLE_STEP_2) | \ |
118 | INT_MASK(INT_SINGLE_STEP_1) | \ | 120 | (1ULL << INT_SINGLE_STEP_1) | \ |
119 | INT_MASK(INT_SINGLE_STEP_0) | \ | 121 | (1ULL << INT_SINGLE_STEP_0) | \ |
120 | INT_MASK(INT_ITLB_MISS) | \ | 122 | (1ULL << INT_ITLB_MISS) | \ |
121 | INT_MASK(INT_ILL) | \ | 123 | (1ULL << INT_ILL) | \ |
122 | INT_MASK(INT_GPV) | \ | 124 | (1ULL << INT_GPV) | \ |
123 | INT_MASK(INT_IDN_ACCESS) | \ | 125 | (1ULL << INT_IDN_ACCESS) | \ |
124 | INT_MASK(INT_UDN_ACCESS) | \ | 126 | (1ULL << INT_UDN_ACCESS) | \ |
125 | INT_MASK(INT_SWINT_3) | \ | 127 | (1ULL << INT_SWINT_3) | \ |
126 | INT_MASK(INT_SWINT_2) | \ | 128 | (1ULL << INT_SWINT_2) | \ |
127 | INT_MASK(INT_SWINT_1) | \ | 129 | (1ULL << INT_SWINT_1) | \ |
128 | INT_MASK(INT_SWINT_0) | \ | 130 | (1ULL << INT_SWINT_0) | \ |
129 | INT_MASK(INT_ILL_TRANS) | \ | 131 | (1ULL << INT_ILL_TRANS) | \ |
130 | INT_MASK(INT_UNALIGN_DATA) | \ | 132 | (1ULL << INT_UNALIGN_DATA) | \ |
131 | INT_MASK(INT_DTLB_MISS) | \ | 133 | (1ULL << INT_DTLB_MISS) | \ |
132 | INT_MASK(INT_DTLB_ACCESS) | \ | 134 | (1ULL << INT_DTLB_ACCESS) | \ |
133 | 0) | 135 | 0) |
134 | #define CRITICAL_MASKED_INTERRUPTS ( \ | 136 | #define CRITICAL_MASKED_INTERRUPTS ( \ |
135 | INT_MASK(INT_MEM_ERROR) | \ | 137 | (1ULL << INT_MEM_ERROR) | \ |
136 | INT_MASK(INT_SINGLE_STEP_3) | \ | 138 | (1ULL << INT_SINGLE_STEP_3) | \ |
137 | INT_MASK(INT_SINGLE_STEP_2) | \ | 139 | (1ULL << INT_SINGLE_STEP_2) | \ |
138 | INT_MASK(INT_SINGLE_STEP_1) | \ | 140 | (1ULL << INT_SINGLE_STEP_1) | \ |
139 | INT_MASK(INT_SINGLE_STEP_0) | \ | 141 | (1ULL << INT_SINGLE_STEP_0) | \ |
140 | INT_MASK(INT_IDN_COMPLETE) | \ | 142 | (1ULL << INT_IDN_COMPLETE) | \ |
141 | INT_MASK(INT_UDN_COMPLETE) | \ | 143 | (1ULL << INT_UDN_COMPLETE) | \ |
142 | INT_MASK(INT_IDN_FIREWALL) | \ | 144 | (1ULL << INT_IDN_FIREWALL) | \ |
143 | INT_MASK(INT_UDN_FIREWALL) | \ | 145 | (1ULL << INT_UDN_FIREWALL) | \ |
144 | INT_MASK(INT_TILE_TIMER) | \ | 146 | (1ULL << INT_TILE_TIMER) | \ |
145 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 147 | (1ULL << INT_AUX_TILE_TIMER) | \ |
146 | INT_MASK(INT_IDN_TIMER) | \ | 148 | (1ULL << INT_IDN_TIMER) | \ |
147 | INT_MASK(INT_UDN_TIMER) | \ | 149 | (1ULL << INT_UDN_TIMER) | \ |
148 | INT_MASK(INT_IDN_AVAIL) | \ | 150 | (1ULL << INT_IDN_AVAIL) | \ |
149 | INT_MASK(INT_UDN_AVAIL) | \ | 151 | (1ULL << INT_UDN_AVAIL) | \ |
150 | INT_MASK(INT_IPI_3) | \ | 152 | (1ULL << INT_IPI_3) | \ |
151 | INT_MASK(INT_IPI_2) | \ | 153 | (1ULL << INT_IPI_2) | \ |
152 | INT_MASK(INT_IPI_1) | \ | 154 | (1ULL << INT_IPI_1) | \ |
153 | INT_MASK(INT_IPI_0) | \ | 155 | (1ULL << INT_IPI_0) | \ |
154 | INT_MASK(INT_PERF_COUNT) | \ | 156 | (1ULL << INT_PERF_COUNT) | \ |
155 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 157 | (1ULL << INT_AUX_PERF_COUNT) | \ |
156 | INT_MASK(INT_INTCTRL_3) | \ | 158 | (1ULL << INT_INTCTRL_3) | \ |
157 | INT_MASK(INT_INTCTRL_2) | \ | 159 | (1ULL << INT_INTCTRL_2) | \ |
158 | INT_MASK(INT_INTCTRL_1) | \ | 160 | (1ULL << INT_INTCTRL_1) | \ |
159 | INT_MASK(INT_INTCTRL_0) | \ | 161 | (1ULL << INT_INTCTRL_0) | \ |
160 | 0) | 162 | 0) |
161 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | 163 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ |
162 | INT_MASK(INT_ITLB_MISS) | \ | 164 | (1ULL << INT_ITLB_MISS) | \ |
163 | INT_MASK(INT_ILL) | \ | 165 | (1ULL << INT_ILL) | \ |
164 | INT_MASK(INT_GPV) | \ | 166 | (1ULL << INT_GPV) | \ |
165 | INT_MASK(INT_IDN_ACCESS) | \ | 167 | (1ULL << INT_IDN_ACCESS) | \ |
166 | INT_MASK(INT_UDN_ACCESS) | \ | 168 | (1ULL << INT_UDN_ACCESS) | \ |
167 | INT_MASK(INT_SWINT_3) | \ | 169 | (1ULL << INT_SWINT_3) | \ |
168 | INT_MASK(INT_SWINT_2) | \ | 170 | (1ULL << INT_SWINT_2) | \ |
169 | INT_MASK(INT_SWINT_1) | \ | 171 | (1ULL << INT_SWINT_1) | \ |
170 | INT_MASK(INT_SWINT_0) | \ | 172 | (1ULL << INT_SWINT_0) | \ |
171 | INT_MASK(INT_ILL_TRANS) | \ | 173 | (1ULL << INT_ILL_TRANS) | \ |
172 | INT_MASK(INT_UNALIGN_DATA) | \ | 174 | (1ULL << INT_UNALIGN_DATA) | \ |
173 | INT_MASK(INT_DTLB_MISS) | \ | 175 | (1ULL << INT_DTLB_MISS) | \ |
174 | INT_MASK(INT_DTLB_ACCESS) | \ | 176 | (1ULL << INT_DTLB_ACCESS) | \ |
175 | INT_MASK(INT_BOOT_ACCESS) | \ | 177 | (1ULL << INT_BOOT_ACCESS) | \ |
176 | INT_MASK(INT_WORLD_ACCESS) | \ | 178 | (1ULL << INT_WORLD_ACCESS) | \ |
177 | INT_MASK(INT_I_ASID) | \ | 179 | (1ULL << INT_I_ASID) | \ |
178 | INT_MASK(INT_D_ASID) | \ | 180 | (1ULL << INT_D_ASID) | \ |
179 | INT_MASK(INT_DOUBLE_FAULT) | \ | 181 | (1ULL << INT_DOUBLE_FAULT) | \ |
180 | 0) | 182 | 0) |
181 | #define MASKABLE_INTERRUPTS ( \ | 183 | #define MASKABLE_INTERRUPTS ( \ |
182 | INT_MASK(INT_MEM_ERROR) | \ | 184 | (1ULL << INT_MEM_ERROR) | \ |
183 | INT_MASK(INT_SINGLE_STEP_3) | \ | 185 | (1ULL << INT_SINGLE_STEP_3) | \ |
184 | INT_MASK(INT_SINGLE_STEP_2) | \ | 186 | (1ULL << INT_SINGLE_STEP_2) | \ |
185 | INT_MASK(INT_SINGLE_STEP_1) | \ | 187 | (1ULL << INT_SINGLE_STEP_1) | \ |
186 | INT_MASK(INT_SINGLE_STEP_0) | \ | 188 | (1ULL << INT_SINGLE_STEP_0) | \ |
187 | INT_MASK(INT_IDN_COMPLETE) | \ | 189 | (1ULL << INT_IDN_COMPLETE) | \ |
188 | INT_MASK(INT_UDN_COMPLETE) | \ | 190 | (1ULL << INT_UDN_COMPLETE) | \ |
189 | INT_MASK(INT_IDN_FIREWALL) | \ | 191 | (1ULL << INT_IDN_FIREWALL) | \ |
190 | INT_MASK(INT_UDN_FIREWALL) | \ | 192 | (1ULL << INT_UDN_FIREWALL) | \ |
191 | INT_MASK(INT_TILE_TIMER) | \ | 193 | (1ULL << INT_TILE_TIMER) | \ |
192 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 194 | (1ULL << INT_AUX_TILE_TIMER) | \ |
193 | INT_MASK(INT_IDN_TIMER) | \ | 195 | (1ULL << INT_IDN_TIMER) | \ |
194 | INT_MASK(INT_UDN_TIMER) | \ | 196 | (1ULL << INT_UDN_TIMER) | \ |
195 | INT_MASK(INT_IDN_AVAIL) | \ | 197 | (1ULL << INT_IDN_AVAIL) | \ |
196 | INT_MASK(INT_UDN_AVAIL) | \ | 198 | (1ULL << INT_UDN_AVAIL) | \ |
197 | INT_MASK(INT_IPI_3) | \ | 199 | (1ULL << INT_IPI_3) | \ |
198 | INT_MASK(INT_IPI_2) | \ | 200 | (1ULL << INT_IPI_2) | \ |
199 | INT_MASK(INT_IPI_1) | \ | 201 | (1ULL << INT_IPI_1) | \ |
200 | INT_MASK(INT_IPI_0) | \ | 202 | (1ULL << INT_IPI_0) | \ |
201 | INT_MASK(INT_PERF_COUNT) | \ | 203 | (1ULL << INT_PERF_COUNT) | \ |
202 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 204 | (1ULL << INT_AUX_PERF_COUNT) | \ |
203 | INT_MASK(INT_INTCTRL_3) | \ | 205 | (1ULL << INT_INTCTRL_3) | \ |
204 | INT_MASK(INT_INTCTRL_2) | \ | 206 | (1ULL << INT_INTCTRL_2) | \ |
205 | INT_MASK(INT_INTCTRL_1) | \ | 207 | (1ULL << INT_INTCTRL_1) | \ |
206 | INT_MASK(INT_INTCTRL_0) | \ | 208 | (1ULL << INT_INTCTRL_0) | \ |
207 | 0) | 209 | 0) |
208 | #define UNMASKABLE_INTERRUPTS ( \ | 210 | #define UNMASKABLE_INTERRUPTS ( \ |
209 | INT_MASK(INT_ITLB_MISS) | \ | 211 | (1ULL << INT_ITLB_MISS) | \ |
210 | INT_MASK(INT_ILL) | \ | 212 | (1ULL << INT_ILL) | \ |
211 | INT_MASK(INT_GPV) | \ | 213 | (1ULL << INT_GPV) | \ |
212 | INT_MASK(INT_IDN_ACCESS) | \ | 214 | (1ULL << INT_IDN_ACCESS) | \ |
213 | INT_MASK(INT_UDN_ACCESS) | \ | 215 | (1ULL << INT_UDN_ACCESS) | \ |
214 | INT_MASK(INT_SWINT_3) | \ | 216 | (1ULL << INT_SWINT_3) | \ |
215 | INT_MASK(INT_SWINT_2) | \ | 217 | (1ULL << INT_SWINT_2) | \ |
216 | INT_MASK(INT_SWINT_1) | \ | 218 | (1ULL << INT_SWINT_1) | \ |
217 | INT_MASK(INT_SWINT_0) | \ | 219 | (1ULL << INT_SWINT_0) | \ |
218 | INT_MASK(INT_ILL_TRANS) | \ | 220 | (1ULL << INT_ILL_TRANS) | \ |
219 | INT_MASK(INT_UNALIGN_DATA) | \ | 221 | (1ULL << INT_UNALIGN_DATA) | \ |
220 | INT_MASK(INT_DTLB_MISS) | \ | 222 | (1ULL << INT_DTLB_MISS) | \ |
221 | INT_MASK(INT_DTLB_ACCESS) | \ | 223 | (1ULL << INT_DTLB_ACCESS) | \ |
222 | INT_MASK(INT_BOOT_ACCESS) | \ | 224 | (1ULL << INT_BOOT_ACCESS) | \ |
223 | INT_MASK(INT_WORLD_ACCESS) | \ | 225 | (1ULL << INT_WORLD_ACCESS) | \ |
224 | INT_MASK(INT_I_ASID) | \ | 226 | (1ULL << INT_I_ASID) | \ |
225 | INT_MASK(INT_D_ASID) | \ | 227 | (1ULL << INT_D_ASID) | \ |
226 | INT_MASK(INT_DOUBLE_FAULT) | \ | 228 | (1ULL << INT_DOUBLE_FAULT) | \ |
227 | 0) | 229 | 0) |
228 | #define SYNC_INTERRUPTS ( \ | 230 | #define SYNC_INTERRUPTS ( \ |
229 | INT_MASK(INT_SINGLE_STEP_3) | \ | 231 | (1ULL << INT_SINGLE_STEP_3) | \ |
230 | INT_MASK(INT_SINGLE_STEP_2) | \ | 232 | (1ULL << INT_SINGLE_STEP_2) | \ |
231 | INT_MASK(INT_SINGLE_STEP_1) | \ | 233 | (1ULL << INT_SINGLE_STEP_1) | \ |
232 | INT_MASK(INT_SINGLE_STEP_0) | \ | 234 | (1ULL << INT_SINGLE_STEP_0) | \ |
233 | INT_MASK(INT_IDN_COMPLETE) | \ | 235 | (1ULL << INT_IDN_COMPLETE) | \ |
234 | INT_MASK(INT_UDN_COMPLETE) | \ | 236 | (1ULL << INT_UDN_COMPLETE) | \ |
235 | INT_MASK(INT_ITLB_MISS) | \ | 237 | (1ULL << INT_ITLB_MISS) | \ |
236 | INT_MASK(INT_ILL) | \ | 238 | (1ULL << INT_ILL) | \ |
237 | INT_MASK(INT_GPV) | \ | 239 | (1ULL << INT_GPV) | \ |
238 | INT_MASK(INT_IDN_ACCESS) | \ | 240 | (1ULL << INT_IDN_ACCESS) | \ |
239 | INT_MASK(INT_UDN_ACCESS) | \ | 241 | (1ULL << INT_UDN_ACCESS) | \ |
240 | INT_MASK(INT_SWINT_3) | \ | 242 | (1ULL << INT_SWINT_3) | \ |
241 | INT_MASK(INT_SWINT_2) | \ | 243 | (1ULL << INT_SWINT_2) | \ |
242 | INT_MASK(INT_SWINT_1) | \ | 244 | (1ULL << INT_SWINT_1) | \ |
243 | INT_MASK(INT_SWINT_0) | \ | 245 | (1ULL << INT_SWINT_0) | \ |
244 | INT_MASK(INT_ILL_TRANS) | \ | 246 | (1ULL << INT_ILL_TRANS) | \ |
245 | INT_MASK(INT_UNALIGN_DATA) | \ | 247 | (1ULL << INT_UNALIGN_DATA) | \ |
246 | INT_MASK(INT_DTLB_MISS) | \ | 248 | (1ULL << INT_DTLB_MISS) | \ |
247 | INT_MASK(INT_DTLB_ACCESS) | \ | 249 | (1ULL << INT_DTLB_ACCESS) | \ |
248 | 0) | 250 | 0) |
249 | #define NON_SYNC_INTERRUPTS ( \ | 251 | #define NON_SYNC_INTERRUPTS ( \ |
250 | INT_MASK(INT_MEM_ERROR) | \ | 252 | (1ULL << INT_MEM_ERROR) | \ |
251 | INT_MASK(INT_IDN_FIREWALL) | \ | 253 | (1ULL << INT_IDN_FIREWALL) | \ |
252 | INT_MASK(INT_UDN_FIREWALL) | \ | 254 | (1ULL << INT_UDN_FIREWALL) | \ |
253 | INT_MASK(INT_TILE_TIMER) | \ | 255 | (1ULL << INT_TILE_TIMER) | \ |
254 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 256 | (1ULL << INT_AUX_TILE_TIMER) | \ |
255 | INT_MASK(INT_IDN_TIMER) | \ | 257 | (1ULL << INT_IDN_TIMER) | \ |
256 | INT_MASK(INT_UDN_TIMER) | \ | 258 | (1ULL << INT_UDN_TIMER) | \ |
257 | INT_MASK(INT_IDN_AVAIL) | \ | 259 | (1ULL << INT_IDN_AVAIL) | \ |
258 | INT_MASK(INT_UDN_AVAIL) | \ | 260 | (1ULL << INT_UDN_AVAIL) | \ |
259 | INT_MASK(INT_IPI_3) | \ | 261 | (1ULL << INT_IPI_3) | \ |
260 | INT_MASK(INT_IPI_2) | \ | 262 | (1ULL << INT_IPI_2) | \ |
261 | INT_MASK(INT_IPI_1) | \ | 263 | (1ULL << INT_IPI_1) | \ |
262 | INT_MASK(INT_IPI_0) | \ | 264 | (1ULL << INT_IPI_0) | \ |
263 | INT_MASK(INT_PERF_COUNT) | \ | 265 | (1ULL << INT_PERF_COUNT) | \ |
264 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 266 | (1ULL << INT_AUX_PERF_COUNT) | \ |
265 | INT_MASK(INT_INTCTRL_3) | \ | 267 | (1ULL << INT_INTCTRL_3) | \ |
266 | INT_MASK(INT_INTCTRL_2) | \ | 268 | (1ULL << INT_INTCTRL_2) | \ |
267 | INT_MASK(INT_INTCTRL_1) | \ | 269 | (1ULL << INT_INTCTRL_1) | \ |
268 | INT_MASK(INT_INTCTRL_0) | \ | 270 | (1ULL << INT_INTCTRL_0) | \ |
269 | INT_MASK(INT_BOOT_ACCESS) | \ | 271 | (1ULL << INT_BOOT_ACCESS) | \ |
270 | INT_MASK(INT_WORLD_ACCESS) | \ | 272 | (1ULL << INT_WORLD_ACCESS) | \ |
271 | INT_MASK(INT_I_ASID) | \ | 273 | (1ULL << INT_I_ASID) | \ |
272 | INT_MASK(INT_D_ASID) | \ | 274 | (1ULL << INT_D_ASID) | \ |
273 | INT_MASK(INT_DOUBLE_FAULT) | \ | 275 | (1ULL << INT_DOUBLE_FAULT) | \ |
274 | 0) | 276 | 0) |
275 | #endif /* !__ASSEMBLER__ */ | 277 | #endif /* !__ASSEMBLER__ */ |
276 | #endif /* !__ARCH_INTERRUPTS_H__ */ | 278 | #endif /* !__ARCH_INTERRUPTS_H__ */ |
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index 54bc9a6678e8..4ea080902654 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S | |||
@@ -1035,7 +1035,9 @@ handle_syscall: | |||
1035 | /* Ensure that the syscall number is within the legal range. */ | 1035 | /* Ensure that the syscall number is within the legal range. */ |
1036 | { | 1036 | { |
1037 | moveli r20, hw2(sys_call_table) | 1037 | moveli r20, hw2(sys_call_table) |
1038 | #ifdef CONFIG_COMPAT | ||
1038 | blbs r30, .Lcompat_syscall | 1039 | blbs r30, .Lcompat_syscall |
1040 | #endif | ||
1039 | } | 1041 | } |
1040 | { | 1042 | { |
1041 | cmpltu r21, TREG_SYSCALL_NR_NAME, r21 | 1043 | cmpltu r21, TREG_SYSCALL_NR_NAME, r21 |
@@ -1093,6 +1095,7 @@ handle_syscall: | |||
1093 | j .Lresume_userspace /* jump into middle of interrupt_return */ | 1095 | j .Lresume_userspace /* jump into middle of interrupt_return */ |
1094 | } | 1096 | } |
1095 | 1097 | ||
1098 | #ifdef CONFIG_COMPAT | ||
1096 | .Lcompat_syscall: | 1099 | .Lcompat_syscall: |
1097 | /* | 1100 | /* |
1098 | * Load the base of the compat syscall table in r20, and | 1101 | * Load the base of the compat syscall table in r20, and |
@@ -1117,6 +1120,7 @@ handle_syscall: | |||
1117 | { move r15, r4; addxi r4, r4, 0 } | 1120 | { move r15, r4; addxi r4, r4, 0 } |
1118 | { move r16, r5; addxi r5, r5, 0 } | 1121 | { move r16, r5; addxi r5, r5, 0 } |
1119 | j .Lload_syscall_pointer | 1122 | j .Lload_syscall_pointer |
1123 | #endif | ||
1120 | 1124 | ||
1121 | .Linvalid_syscall: | 1125 | .Linvalid_syscall: |
1122 | /* Report an invalid syscall back to the user program */ | 1126 | /* Report an invalid syscall back to the user program */ |
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 0e5661e7d00d..caf93ae11793 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t); | |||
159 | int copy_thread(unsigned long clone_flags, unsigned long sp, | 159 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
160 | unsigned long arg, struct task_struct *p) | 160 | unsigned long arg, struct task_struct *p) |
161 | { | 161 | { |
162 | struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs(); | 162 | struct pt_regs *childregs = task_pt_regs(p); |
163 | unsigned long ksp; | 163 | unsigned long ksp; |
164 | unsigned long *callee_regs; | 164 | unsigned long *callee_regs; |
165 | 165 | ||
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c index baa3d905fee2..d1b5c913ae72 100644 --- a/arch/tile/kernel/reboot.c +++ b/arch/tile/kernel/reboot.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/reboot.h> | 16 | #include <linux/reboot.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/pm.h> | 18 | #include <linux/pm.h> |
19 | #include <linux/export.h> | ||
19 | #include <asm/page.h> | 20 | #include <asm/page.h> |
20 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
21 | #include <hv/hypervisor.h> | 22 | #include <hv/hypervisor.h> |
@@ -49,3 +50,4 @@ void machine_restart(char *cmd) | |||
49 | 50 | ||
50 | /* No interesting distinction to be made here. */ | 51 | /* No interesting distinction to be made here. */ |
51 | void (*pm_power_off)(void) = NULL; | 52 | void (*pm_power_off)(void) = NULL; |
53 | EXPORT_SYMBOL(pm_power_off); | ||
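Exporting pm_power_off lets a modular platform driver install its own power-off hook; the usual pattern is to assign the function pointer on init and clear it on exit, roughly as follows (a sketch with hypothetical names):

#include <linux/module.h>
#include <linux/pm.h>

static void my_board_power_off(void)
{
	/* poke a board-specific register or hypervisor call here */
}

static int __init my_board_init(void)
{
	pm_power_off = my_board_power_off;
	return 0;
}

static void __exit my_board_exit(void)
{
	if (pm_power_off == my_board_power_off)
		pm_power_off = NULL;
}

module_init(my_board_init);
module_exit(my_board_exit);
MODULE_LICENSE("GPL");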
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6a649a4462d3..d1e15f7b59c6 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/timex.h> | 31 | #include <linux/timex.h> |
32 | #include <linux/hugetlb.h> | 32 | #include <linux/hugetlb.h> |
33 | #include <linux/start_kernel.h> | 33 | #include <linux/start_kernel.h> |
34 | #include <linux/screen_info.h> | ||
34 | #include <asm/setup.h> | 35 | #include <asm/setup.h> |
35 | #include <asm/sections.h> | 36 | #include <asm/sections.h> |
36 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; } | |||
49 | /* Chip information */ | 50 | /* Chip information */ |
50 | char chip_model[64] __write_once; | 51 | char chip_model[64] __write_once; |
51 | 52 | ||
53 | #ifdef CONFIG_VT | ||
54 | struct screen_info screen_info; | ||
55 | #endif | ||
56 | |||
52 | struct pglist_data node_data[MAX_NUMNODES] __read_mostly; | 57 | struct pglist_data node_data[MAX_NUMNODES] __read_mostly; |
53 | EXPORT_SYMBOL(node_data); | 58 | EXPORT_SYMBOL(node_data); |
54 | 59 | ||
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index b2f44c28dda6..ed258b8ae320 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c | |||
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) | |||
112 | p->pc, p->sp, p->ex1); | 112 | p->pc, p->sp, p->ex1); |
113 | p = NULL; | 113 | p = NULL; |
114 | } | 114 | } |
115 | if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) | 115 | if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0) |
116 | return p; | 116 | return p; |
117 | return NULL; | 117 | return NULL; |
118 | } | 118 | } |
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace) | |||
484 | { | 484 | { |
485 | save_stack_trace_tsk(NULL, trace); | 485 | save_stack_trace_tsk(NULL, trace); |
486 | } | 486 | } |
487 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
487 | 488 | ||
488 | #endif | 489 | #endif |
489 | 490 | ||
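With save_stack_trace() exported, a GPL module can capture the current call chain using the stack_trace API of this era, along these lines (a sketch; MAX_TRACE is arbitrary):

#include <linux/module.h>
#include <linux/stacktrace.h>

#define MAX_TRACE 16

static void dump_current_stack(void)
{
	unsigned long entries[MAX_TRACE];
	struct stack_trace trace = {
		.max_entries = MAX_TRACE,
		.entries     = entries,
		.skip        = 1,	/* skip this helper itself */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}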
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c index db4fb89e12d8..8f8ad814b139 100644 --- a/arch/tile/lib/cacheflush.c +++ b/arch/tile/lib/cacheflush.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * more details. | 12 | * more details. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/export.h> | ||
15 | #include <asm/page.h> | 16 | #include <asm/page.h> |
16 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
17 | #include <arch/icache.h> | 18 | #include <arch/icache.h> |
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh) | |||
165 | __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf); | 166 | __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf); |
166 | #endif | 167 | #endif |
167 | } | 168 | } |
169 | EXPORT_SYMBOL_GPL(finv_buffer_remote); | ||
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c index fdc403614d12..75947edccb26 100644 --- a/arch/tile/lib/cpumask.c +++ b/arch/tile/lib/cpumask.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/export.h> | ||
19 | 20 | ||
20 | /* | 21 | /* |
21 | * Allow cropping out bits beyond the end of the array. | 22 | * Allow cropping out bits beyond the end of the array. |
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits) | |||
50 | } while (*bp != '\0' && *bp != '\n'); | 51 | } while (*bp != '\0' && *bp != '\n'); |
51 | return 0; | 52 | return 0; |
52 | } | 53 | } |
54 | EXPORT_SYMBOL(bitmap_parselist_crop); | ||
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index dd5f0a33fdaf..4385cb6fa00a 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c | |||
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel); | |||
55 | EXPORT_SYMBOL(hv_dev_close); | 55 | EXPORT_SYMBOL(hv_dev_close); |
56 | EXPORT_SYMBOL(hv_sysconf); | 56 | EXPORT_SYMBOL(hv_sysconf); |
57 | EXPORT_SYMBOL(hv_confstr); | 57 | EXPORT_SYMBOL(hv_confstr); |
58 | EXPORT_SYMBOL(hv_get_rtc); | ||
59 | EXPORT_SYMBOL(hv_set_rtc); | ||
58 | 60 | ||
59 | /* libgcc.a */ | 61 | /* libgcc.a */ |
60 | uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); | 62 | uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); |
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 5f7868dcd6d4..1ae911939a18 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c | |||
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home) | |||
408 | __set_pte(ptep, pte_set_home(pteval, home)); | 408 | __set_pte(ptep, pte_set_home(pteval, home)); |
409 | } | 409 | } |
410 | } | 410 | } |
411 | EXPORT_SYMBOL(homecache_change_page_home); | ||
411 | 412 | ||
412 | struct page *homecache_alloc_pages(gfp_t gfp_mask, | 413 | struct page *homecache_alloc_pages(gfp_t gfp_mask, |
413 | unsigned int order, int home) | 414 | unsigned int order, int home) |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 79795af59810..a9e50ac90838 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -28,7 +28,6 @@ config X86 | |||
28 | select HAVE_OPROFILE | 28 | select HAVE_OPROFILE |
29 | select HAVE_PCSPKR_PLATFORM | 29 | select HAVE_PCSPKR_PLATFORM |
30 | select HAVE_PERF_EVENTS | 30 | select HAVE_PERF_EVENTS |
31 | select HAVE_IRQ_WORK | ||
32 | select HAVE_IOREMAP_PROT | 31 | select HAVE_IOREMAP_PROT |
33 | select HAVE_KPROBES | 32 | select HAVE_KPROBES |
34 | select HAVE_MEMBLOCK | 33 | select HAVE_MEMBLOCK |
@@ -40,10 +39,12 @@ config X86 | |||
40 | select HAVE_DMA_CONTIGUOUS if !SWIOTLB | 39 | select HAVE_DMA_CONTIGUOUS if !SWIOTLB |
41 | select HAVE_KRETPROBES | 40 | select HAVE_KRETPROBES |
42 | select HAVE_OPTPROBES | 41 | select HAVE_OPTPROBES |
42 | select HAVE_KPROBES_ON_FTRACE | ||
43 | select HAVE_FTRACE_MCOUNT_RECORD | 43 | select HAVE_FTRACE_MCOUNT_RECORD |
44 | select HAVE_FENTRY if X86_64 | 44 | select HAVE_FENTRY if X86_64 |
45 | select HAVE_C_RECORDMCOUNT | 45 | select HAVE_C_RECORDMCOUNT |
46 | select HAVE_DYNAMIC_FTRACE | 46 | select HAVE_DYNAMIC_FTRACE |
47 | select HAVE_DYNAMIC_FTRACE_WITH_REGS | ||
47 | select HAVE_FUNCTION_TRACER | 48 | select HAVE_FUNCTION_TRACER |
48 | select HAVE_FUNCTION_GRAPH_TRACER | 49 | select HAVE_FUNCTION_GRAPH_TRACER |
49 | select HAVE_FUNCTION_GRAPH_FP_TEST | 50 | select HAVE_FUNCTION_GRAPH_FP_TEST |
@@ -106,6 +107,7 @@ config X86 | |||
106 | select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) | 107 | select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) |
107 | select GENERIC_TIME_VSYSCALL if X86_64 | 108 | select GENERIC_TIME_VSYSCALL if X86_64 |
108 | select KTIME_SCALAR if X86_32 | 109 | select KTIME_SCALAR if X86_32 |
110 | select ALWAYS_USE_PERSISTENT_CLOCK | ||
109 | select GENERIC_STRNCPY_FROM_USER | 111 | select GENERIC_STRNCPY_FROM_USER |
110 | select GENERIC_STRNLEN_USER | 112 | select GENERIC_STRNLEN_USER |
111 | select HAVE_CONTEXT_TRACKING if X86_64 | 113 | select HAVE_CONTEXT_TRACKING if X86_64 |
@@ -114,6 +116,7 @@ config X86 | |||
114 | select MODULES_USE_ELF_RELA if X86_64 | 116 | select MODULES_USE_ELF_RELA if X86_64 |
115 | select CLONE_BACKWARDS if X86_32 | 117 | select CLONE_BACKWARDS if X86_32 |
116 | select GENERIC_SIGALTSTACK | 118 | select GENERIC_SIGALTSTACK |
119 | select ARCH_USE_BUILTIN_BSWAP | ||
117 | 120 | ||
118 | config INSTRUCTION_DECODER | 121 | config INSTRUCTION_DECODER |
119 | def_bool y | 122 | def_bool y |
@@ -2138,6 +2141,7 @@ config OLPC_XO1_RTC | |||
2138 | config OLPC_XO1_SCI | 2141 | config OLPC_XO1_SCI |
2139 | bool "OLPC XO-1 SCI extras" | 2142 | bool "OLPC XO-1 SCI extras" |
2140 | depends on OLPC && OLPC_XO1_PM | 2143 | depends on OLPC && OLPC_XO1_PM |
2144 | depends on INPUT=y | ||
2141 | select POWER_SUPPLY | 2145 | select POWER_SUPPLY |
2142 | select GPIO_CS5535 | 2146 | select GPIO_CS5535 |
2143 | select MFD_CORE | 2147 | select MFD_CORE |
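With HAVE_KPROBES_ON_FTRACE selected, a kprobe placed on an ftrace-instrumented function entry can ride the ftrace trampoline instead of an int3 breakpoint, but it is registered exactly as before. A minimal module sketch (the probed symbol is only an example):

#include <linux/module.h>
#include <linux/kprobes.h>

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* example target; any traceable function */
};

static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s at %p\n", kp.symbol_name, p->addr);
	return 0;
}

static int __init kp_init(void)
{
	kp.pre_handler = pre_handler;
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");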
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index ccce0ed67dde..379814bc41e3 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile | |||
@@ -71,7 +71,7 @@ GCOV_PROFILE := n | |||
71 | $(obj)/bzImage: asflags-y := $(SVGA_MODE) | 71 | $(obj)/bzImage: asflags-y := $(SVGA_MODE) |
72 | 72 | ||
73 | quiet_cmd_image = BUILD $@ | 73 | quiet_cmd_image = BUILD $@ |
74 | cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@ | 74 | cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/zoffset.h > $@ |
75 | 75 | ||
76 | $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE | 76 | $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE |
77 | $(call if_changed,image) | 77 | $(call if_changed,image) |
@@ -92,7 +92,7 @@ targets += voffset.h | |||
92 | $(obj)/voffset.h: vmlinux FORCE | 92 | $(obj)/voffset.h: vmlinux FORCE |
93 | $(call if_changed,voffset) | 93 | $(call if_changed,voffset) |
94 | 94 | ||
95 | sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' | 95 | sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi_pe_entry\|efi_stub_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' |
96 | 96 | ||
97 | quiet_cmd_zoffset = ZOFFSET $@ | 97 | quiet_cmd_zoffset = ZOFFSET $@ |
98 | cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ | 98 | cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ |
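The widened sed-zoffset pattern turns the nm output for those symbols into #define lines that tools/build parses at image-assembly time. Generated zoffset.h content looks roughly like this (addresses are illustrative, not taken from a real build):

/* zoffset.h (generated) -- example content only */
#define ZO_efi_pe_entry 0x0000000000000210
#define ZO_efi_stub_entry 0x0000000000000230
#define ZO_input_data 0x00000000000003b0
#define ZO_startup_32 0x0000000000000000
#define ZO_startup_64 0x0000000000000200
#define ZO_z_extract_offset 0x0000000000a2e000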
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 18e329ca108e..f8fa41190c35 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -256,10 +256,10 @@ static efi_status_t setup_efi_pci(struct boot_params *params) | |||
256 | int i; | 256 | int i; |
257 | struct setup_data *data; | 257 | struct setup_data *data; |
258 | 258 | ||
259 | data = (struct setup_data *)params->hdr.setup_data; | 259 | data = (struct setup_data *)(unsigned long)params->hdr.setup_data; |
260 | 260 | ||
261 | while (data && data->next) | 261 | while (data && data->next) |
262 | data = (struct setup_data *)data->next; | 262 | data = (struct setup_data *)(unsigned long)data->next; |
263 | 263 | ||
264 | status = efi_call_phys5(sys_table->boottime->locate_handle, | 264 | status = efi_call_phys5(sys_table->boottime->locate_handle, |
265 | EFI_LOCATE_BY_PROTOCOL, &pci_proto, | 265 | EFI_LOCATE_BY_PROTOCOL, &pci_proto, |
@@ -295,16 +295,18 @@ static efi_status_t setup_efi_pci(struct boot_params *params) | |||
295 | if (!pci) | 295 | if (!pci) |
296 | continue; | 296 | continue; |
297 | 297 | ||
298 | #ifdef CONFIG_X86_64 | ||
298 | status = efi_call_phys4(pci->attributes, pci, | 299 | status = efi_call_phys4(pci->attributes, pci, |
299 | EfiPciIoAttributeOperationGet, 0, | 300 | EfiPciIoAttributeOperationGet, 0, |
300 | &attributes); | 301 | &attributes); |
301 | | 302 | #else |
303 | status = efi_call_phys5(pci->attributes, pci, | ||
304 | EfiPciIoAttributeOperationGet, 0, 0, | ||
305 | &attributes); | ||
306 | #endif | ||
302 | if (status != EFI_SUCCESS) | 307 | if (status != EFI_SUCCESS) |
303 | continue; | 308 | continue; |
304 | 309 | ||
305 | if (!(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM)) | ||
306 | continue; | ||
307 | |||
308 | if (!pci->romimage || !pci->romsize) | 310 | if (!pci->romimage || !pci->romsize) |
309 | continue; | 311 | continue; |
310 | 312 | ||
@@ -345,9 +347,9 @@ static efi_status_t setup_efi_pci(struct boot_params *params) | |||
345 | memcpy(rom->romdata, pci->romimage, pci->romsize); | 347 | memcpy(rom->romdata, pci->romimage, pci->romsize); |
346 | 348 | ||
347 | if (data) | 349 | if (data) |
348 | data->next = (uint64_t)rom; | 350 | data->next = (unsigned long)rom; |
349 | else | 351 | else |
350 | params->hdr.setup_data = (uint64_t)rom; | 352 | params->hdr.setup_data = (unsigned long)rom; |
351 | 353 | ||
352 | data = (struct setup_data *)rom; | 354 | data = (struct setup_data *)rom; |
353 | 355 | ||
@@ -432,10 +434,9 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, | |||
432 | * Once we've found a GOP supporting ConOut, | 434 | * Once we've found a GOP supporting ConOut, |
433 | * don't bother looking any further. | 435 | * don't bother looking any further. |
434 | */ | 436 | */ |
437 | first_gop = gop; | ||
435 | if (conout_found) | 438 | if (conout_found) |
436 | break; | 439 | break; |
437 | |||
438 | first_gop = gop; | ||
439 | } | 440 | } |
440 | } | 441 | } |
441 | 442 | ||
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index aa4aaf1b2380..1e3184f6072f 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S | |||
@@ -35,11 +35,11 @@ ENTRY(startup_32) | |||
35 | #ifdef CONFIG_EFI_STUB | 35 | #ifdef CONFIG_EFI_STUB |
36 | jmp preferred_addr | 36 | jmp preferred_addr |
37 | 37 | ||
38 | .balign 0x10 | ||
39 | /* | 38 | /* |
40 | * We don't need the return address, so set up the stack so | 39 | * We don't need the return address, so set up the stack so |
41 | * efi_main() can find its arugments. | 40 | * efi_main() can find its arguments. |
42 | */ | 41 | */ |
42 | ENTRY(efi_pe_entry) | ||
43 | add $0x4, %esp | 43 | add $0x4, %esp |
44 | 44 | ||
45 | call make_boot_params | 45 | call make_boot_params |
@@ -50,8 +50,10 @@ ENTRY(startup_32) | |||
50 | pushl %eax | 50 | pushl %eax |
51 | pushl %esi | 51 | pushl %esi |
52 | pushl %ecx | 52 | pushl %ecx |
53 | sub $0x4, %esp | ||
53 | 54 | ||
54 | .org 0x30,0x90 | 55 | ENTRY(efi_stub_entry) |
56 | add $0x4, %esp | ||
55 | call efi_main | 57 | call efi_main |
56 | cmpl $0, %eax | 58 | cmpl $0, %eax |
57 | movl %eax, %esi | 59 | movl %eax, %esi |
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 2c4b171eec33..f5d1aaa0dec8 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
@@ -201,12 +201,12 @@ ENTRY(startup_64) | |||
201 | */ | 201 | */ |
202 | #ifdef CONFIG_EFI_STUB | 202 | #ifdef CONFIG_EFI_STUB |
203 | /* | 203 | /* |
204 | * The entry point for the PE/COFF executable is 0x210, so only | 204 | * The entry point for the PE/COFF executable is efi_pe_entry, so |
205 | * legacy boot loaders will execute this jmp. | 205 | * only legacy boot loaders will execute this jmp. |
206 | */ | 206 | */ |
207 | jmp preferred_addr | 207 | jmp preferred_addr |
208 | 208 | ||
209 | .org 0x210 | 209 | ENTRY(efi_pe_entry) |
210 | mov %rcx, %rdi | 210 | mov %rcx, %rdi |
211 | mov %rdx, %rsi | 211 | mov %rdx, %rsi |
212 | pushq %rdi | 212 | pushq %rdi |
@@ -218,7 +218,7 @@ ENTRY(startup_64) | |||
218 | popq %rsi | 218 | popq %rsi |
219 | popq %rdi | 219 | popq %rdi |
220 | 220 | ||
221 | .org 0x230,0x90 | 221 | ENTRY(efi_stub_entry) |
222 | call efi_main | 222 | call efi_main |
223 | movq %rax,%rsi | 223 | movq %rax,%rsi |
224 | cmpq $0,%rax | 224 | cmpq $0,%rax |
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index 4b8e165ee572..94c544650020 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c | |||
@@ -52,6 +52,10 @@ int is_big_kernel; | |||
52 | 52 | ||
53 | #define PECOFF_RELOC_RESERVE 0x20 | 53 | #define PECOFF_RELOC_RESERVE 0x20 |
54 | 54 | ||
55 | unsigned long efi_stub_entry; | ||
56 | unsigned long efi_pe_entry; | ||
57 | unsigned long startup_64; | ||
58 | |||
55 | /*----------------------------------------------------------------------*/ | 59 | /*----------------------------------------------------------------------*/ |
56 | 60 | ||
57 | static const u32 crctab32[] = { | 61 | static const u32 crctab32[] = { |
@@ -132,7 +136,7 @@ static void die(const char * str, ...) | |||
132 | 136 | ||
133 | static void usage(void) | 137 | static void usage(void) |
134 | { | 138 | { |
135 | die("Usage: build setup system [> image]"); | 139 | die("Usage: build setup system [zoffset.h] [> image]"); |
136 | } | 140 | } |
137 | 141 | ||
138 | #ifdef CONFIG_EFI_STUB | 142 | #ifdef CONFIG_EFI_STUB |
@@ -206,30 +210,54 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz) | |||
206 | */ | 210 | */ |
207 | put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); | 211 | put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); |
208 | 212 | ||
209 | #ifdef CONFIG_X86_32 | ||
210 | /* | 213 | /* |
211 | * Address of entry point. | 214 | * Address of entry point for PE/COFF executable |
212 | * | ||
213 | * The EFI stub entry point is +16 bytes from the start of | ||
214 | * the .text section. | ||
215 | */ | 215 | */ |
216 | put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]); | 216 | put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]); |
217 | #else | ||
218 | /* | ||
219 | * Address of entry point. startup_32 is at the beginning and | ||
220 | * the 64-bit entry point (startup_64) is always 512 bytes | ||
221 | * after. The EFI stub entry point is 16 bytes after that, as | ||
222 | * the first instruction allows legacy loaders to jump over | ||
223 | * the EFI stub initialisation | ||
224 | */ | ||
225 | put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]); | ||
226 | #endif /* CONFIG_X86_32 */ | ||
227 | 217 | ||
228 | update_pecoff_section_header(".text", text_start, text_sz); | 218 | update_pecoff_section_header(".text", text_start, text_sz); |
229 | } | 219 | } |
230 | 220 | ||
231 | #endif /* CONFIG_EFI_STUB */ | 221 | #endif /* CONFIG_EFI_STUB */ |
232 | 222 | ||
223 | |||
224 | /* | ||
225 | * Parse zoffset.h and find the entry points. We could just #include zoffset.h | ||
226 | * but that would mean tools/build would have to be rebuilt every time. It's | ||
227 | * not as if parsing it is hard... | ||
228 | */ | ||
229 | #define PARSE_ZOFS(p, sym) do { \ | ||
230 | if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym))) \ | ||
231 | sym = strtoul(p + 11 + sizeof(#sym), NULL, 16); \ | ||
232 | } while (0) | ||
233 | |||
234 | static void parse_zoffset(char *fname) | ||
235 | { | ||
236 | FILE *file; | ||
237 | char *p; | ||
238 | int c; | ||
239 | |||
240 | file = fopen(fname, "r"); | ||
241 | if (!file) | ||
242 | die("Unable to open `%s': %m", fname); | ||
243 | c = fread(buf, 1, sizeof(buf) - 1, file); | ||
244 | if (ferror(file)) | ||
245 | die("read-error on `zoffset.h'"); | ||
246 | buf[c] = 0; | ||
247 | |||
248 | p = (char *)buf; | ||
249 | |||
250 | while (p && *p) { | ||
251 | PARSE_ZOFS(p, efi_stub_entry); | ||
252 | PARSE_ZOFS(p, efi_pe_entry); | ||
253 | PARSE_ZOFS(p, startup_64); | ||
254 | |||
255 | p = strchr(p, '\n'); | ||
256 | while (p && (*p == '\r' || *p == '\n')) | ||
257 | p++; | ||
258 | } | ||
259 | } | ||
260 | |||
233 | int main(int argc, char ** argv) | 261 | int main(int argc, char ** argv) |
234 | { | 262 | { |
235 | unsigned int i, sz, setup_sectors; | 263 | unsigned int i, sz, setup_sectors; |
@@ -241,7 +269,19 @@ int main(int argc, char ** argv) | |||
241 | void *kernel; | 269 | void *kernel; |
242 | u32 crc = 0xffffffffUL; | 270 | u32 crc = 0xffffffffUL; |
243 | 271 | ||
244 | if (argc != 3) | 272 | /* Defaults for old kernel */ |
273 | #ifdef CONFIG_X86_32 | ||
274 | efi_pe_entry = 0x10; | ||
275 | efi_stub_entry = 0x30; | ||
276 | #else | ||
277 | efi_pe_entry = 0x210; | ||
278 | efi_stub_entry = 0x230; | ||
279 | startup_64 = 0x200; | ||
280 | #endif | ||
281 | |||
282 | if (argc == 4) | ||
283 | parse_zoffset(argv[3]); | ||
284 | else if (argc != 3) | ||
245 | usage(); | 285 | usage(); |
246 | 286 | ||
247 | /* Copy the setup code */ | 287 | /* Copy the setup code */ |
@@ -299,6 +339,11 @@ int main(int argc, char ** argv) | |||
299 | 339 | ||
300 | #ifdef CONFIG_EFI_STUB | 340 | #ifdef CONFIG_EFI_STUB |
301 | update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); | 341 | update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); |
342 | |||
343 | #ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */ | ||
344 | efi_stub_entry -= 0x200; | ||
345 | #endif | ||
346 | put_unaligned_le32(efi_stub_entry, &buf[0x264]); | ||
302 | #endif | 347 | #endif |
303 | 348 | ||
304 | crc = partial_crc32(buf, i, crc); | 349 | crc = partial_crc32(buf, i, crc); |
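build.c now takes an optional third argument, a generated zoffset.h, and falls back to the old fixed offsets (0x10/0x30 on 32-bit, 0x210/0x230 on 64-bit) when it is absent. The file it parses is just a list of "#define ZO_<symbol> <hex>" lines, which is why PARSE_ZOFS can get away with strncmp/strtoul. A self-contained sketch of the same parsing loop, fed the default 64-bit offsets from the hunk above:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned long efi_pe_entry, efi_stub_entry, startup_64;

    /* Same shape as PARSE_ZOFS above: match "#define ZO_<sym> " exactly
     * (11 bytes of prefix + the symbol + one space), then parse hex. */
    #define PARSE_ZOFS(p, sym) do { \
        if (!strncmp(p, "#define ZO_" #sym " ", 11 + sizeof(#sym))) \
            sym = strtoul(p + 11 + sizeof(#sym), NULL, 16); \
    } while (0)

    int main(void)
    {
        char buf[] = "#define ZO_efi_pe_entry 0x210\n"
                     "#define ZO_efi_stub_entry 0x230\n"
                     "#define ZO_startup_64 0x200\n";
        char *p = buf;

        while (p && *p) {
            PARSE_ZOFS(p, efi_pe_entry);
            PARSE_ZOFS(p, efi_stub_entry);
            PARSE_ZOFS(p, startup_64);
            p = strchr(p, '\n');
            while (p && (*p == '\r' || *p == '\n'))
                p++;
        }
        printf("pe=%#lx stub=%#lx startup_64=%#lx\n",
               efi_pe_entry, efi_stub_entry, startup_64);
        return 0;
    }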
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 102ff7cb3e41..142c4ceff112 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -207,7 +207,7 @@ sysexit_from_sys_call: | |||
207 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 207 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
208 | jnz ia32_ret_from_sys_call | 208 | jnz ia32_ret_from_sys_call |
209 | TRACE_IRQS_ON | 209 | TRACE_IRQS_ON |
210 | sti | 210 | ENABLE_INTERRUPTS(CLBR_NONE) |
211 | movl %eax,%esi /* second arg, syscall return value */ | 211 | movl %eax,%esi /* second arg, syscall return value */ |
212 | cmpl $-MAX_ERRNO,%eax /* is it an error ? */ | 212 | cmpl $-MAX_ERRNO,%eax /* is it an error ? */ |
213 | jbe 1f | 213 | jbe 1f |
@@ -217,7 +217,7 @@ sysexit_from_sys_call: | |||
217 | call __audit_syscall_exit | 217 | call __audit_syscall_exit |
218 | movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ | 218 | movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ |
219 | movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi | 219 | movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi |
220 | cli | 220 | DISABLE_INTERRUPTS(CLBR_NONE) |
221 | TRACE_IRQS_OFF | 221 | TRACE_IRQS_OFF |
222 | testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 222 | testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
223 | jz \exit | 223 | jz \exit |
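Swapping the literal sti/cli for ENABLE_INTERRUPTS(CLBR_NONE)/DISABLE_INTERRUPTS(CLBR_NONE) routes the audit fast path through the paravirt layer: on bare metal the macros still emit sti/cli, but a paravirtualized guest gets its own patched-in implementation instead of trapping on a privileged instruction. A rough C-level picture of that indirection (member names follow the kernel's pv_irq_ops, but this standalone version is illustrative only, and the asm needs ring 0 to actually execute):

    struct pv_irq_ops {
        void (*irq_enable)(void);
        void (*irq_disable)(void);
    };

    static void native_irq_enable(void)  { __asm__ volatile("sti"); }
    static void native_irq_disable(void) { __asm__ volatile("cli"); }

    /* Bare metal keeps the native ops; a hypervisor replaces them. */
    static struct pv_irq_ops pv_irq_ops = {
        .irq_enable  = native_irq_enable,
        .irq_disable = native_irq_disable,
    };

    /* What ENABLE_INTERRUPTS amounts to: a patchable indirect call. */
    static inline void arch_local_irq_enable(void)
    {
        pv_irq_ops.irq_enable();
    }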
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 2d9075e863a0..93fe929d1cee 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -167,6 +167,7 @@ | |||
167 | #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ | 167 | #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ |
168 | #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ | 168 | #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ |
169 | #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ | 169 | #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ |
170 | #define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */ | ||
170 | 171 | ||
171 | /* | 172 | /* |
172 | * Auxiliary flags: Linux defined - For features scattered in various | 173 | * Auxiliary flags: Linux defined - For features scattered in various |
@@ -309,6 +310,7 @@ extern const char * const x86_power_flags[32]; | |||
309 | #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) | 310 | #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) |
310 | #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) | 311 | #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) |
311 | #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) | 312 | #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) |
313 | #define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) | ||
312 | #define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) | 314 | #define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) |
313 | #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) | 315 | #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) |
314 | #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) | 316 | #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 6e8fdf5ad113..28677c55113f 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -94,6 +94,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, | |||
94 | #endif /* CONFIG_X86_32 */ | 94 | #endif /* CONFIG_X86_32 */ |
95 | 95 | ||
96 | extern int add_efi_memmap; | 96 | extern int add_efi_memmap; |
97 | extern unsigned long x86_efi_facility; | ||
97 | extern void efi_set_executable(efi_memory_desc_t *md, bool executable); | 98 | extern void efi_set_executable(efi_memory_desc_t *md, bool executable); |
98 | extern int efi_memblock_x86_reserve_range(void); | 99 | extern int efi_memblock_x86_reserve_range(void); |
99 | extern void efi_call_phys_prelog(void); | 100 | extern void efi_call_phys_prelog(void); |
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 9a25b522d377..86cb51e1ca96 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -44,7 +44,6 @@ | |||
44 | 44 | ||
45 | #ifdef CONFIG_DYNAMIC_FTRACE | 45 | #ifdef CONFIG_DYNAMIC_FTRACE |
46 | #define ARCH_SUPPORTS_FTRACE_OPS 1 | 46 | #define ARCH_SUPPORTS_FTRACE_OPS 1 |
47 | #define ARCH_SUPPORTS_FTRACE_SAVE_REGS | ||
48 | #endif | 47 | #endif |
49 | 48 | ||
50 | #ifndef __ASSEMBLY__ | 49 | #ifndef __ASSEMBLY__ |
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 434e2106cc87..b18df579c0e9 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h | |||
@@ -80,9 +80,9 @@ extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg); | |||
80 | extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); | 80 | extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); |
81 | 81 | ||
82 | #ifdef CONFIG_PCI_MSI | 82 | #ifdef CONFIG_PCI_MSI |
83 | extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); | 83 | extern int default_setup_hpet_msi(unsigned int irq, unsigned int id); |
84 | #else | 84 | #else |
85 | static inline int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | 85 | static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id) |
86 | { | 86 | { |
87 | return -EINVAL; | 87 | return -EINVAL; |
88 | } | 88 | } |
@@ -111,6 +111,7 @@ extern void hpet_unregister_irq_handler(rtc_irq_handler handler); | |||
111 | static inline int hpet_enable(void) { return 0; } | 111 | static inline int hpet_enable(void) { return 0; } |
112 | static inline int is_hpet_enabled(void) { return 0; } | 112 | static inline int is_hpet_enabled(void) { return 0; } |
113 | #define hpet_readl(a) 0 | 113 | #define hpet_readl(a) 0 |
114 | #define default_setup_hpet_msi NULL | ||
114 | 115 | ||
115 | #endif | 116 | #endif |
116 | #endif /* _ASM_X86_HPET_H */ | 117 | #endif /* _ASM_X86_HPET_H */ |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index eb92a6ed2be7..10a78c3d3d5a 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -101,6 +101,7 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, | |||
101 | irq_attr->polarity = polarity; | 101 | irq_attr->polarity = polarity; |
102 | } | 102 | } |
103 | 103 | ||
104 | /* Intel specific interrupt remapping information */ | ||
104 | struct irq_2_iommu { | 105 | struct irq_2_iommu { |
105 | struct intel_iommu *iommu; | 106 | struct intel_iommu *iommu; |
106 | u16 irte_index; | 107 | u16 irte_index; |
@@ -108,6 +109,12 @@ struct irq_2_iommu { | |||
108 | u8 irte_mask; | 109 | u8 irte_mask; |
109 | }; | 110 | }; |
110 | 111 | ||
112 | /* AMD specific interrupt remapping information */ | ||
113 | struct irq_2_irte { | ||
114 | u16 devid; /* Device ID for IRTE table */ | ||
115 | u16 index; /* Index into IRTE table*/ | ||
116 | }; | ||
117 | |||
111 | /* | 118 | /* |
112 | * This is performance-critical, we want to do it O(1) | 119 | * This is performance-critical, we want to do it O(1) |
113 | * | 120 | * |
@@ -120,7 +127,11 @@ struct irq_cfg { | |||
120 | u8 vector; | 127 | u8 vector; |
121 | u8 move_in_progress : 1; | 128 | u8 move_in_progress : 1; |
122 | #ifdef CONFIG_IRQ_REMAP | 129 | #ifdef CONFIG_IRQ_REMAP |
123 | struct irq_2_iommu irq_2_iommu; | 130 | u8 remapped : 1; |
131 | union { | ||
132 | struct irq_2_iommu irq_2_iommu; | ||
133 | struct irq_2_irte irq_2_irte; | ||
134 | }; | ||
124 | #endif | 135 | #endif |
125 | }; | 136 | }; |
126 | 137 | ||
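Since an IRQ is remapped by at most one IOMMU flavour, Intel's irq_2_iommu and AMD's irq_2_irte can share storage; the new "remapped" bit records whether the union is live at all. A trimmed-down sketch of guarded access (the accessor name is made up):

    #include <stdbool.h>
    #include <stdint.h>

    struct irq_2_iommu { void *iommu; uint16_t irte_index; };
    struct irq_2_irte  { uint16_t devid; uint16_t index; };

    struct irq_cfg {
        uint8_t vector;
        uint8_t remapped : 1;
        union {
            struct irq_2_iommu irq_2_iommu;  /* Intel specific */
            struct irq_2_irte  irq_2_irte;   /* AMD specific */
        };
    };

    /* Hypothetical accessor: only dereference the union member once
     * the remapping code has flagged the IRQ as remapped. */
    static struct irq_2_irte *get_irte_info(struct irq_cfg *cfg)
    {
        return cfg->remapped ? &cfg->irq_2_irte : NULL;
    }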
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index b518c7509933..86095ed14135 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | extern void init_hypervisor(struct cpuinfo_x86 *c); | 26 | extern void init_hypervisor(struct cpuinfo_x86 *c); |
27 | extern void init_hypervisor_platform(void); | 27 | extern void init_hypervisor_platform(void); |
28 | extern bool hypervisor_x2apic_available(void); | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * x86 hypervisor information | 31 | * x86 hypervisor information |
@@ -41,6 +42,9 @@ struct hypervisor_x86 { | |||
41 | 42 | ||
42 | /* Platform setup (run once per boot) */ | 43 | /* Platform setup (run once per boot) */ |
43 | void (*init_platform)(void); | 44 | void (*init_platform)(void); |
45 | |||
46 | /* X2APIC detection (run once per boot) */ | ||
47 | bool (*x2apic_available)(void); | ||
44 | }; | 48 | }; |
45 | 49 | ||
46 | extern const struct hypervisor_x86 *x86_hyper; | 50 | extern const struct hypervisor_x86 *x86_hyper; |
@@ -51,13 +55,4 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv; | |||
51 | extern const struct hypervisor_x86 x86_hyper_xen_hvm; | 55 | extern const struct hypervisor_x86 x86_hyper_xen_hvm; |
52 | extern const struct hypervisor_x86 x86_hyper_kvm; | 56 | extern const struct hypervisor_x86 x86_hyper_kvm; |
53 | 57 | ||
54 | static inline bool hypervisor_x2apic_available(void) | ||
55 | { | ||
56 | if (kvm_para_available()) | ||
57 | return true; | ||
58 | if (xen_x2apic_para_available()) | ||
59 | return true; | ||
60 | return false; | ||
61 | } | ||
62 | |||
63 | #endif | 58 | #endif |
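Dropping the inline removes the hardwired KVM/Xen knowledge from the header; each hypervisor now answers through the new x2apic_available callback. The out-of-line replacement is presumably a null-checked walk of the detected hypervisor's ops, along these lines (a sketch, not a verbatim copy):

    #include <stdbool.h>

    struct hypervisor_x86 {
        const char *name;
        bool (*x2apic_available)(void);
    };

    extern const struct hypervisor_x86 *x86_hyper;

    /* Each hypervisor supplies its own detection callback instead of
     * being special-cased here. */
    bool hypervisor_x2apic_available(void)
    {
        return x86_hyper &&
               x86_hyper->x2apic_available &&
               x86_hyper->x2apic_available();
    }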
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 73d8c5398ea9..459e50a424d1 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -144,11 +144,24 @@ extern int timer_through_8259; | |||
144 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | 144 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) |
145 | 145 | ||
146 | struct io_apic_irq_attr; | 146 | struct io_apic_irq_attr; |
147 | struct irq_cfg; | ||
147 | extern int io_apic_set_pci_routing(struct device *dev, int irq, | 148 | extern int io_apic_set_pci_routing(struct device *dev, int irq, |
148 | struct io_apic_irq_attr *irq_attr); | 149 | struct io_apic_irq_attr *irq_attr); |
149 | void setup_IO_APIC_irq_extra(u32 gsi); | 150 | void setup_IO_APIC_irq_extra(u32 gsi); |
150 | extern void ioapic_insert_resources(void); | 151 | extern void ioapic_insert_resources(void); |
151 | 152 | ||
153 | extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *, | ||
154 | unsigned int, int, | ||
155 | struct io_apic_irq_attr *); | ||
159 | extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg); | ||
160 | |||
161 | extern void native_compose_msi_msg(struct pci_dev *pdev, | ||
162 | unsigned int irq, unsigned int dest, | ||
163 | struct msi_msg *msg, u8 hpet_id); | ||
164 | extern void native_eoi_ioapic_pin(int apic, int pin, int vector); | ||
152 | int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); | 165 | int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); |
153 | 166 | ||
154 | extern int save_ioapic_entries(void); | 167 | extern int save_ioapic_entries(void); |
@@ -179,6 +192,12 @@ extern void __init native_io_apic_init_mappings(void); | |||
179 | extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg); | 192 | extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg); |
180 | extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val); | 193 | extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val); |
181 | extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val); | 194 | extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val); |
195 | extern void native_disable_io_apic(void); | ||
196 | extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries); | ||
197 | extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries); | ||
198 | extern int native_ioapic_set_affinity(struct irq_data *, | ||
199 | const struct cpumask *, | ||
200 | bool); | ||
182 | 201 | ||
183 | static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) | 202 | static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) |
184 | { | 203 | { |
@@ -193,6 +212,9 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned | |||
193 | { | 212 | { |
194 | x86_io_apic_ops.modify(apic, reg, value); | 213 | x86_io_apic_ops.modify(apic, reg, value); |
195 | } | 214 | } |
215 | |||
216 | extern void io_apic_eoi(unsigned int apic, unsigned int vector); | ||
217 | |||
196 | #else /* !CONFIG_X86_IO_APIC */ | 218 | #else /* !CONFIG_X86_IO_APIC */ |
197 | 219 | ||
198 | #define io_apic_assign_pci_irqs 0 | 220 | #define io_apic_assign_pci_irqs 0 |
@@ -223,6 +245,12 @@ static inline void disable_ioapic_support(void) { } | |||
223 | #define native_io_apic_read NULL | 245 | #define native_io_apic_read NULL |
224 | #define native_io_apic_write NULL | 246 | #define native_io_apic_write NULL |
225 | #define native_io_apic_modify NULL | 247 | #define native_io_apic_modify NULL |
248 | #define native_disable_io_apic NULL | ||
249 | #define native_io_apic_print_entries NULL | ||
250 | #define native_ioapic_set_affinity NULL | ||
251 | #define native_setup_ioapic_entry NULL | ||
252 | #define native_compose_msi_msg NULL | ||
253 | #define native_eoi_ioapic_pin NULL | ||
226 | #endif | 254 | #endif |
227 | 255 | ||
228 | #endif /* _ASM_X86_IO_APIC_H */ | 256 | #endif /* _ASM_X86_IO_APIC_H */ |
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 5fb9bbbd2f14..95fd3527f632 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h | |||
@@ -26,8 +26,6 @@ | |||
26 | 26 | ||
27 | #ifdef CONFIG_IRQ_REMAP | 27 | #ifdef CONFIG_IRQ_REMAP |
28 | 28 | ||
29 | extern int irq_remapping_enabled; | ||
30 | |||
31 | extern void setup_irq_remapping_ops(void); | 29 | extern void setup_irq_remapping_ops(void); |
32 | extern int irq_remapping_supported(void); | 30 | extern int irq_remapping_supported(void); |
33 | extern int irq_remapping_prepare(void); | 31 | extern int irq_remapping_prepare(void); |
@@ -40,21 +38,19 @@ extern int setup_ioapic_remapped_entry(int irq, | |||
40 | unsigned int destination, | 38 | unsigned int destination, |
41 | int vector, | 39 | int vector, |
42 | struct io_apic_irq_attr *attr); | 40 | struct io_apic_irq_attr *attr); |
43 | extern int set_remapped_irq_affinity(struct irq_data *data, | ||
44 | const struct cpumask *mask, | ||
45 | bool force); | ||
46 | extern void free_remapped_irq(int irq); | 41 | extern void free_remapped_irq(int irq); |
47 | extern void compose_remapped_msi_msg(struct pci_dev *pdev, | 42 | extern void compose_remapped_msi_msg(struct pci_dev *pdev, |
48 | unsigned int irq, unsigned int dest, | 43 | unsigned int irq, unsigned int dest, |
49 | struct msi_msg *msg, u8 hpet_id); | 44 | struct msi_msg *msg, u8 hpet_id); |
50 | extern int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec); | ||
51 | extern int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, | ||
52 | int index, int sub_handle); | ||
53 | extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id); | 45 | extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id); |
46 | extern void panic_if_irq_remap(const char *msg); | ||
47 | extern bool setup_remapped_irq(int irq, | ||
48 | struct irq_cfg *cfg, | ||
49 | struct irq_chip *chip); | ||
54 | 50 | ||
55 | #else /* CONFIG_IRQ_REMAP */ | 51 | void irq_remap_modify_chip_defaults(struct irq_chip *chip); |
56 | 52 | ||
57 | #define irq_remapping_enabled 0 | 53 | #else /* CONFIG_IRQ_REMAP */ |
58 | 54 | ||
59 | static inline void setup_irq_remapping_ops(void) { } | 55 | static inline void setup_irq_remapping_ops(void) { } |
60 | static inline int irq_remapping_supported(void) { return 0; } | 56 | static inline int irq_remapping_supported(void) { return 0; } |
@@ -71,30 +67,30 @@ static inline int setup_ioapic_remapped_entry(int irq, | |||
71 | { | 67 | { |
72 | return -ENODEV; | 68 | return -ENODEV; |
73 | } | 69 | } |
74 | static inline int set_remapped_irq_affinity(struct irq_data *data, | ||
75 | const struct cpumask *mask, | ||
76 | bool force) | ||
77 | { | ||
78 | return 0; | ||
79 | } | ||
80 | static inline void free_remapped_irq(int irq) { } | 70 | static inline void free_remapped_irq(int irq) { } |
81 | static inline void compose_remapped_msi_msg(struct pci_dev *pdev, | 71 | static inline void compose_remapped_msi_msg(struct pci_dev *pdev, |
82 | unsigned int irq, unsigned int dest, | 72 | unsigned int irq, unsigned int dest, |
83 | struct msi_msg *msg, u8 hpet_id) | 73 | struct msi_msg *msg, u8 hpet_id) |
84 | { | 74 | { |
85 | } | 75 | } |
86 | static inline int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) | 76 | static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) |
87 | { | 77 | { |
88 | return -ENODEV; | 78 | return -ENODEV; |
89 | } | 79 | } |
90 | static inline int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, | 80 | |
91 | int index, int sub_handle) | 81 | static inline void panic_if_irq_remap(const char *msg) |
82 | { | ||
83 | } | ||
84 | |||
85 | static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip) | ||
92 | { | 86 | { |
93 | return -ENODEV; | ||
94 | } | 87 | } |
95 | static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) | 88 | |
89 | static inline bool setup_remapped_irq(int irq, | ||
90 | struct irq_cfg *cfg, | ||
91 | struct irq_chip *chip) | ||
96 | { | 92 | { |
97 | return -ENODEV; | 93 | return false; |
98 | } | 94 | } |
99 | #endif /* CONFIG_IRQ_REMAP */ | 95 | #endif /* CONFIG_IRQ_REMAP */ |
100 | 96 | ||
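panic_if_irq_remap() exists so call sites no longer need the irq_remapping_enabled flag, which this patch stops exporting; the CONFIG_IRQ_REMAP=n stub above is an empty inline. The enabled build's version is presumably a one-liner over the now-private flag (sketch, with a user-space stand-in for panic()):

    #include <stdio.h>
    #include <stdlib.h>

    static int irq_remapping_enabled;   /* now private to the remap core */

    static void panic(const char *msg)  /* stand-in for the kernel's panic() */
    {
        fprintf(stderr, "panic: %s\n", msg);
        abort();
    }

    void panic_if_irq_remap(const char *msg)
    {
        if (irq_remapping_enabled)
            panic(msg);
    }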
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 5ed1f16187be..65231e173baf 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h | |||
@@ -85,13 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, | |||
85 | return ret; | 85 | return ret; |
86 | } | 86 | } |
87 | 87 | ||
88 | static inline int kvm_para_available(void) | 88 | static inline bool kvm_para_available(void) |
89 | { | 89 | { |
90 | unsigned int eax, ebx, ecx, edx; | 90 | unsigned int eax, ebx, ecx, edx; |
91 | char signature[13]; | 91 | char signature[13]; |
92 | 92 | ||
93 | if (boot_cpu_data.cpuid_level < 0) | 93 | if (boot_cpu_data.cpuid_level < 0) |
94 | return 0; /* So we don't blow up on old processors */ | 94 | return false; /* So we don't blow up on old processors */ |
95 | 95 | ||
96 | if (cpu_has_hypervisor) { | 96 | if (cpu_has_hypervisor) { |
97 | cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); | 97 | cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); |
@@ -101,10 +101,10 @@ static inline int kvm_para_available(void) | |||
101 | signature[12] = 0; | 101 | signature[12] = 0; |
102 | 102 | ||
103 | if (strcmp(signature, "KVMKVMKVM") == 0) | 103 | if (strcmp(signature, "KVMKVMKVM") == 0) |
104 | return 1; | 104 | return true; |
105 | } | 105 | } |
106 | 106 | ||
107 | return 0; | 107 | return false; |
108 | } | 108 | } |
109 | 109 | ||
110 | static inline unsigned int kvm_arch_para_features(void) | 110 | static inline unsigned int kvm_arch_para_features(void) |
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 48142971b25d..79327e9483a3 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h | |||
@@ -27,20 +27,20 @@ | |||
27 | #define __asmlinkage_protect0(ret) \ | 27 | #define __asmlinkage_protect0(ret) \ |
28 | __asmlinkage_protect_n(ret) | 28 | __asmlinkage_protect_n(ret) |
29 | #define __asmlinkage_protect1(ret, arg1) \ | 29 | #define __asmlinkage_protect1(ret, arg1) \ |
30 | __asmlinkage_protect_n(ret, "g" (arg1)) | 30 | __asmlinkage_protect_n(ret, "m" (arg1)) |
31 | #define __asmlinkage_protect2(ret, arg1, arg2) \ | 31 | #define __asmlinkage_protect2(ret, arg1, arg2) \ |
32 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2)) | 32 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) |
33 | #define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ | 33 | #define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ |
34 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3)) | 34 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) |
35 | #define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ | 35 | #define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ |
36 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ | 36 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ |
37 | "g" (arg4)) | 37 | "m" (arg4)) |
38 | #define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ | 38 | #define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ |
39 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ | 39 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ |
40 | "g" (arg4), "g" (arg5)) | 40 | "m" (arg4), "m" (arg5)) |
41 | #define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ | 41 | #define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ |
42 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ | 42 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ |
43 | "g" (arg4), "g" (arg5), "g" (arg6)) | 43 | "m" (arg4), "m" (arg5), "m" (arg6)) |
44 | 44 | ||
45 | #endif /* CONFIG_X86_32 */ | 45 | #endif /* CONFIG_X86_32 */ |
46 | 46 | ||
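The constraint change is subtle but real: "g" lets gcc satisfy the operand with a register or an immediate, in which case the dummy asm does nothing to keep the caller's on-stack argument slot alive, and gcc may reuse that slot before the asmlinkage callee (which reads its arguments from the stack) gets to it. "m" forces a memory operand, so the asm visibly reads the stack slot and it must stay intact. A tiny compilable illustration of the idiom (not kernel code):

    /* With "m", a and b must remain addressable in memory across the
     * barrier; with "g", gcc could pick registers and consider the
     * stack slots dead. */
    int add_protected(int a, int b)
    {
        int ret = a + b;

        __asm__ volatile("" : "+r" (ret) : "m" (a), "m" (b));
        return ret;
    }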
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index ecdfee60ee4a..f4076af1f4ed 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -3,6 +3,90 @@ | |||
3 | 3 | ||
4 | #include <uapi/asm/mce.h> | 4 | #include <uapi/asm/mce.h> |
5 | 5 | ||
6 | /* | ||
7 | * Machine Check support for x86 | ||
8 | */ | ||
9 | |||
10 | /* MCG_CAP register defines */ | ||
11 | #define MCG_BANKCNT_MASK 0xff /* Number of Banks */ | ||
12 | #define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ | ||
13 | #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ | ||
14 | #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ | ||
15 | #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ | ||
16 | #define MCG_EXT_CNT_SHIFT 16 | ||
17 | #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) | ||
18 | #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ | ||
19 | |||
20 | /* MCG_STATUS register defines */ | ||
21 | #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ | ||
22 | #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ | ||
23 | #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ | ||
24 | |||
25 | /* MCi_STATUS register defines */ | ||
26 | #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ | ||
27 | #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ | ||
28 | #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ | ||
29 | #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ | ||
30 | #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ | ||
31 | #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ | ||
32 | #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ | ||
33 | #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ | ||
34 | #define MCI_STATUS_AR (1ULL<<55) /* Action required */ | ||
35 | #define MCACOD 0xffff /* MCA Error Code */ | ||
36 | |||
37 | /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ | ||
38 | #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ | ||
39 | #define MCACOD_SCRUBMSK 0xfff0 | ||
40 | #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ | ||
41 | #define MCACOD_DATA 0x0134 /* Data Load */ | ||
42 | #define MCACOD_INSTR 0x0150 /* Instruction Fetch */ | ||
43 | |||
44 | /* MCi_MISC register defines */ | ||
45 | #define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) | ||
46 | #define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) | ||
47 | #define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */ | ||
48 | #define MCI_MISC_ADDR_LINEAR 1 /* linear address */ | ||
49 | #define MCI_MISC_ADDR_PHYS 2 /* physical address */ | ||
50 | #define MCI_MISC_ADDR_MEM 3 /* memory address */ | ||
51 | #define MCI_MISC_ADDR_GENERIC 7 /* generic */ | ||
52 | |||
53 | /* CTL2 register defines */ | ||
54 | #define MCI_CTL2_CMCI_EN (1ULL << 30) | ||
55 | #define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL | ||
56 | |||
57 | #define MCJ_CTX_MASK 3 | ||
58 | #define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK) | ||
59 | #define MCJ_CTX_RANDOM 0 /* inject context: random */ | ||
60 | #define MCJ_CTX_PROCESS 0x1 /* inject context: process */ | ||
61 | #define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */ | ||
62 | #define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */ | ||
63 | #define MCJ_EXCEPTION 0x8 /* raise as exception */ | ||
64 | #define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */ | ||
65 | |||
66 | #define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ | ||
67 | |||
68 | /* Software defined banks */ | ||
69 | #define MCE_EXTENDED_BANK 128 | ||
70 | #define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0) | ||
71 | #define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) | ||
72 | |||
73 | #define MCE_LOG_LEN 32 | ||
74 | #define MCE_LOG_SIGNATURE "MACHINECHECK" | ||
75 | |||
76 | /* | ||
77 | * This structure contains all data related to the MCE log. Also | ||
78 | * carries a signature to make it easier to find from external | ||
79 | * debugging tools. Each entry is only valid when its finished flag | ||
80 | * is set. | ||
81 | */ | ||
82 | struct mce_log { | ||
83 | char signature[12]; /* "MACHINECHECK" */ | ||
84 | unsigned len; /* = MCE_LOG_LEN */ | ||
85 | unsigned next; | ||
86 | unsigned flags; | ||
87 | unsigned recordlen; /* length of struct mce */ | ||
88 | struct mce entry[MCE_LOG_LEN]; | ||
89 | }; | ||
6 | 90 | ||
7 | struct mca_config { | 91 | struct mca_config { |
8 | bool dont_log_ce; | 92 | bool dont_log_ce; |
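The MCG_*/MCI_* definitions moving in here from the uapi header are plain bitfields, decoded by masking and shifting. For instance, pulling the bank count, CMCI capability, and extended-register count out of a raw MCG_CAP value (the sample value is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define MCG_BANKCNT_MASK  0xff
    #define MCG_CMCI_P        (1ULL << 10)
    #define MCG_EXT_CNT_MASK  0xff0000
    #define MCG_EXT_CNT_SHIFT 16
    #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
    #define MCG_SER_P         (1ULL << 24)

    int main(void)
    {
        uint64_t cap = 0x1000c14;   /* example MCG_CAP reading */

        printf("banks=%llu cmci=%d ext_regs=%llu ser=%d\n",
               (unsigned long long)(cap & MCG_BANKCNT_MASK),
               !!(cap & MCG_CMCI_P),
               (unsigned long long)MCG_EXT_CNT(cap),
               !!(cap & MCG_SER_P));
        return 0;
    }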
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index dba7805176bf..c28fd02f4bf7 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -121,9 +121,12 @@ static inline void x86_restore_msi_irqs(struct pci_dev *dev, int irq) | |||
121 | #define arch_teardown_msi_irq x86_teardown_msi_irq | 121 | #define arch_teardown_msi_irq x86_teardown_msi_irq |
122 | #define arch_restore_msi_irqs x86_restore_msi_irqs | 122 | #define arch_restore_msi_irqs x86_restore_msi_irqs |
123 | /* implemented in arch/x86/kernel/apic/io_apic. */ | 123 | /* implemented in arch/x86/kernel/apic/io_apic. */ |
124 | struct msi_desc; | ||
124 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); | 125 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); |
125 | void native_teardown_msi_irq(unsigned int irq); | 126 | void native_teardown_msi_irq(unsigned int irq); |
126 | void native_restore_msi_irqs(struct pci_dev *dev, int irq); | 127 | void native_restore_msi_irqs(struct pci_dev *dev, int irq); |
128 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, | ||
129 | unsigned int irq_base, unsigned int irq_offset); | ||
127 | /* default to the implementation in drivers/lib/msi.c */ | 130 | /* default to the implementation in drivers/lib/msi.c */ |
128 | #define HAVE_DEFAULT_MSI_TEARDOWN_IRQS | 131 | #define HAVE_DEFAULT_MSI_TEARDOWN_IRQS |
129 | #define HAVE_DEFAULT_MSI_RESTORE_IRQS | 132 | #define HAVE_DEFAULT_MSI_RESTORE_IRQS |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 4fabcdf1cfa7..57cb63402213 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -29,8 +29,13 @@ | |||
29 | #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) | 29 | #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) |
30 | #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL | 30 | #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL |
31 | 31 | ||
32 | #define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40) | 32 | #define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36) |
33 | #define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41) | 33 | #define AMD64_EVENTSEL_GUESTONLY (1ULL << 40) |
34 | #define AMD64_EVENTSEL_HOSTONLY (1ULL << 41) | ||
35 | |||
36 | #define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37 | ||
37 | #define AMD64_EVENTSEL_INT_CORE_SEL_MASK \ | ||
38 | (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT) | ||
34 | 39 | ||
35 | #define AMD64_EVENTSEL_EVENT \ | 40 | #define AMD64_EVENTSEL_EVENT \ |
36 | (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) | 41 | (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) |
@@ -46,8 +51,12 @@ | |||
46 | #define AMD64_RAW_EVENT_MASK \ | 51 | #define AMD64_RAW_EVENT_MASK \ |
47 | (X86_RAW_EVENT_MASK | \ | 52 | (X86_RAW_EVENT_MASK | \ |
48 | AMD64_EVENTSEL_EVENT) | 53 | AMD64_EVENTSEL_EVENT) |
54 | #define AMD64_RAW_EVENT_MASK_NB \ | ||
55 | (AMD64_EVENTSEL_EVENT | \ | ||
56 | ARCH_PERFMON_EVENTSEL_UMASK) | ||
49 | #define AMD64_NUM_COUNTERS 4 | 57 | #define AMD64_NUM_COUNTERS 4 |
50 | #define AMD64_NUM_COUNTERS_CORE 6 | 58 | #define AMD64_NUM_COUNTERS_CORE 6 |
59 | #define AMD64_NUM_COUNTERS_NB 4 | ||
51 | 60 | ||
52 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c | 61 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c |
53 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) | 62 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) |
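AMD64_RAW_EVENT_MASK_NB is deliberately narrower than AMD64_RAW_EVENT_MASK: the northbridge counters honour only the event-select and unit-mask fields, so everything else in a raw user-supplied config should be filtered out. A sketch of that filtering (the raw value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    #define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
    #define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
    #define AMD64_EVENTSEL_EVENT \
        (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
    #define AMD64_RAW_EVENT_MASK_NB \
        (AMD64_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK)

    int main(void)
    {
        uint64_t raw = 0x5300F7E0ULL;   /* arbitrary raw perf config */

        /* Only event-select + umask survive for an NB counter. */
        printf("nb config = %#llx\n",
               (unsigned long long)(raw & AMD64_RAW_EVENT_MASK_NB));
        return 0;
    }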
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5199db2923d3..fc304279b559 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd) | |||
142 | return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; | 142 | return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; |
143 | } | 143 | } |
144 | 144 | ||
145 | static inline unsigned long pud_pfn(pud_t pud) | ||
146 | { | ||
147 | return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; | ||
148 | } | ||
149 | |||
145 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) | 150 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) |
146 | 151 | ||
147 | static inline int pmd_large(pmd_t pte) | 152 | static inline int pmd_large(pmd_t pte) |
@@ -781,6 +786,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |||
781 | memcpy(dst, src, count * sizeof(pgd_t)); | 786 | memcpy(dst, src, count * sizeof(pgd_t)); |
782 | } | 787 | } |
783 | 788 | ||
789 | /* | ||
790 | * The x86 doesn't have any external MMU info: the kernel page | ||
791 | * tables contain all the necessary information. | ||
792 | */ | ||
793 | static inline void update_mmu_cache(struct vm_area_struct *vma, | ||
794 | unsigned long addr, pte_t *ptep) | ||
795 | { | ||
796 | } | ||
797 | static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, | ||
798 | unsigned long addr, pmd_t *pmd) | ||
799 | { | ||
800 | } | ||
784 | 801 | ||
785 | #include <asm-generic/pgtable.h> | 802 | #include <asm-generic/pgtable.h> |
786 | #endif /* __ASSEMBLY__ */ | 803 | #endif /* __ASSEMBLY__ */ |
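pud_pfn() rounds out the pte/pmd/pud accessors with the same arithmetic as pmd_pfn() above: mask off the flag bits, shift down by PAGE_SHIFT. A 1GB-page walker can then use pfn_to_page(pud_pfn(pud)) just as pte_page() does for 4K pages. Standalone restatement of the arithmetic (mask simplified; the real PTE_PFN_MASK is derived from the physical-address width):

    #include <stdint.h>

    #define PAGE_SHIFT   12
    #define PTE_PFN_MASK 0x000FFFFFFFFFF000ULL  /* simplified */

    static inline unsigned long pud_pfn(uint64_t pud_val)
    {
        return (pud_val & PTE_PFN_MASK) >> PAGE_SHIFT;
    }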
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 8faa215a503e..9ee322103c6d 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h | |||
@@ -66,13 +66,6 @@ do { \ | |||
66 | __flush_tlb_one((vaddr)); \ | 66 | __flush_tlb_one((vaddr)); \ |
67 | } while (0) | 67 | } while (0) |
68 | 68 | ||
69 | /* | ||
70 | * The i386 doesn't have any external MMU info: the kernel page | ||
71 | * tables contain all the necessary information. | ||
72 | */ | ||
73 | #define update_mmu_cache(vma, address, ptep) do { } while (0) | ||
74 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
75 | |||
76 | #endif /* !__ASSEMBLY__ */ | 69 | #endif /* !__ASSEMBLY__ */ |
77 | 70 | ||
78 | /* | 71 | /* |
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 47356f9df82e..615b0c78449f 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -142,9 +142,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; } | |||
142 | #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) | 142 | #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) |
143 | #define pte_unmap(pte) ((void)(pte))/* NOP */ | 143 | #define pte_unmap(pte) ((void)(pte))/* NOP */ |
144 | 144 | ||
145 | #define update_mmu_cache(vma, address, ptep) do { } while (0) | ||
146 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
147 | |||
148 | /* Encode and de-code a swap entry */ | 145 | /* Encode and de-code a swap entry */ |
149 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE | 146 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE |
150 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) | 147 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) |
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index 6c7fc25f2c34..5c6e4fb370f5 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h | |||
@@ -47,6 +47,12 @@ | |||
47 | # define NEED_NOPL 0 | 47 | # define NEED_NOPL 0 |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #ifdef CONFIG_MATOM | ||
51 | # define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31)) | ||
52 | #else | ||
53 | # define NEED_MOVBE 0 | ||
54 | #endif | ||
55 | |||
50 | #ifdef CONFIG_X86_64 | 56 | #ifdef CONFIG_X86_64 |
51 | #ifdef CONFIG_PARAVIRT | 57 | #ifdef CONFIG_PARAVIRT |
52 | /* Paravirtualized systems may not have PSE or PGE available */ | 58 | /* Paravirtualized systems may not have PSE or PGE available */ |
@@ -80,7 +86,7 @@ | |||
80 | 86 | ||
81 | #define REQUIRED_MASK2 0 | 87 | #define REQUIRED_MASK2 0 |
82 | #define REQUIRED_MASK3 (NEED_NOPL) | 88 | #define REQUIRED_MASK3 (NEED_NOPL) |
83 | #define REQUIRED_MASK4 0 | 89 | #define REQUIRED_MASK4 (NEED_MOVBE) |
84 | #define REQUIRED_MASK5 0 | 90 | #define REQUIRED_MASK5 0 |
85 | #define REQUIRED_MASK6 0 | 91 | #define REQUIRED_MASK6 0 |
86 | #define REQUIRED_MASK7 0 | 92 | #define REQUIRED_MASK7 0 |
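NEED_MOVBE follows the existing NEED_NOPL pattern: when the compiler is allowed to emit MOVBE (CONFIG_MATOM), the CPU must advertise it, and since X86_FEATURE_MOVBE lives in cpuid word 4 the bit lands in REQUIRED_MASK4. The boot-time check is a per-word mask comparison, roughly:

    #include <stdint.h>
    #include <stdio.h>

    #define X86_FEATURE_MOVBE (4*32 + 22)          /* word 4, bit 22 */
    #define NEED_MOVBE (1U << (X86_FEATURE_MOVBE & 31))
    #define REQUIRED_MASK4 (NEED_MOVBE)

    /* Sketch of the required-features test: every bit the kernel was
     * built to rely on must be present in the CPU's feature words. */
    static int cpu_usable(const uint32_t *words)
    {
        return (words[4] & REQUIRED_MASK4) == REQUIRED_MASK4;
    }

    int main(void)
    {
        uint32_t words[8] = { 0 };

        words[4] = 1U << 22;   /* CPU advertises MOVBE */
        printf("usable=%d\n", cpu_usable(words));
        return 0;
    }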
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index b47c2a82ff15..062921ef34e9 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h | |||
@@ -16,7 +16,7 @@ extern void uv_system_init(void); | |||
16 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | 16 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, |
17 | struct mm_struct *mm, | 17 | struct mm_struct *mm, |
18 | unsigned long start, | 18 | unsigned long start, |
19 | unsigned end, | 19 | unsigned long end, |
20 | unsigned int cpu); | 20 | unsigned int cpu); |
21 | 21 | ||
22 | #else /* X86_UV */ | 22 | #else /* X86_UV */ |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 57693498519c..7669941cc9d2 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -181,19 +181,38 @@ struct x86_platform_ops { | |||
181 | }; | 181 | }; |
182 | 182 | ||
183 | struct pci_dev; | 183 | struct pci_dev; |
184 | struct msi_msg; | ||
184 | 185 | ||
185 | struct x86_msi_ops { | 186 | struct x86_msi_ops { |
186 | int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); | 187 | int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); |
188 | void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq, | ||
189 | unsigned int dest, struct msi_msg *msg, | ||
190 | u8 hpet_id); | ||
187 | void (*teardown_msi_irq)(unsigned int irq); | 191 | void (*teardown_msi_irq)(unsigned int irq); |
188 | void (*teardown_msi_irqs)(struct pci_dev *dev); | 192 | void (*teardown_msi_irqs)(struct pci_dev *dev); |
189 | void (*restore_msi_irqs)(struct pci_dev *dev, int irq); | 193 | void (*restore_msi_irqs)(struct pci_dev *dev, int irq); |
194 | int (*setup_hpet_msi)(unsigned int irq, unsigned int id); | ||
190 | }; | 195 | }; |
191 | 196 | ||
197 | struct IO_APIC_route_entry; | ||
198 | struct io_apic_irq_attr; | ||
199 | struct irq_data; | ||
200 | struct cpumask; | ||
201 | |||
192 | struct x86_io_apic_ops { | 202 | struct x86_io_apic_ops { |
193 | void (*init) (void); | 203 | void (*init) (void); |
194 | unsigned int (*read) (unsigned int apic, unsigned int reg); | 204 | unsigned int (*read) (unsigned int apic, unsigned int reg); |
195 | void (*write) (unsigned int apic, unsigned int reg, unsigned int value); | 205 | void (*write) (unsigned int apic, unsigned int reg, unsigned int value); |
196 | void (*modify)(unsigned int apic, unsigned int reg, unsigned int value); | 206 | void (*modify) (unsigned int apic, unsigned int reg, unsigned int value); |
207 | void (*disable)(void); | ||
208 | void (*print_entries)(unsigned int apic, unsigned int nr_entries); | ||
209 | int (*set_affinity)(struct irq_data *data, | ||
210 | const struct cpumask *mask, | ||
211 | bool force); | ||
212 | int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry, | ||
213 | unsigned int destination, int vector, | ||
214 | struct io_apic_irq_attr *attr); | ||
215 | void (*eoi_ioapic_pin)(int apic, int pin, int vector); | ||
197 | }; | 216 | }; |
198 | 217 | ||
199 | extern struct x86_init_ops x86_init; | 218 | extern struct x86_init_ops x86_init; |
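The new x86_io_apic_ops members follow the pattern already visible in the io_apic.h hunk: generic code calls through the ops table, the native_* functions are the defaults, and the interrupt-remapping layer overrides individual members. By analogy with io_apic_read() above, a set_affinity wrapper would look like this (the wrapper name here is an assumption):

    #include <stdbool.h>

    struct irq_data;
    struct cpumask;

    struct x86_io_apic_ops {
        int (*set_affinity)(struct irq_data *data,
                            const struct cpumask *mask, bool force);
    };

    extern struct x86_io_apic_ops x86_io_apic_ops;

    static inline int ioapic_set_affinity(struct irq_data *data,
                                          const struct cpumask *mask,
                                          bool force)
    {
        return x86_io_apic_ops.set_affinity(data, mask, force);
    }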
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h index f8fde90bc45e..d8829751b3f8 100644 --- a/arch/x86/include/asm/xor.h +++ b/arch/x86/include/asm/xor.h | |||
@@ -1,10 +1,499 @@ | |||
1 | #ifdef CONFIG_KMEMCHECK | 1 | #ifdef CONFIG_KMEMCHECK |
2 | /* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */ | 2 | /* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */ |
3 | # include <asm-generic/xor.h> | 3 | # include <asm-generic/xor.h> |
4 | #elif !defined(_ASM_X86_XOR_H) | ||
5 | #define _ASM_X86_XOR_H | ||
6 | |||
7 | /* | ||
8 | * Optimized RAID-5 checksumming functions for SSE. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2, or (at your option) | ||
13 | * any later version. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * (for example /usr/src/linux/COPYING); if not, write to the Free | ||
17 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
18 | */ | ||
19 | |||
20 | /* | ||
21 | * Cache avoiding checksumming functions utilizing KNI instructions | ||
22 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) | ||
23 | */ | ||
24 | |||
25 | /* | ||
26 | * Based on | ||
27 | * High-speed RAID5 checksumming functions utilizing SSE instructions. | ||
28 | * Copyright (C) 1998 Ingo Molnar. | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * x86-64 changes / gcc fixes from Andi Kleen. | ||
33 | * Copyright 2002 Andi Kleen, SuSE Labs. | ||
34 | * | ||
35 | * This hasn't been optimized for the hammer yet, but there are likely | ||
36 | * no advantages to be gotten from x86-64 here anyways. | ||
37 | */ | ||
38 | |||
39 | #include <asm/i387.h> | ||
40 | |||
41 | #ifdef CONFIG_X86_32 | ||
42 | /* reduce register pressure */ | ||
43 | # define XOR_CONSTANT_CONSTRAINT "i" | ||
4 | #else | 44 | #else |
45 | # define XOR_CONSTANT_CONSTRAINT "re" | ||
46 | #endif | ||
47 | |||
48 | #define OFFS(x) "16*("#x")" | ||
49 | #define PF_OFFS(x) "256+16*("#x")" | ||
50 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" | ||
51 | #define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" | ||
52 | #define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" | ||
53 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" | ||
54 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" | ||
55 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" | ||
56 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" | ||
57 | #define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" | ||
58 | #define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" | ||
59 | #define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" | ||
60 | #define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" | ||
61 | #define NOP(x) | ||
62 | |||
63 | #define BLK64(pf, op, i) \ | ||
64 | pf(i) \ | ||
65 | op(i, 0) \ | ||
66 | op(i + 1, 1) \ | ||
67 | op(i + 2, 2) \ | ||
68 | op(i + 3, 3) | ||
69 | |||
70 | static void | ||
71 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
72 | { | ||
73 | unsigned long lines = bytes >> 8; | ||
74 | |||
75 | kernel_fpu_begin(); | ||
76 | |||
77 | asm volatile( | ||
78 | #undef BLOCK | ||
79 | #define BLOCK(i) \ | ||
80 | LD(i, 0) \ | ||
81 | LD(i + 1, 1) \ | ||
82 | PF1(i) \ | ||
83 | PF1(i + 2) \ | ||
84 | LD(i + 2, 2) \ | ||
85 | LD(i + 3, 3) \ | ||
86 | PF0(i + 4) \ | ||
87 | PF0(i + 6) \ | ||
88 | XO1(i, 0) \ | ||
89 | XO1(i + 1, 1) \ | ||
90 | XO1(i + 2, 2) \ | ||
91 | XO1(i + 3, 3) \ | ||
92 | ST(i, 0) \ | ||
93 | ST(i + 1, 1) \ | ||
94 | ST(i + 2, 2) \ | ||
95 | ST(i + 3, 3) \ | ||
96 | |||
97 | |||
98 | PF0(0) | ||
99 | PF0(2) | ||
100 | |||
101 | " .align 32 ;\n" | ||
102 | " 1: ;\n" | ||
103 | |||
104 | BLOCK(0) | ||
105 | BLOCK(4) | ||
106 | BLOCK(8) | ||
107 | BLOCK(12) | ||
108 | |||
109 | " add %[inc], %[p1] ;\n" | ||
110 | " add %[inc], %[p2] ;\n" | ||
111 | " dec %[cnt] ;\n" | ||
112 | " jnz 1b ;\n" | ||
113 | : [cnt] "+r" (lines), | ||
114 | [p1] "+r" (p1), [p2] "+r" (p2) | ||
115 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
116 | : "memory"); | ||
117 | |||
118 | kernel_fpu_end(); | ||
119 | } | ||
120 | |||
121 | static void | ||
122 | xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
123 | { | ||
124 | unsigned long lines = bytes >> 8; | ||
125 | |||
126 | kernel_fpu_begin(); | ||
127 | |||
128 | asm volatile( | ||
129 | #undef BLOCK | ||
130 | #define BLOCK(i) \ | ||
131 | BLK64(PF0, LD, i) \ | ||
132 | BLK64(PF1, XO1, i) \ | ||
133 | BLK64(NOP, ST, i) \ | ||
134 | |||
135 | " .align 32 ;\n" | ||
136 | " 1: ;\n" | ||
137 | |||
138 | BLOCK(0) | ||
139 | BLOCK(4) | ||
140 | BLOCK(8) | ||
141 | BLOCK(12) | ||
142 | |||
143 | " add %[inc], %[p1] ;\n" | ||
144 | " add %[inc], %[p2] ;\n" | ||
145 | " dec %[cnt] ;\n" | ||
146 | " jnz 1b ;\n" | ||
147 | : [cnt] "+r" (lines), | ||
148 | [p1] "+r" (p1), [p2] "+r" (p2) | ||
149 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
150 | : "memory"); | ||
151 | |||
152 | kernel_fpu_end(); | ||
153 | } | ||
154 | |||
155 | static void | ||
156 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
157 | unsigned long *p3) | ||
158 | { | ||
159 | unsigned long lines = bytes >> 8; | ||
160 | |||
161 | kernel_fpu_begin(); | ||
162 | |||
163 | asm volatile( | ||
164 | #undef BLOCK | ||
165 | #define BLOCK(i) \ | ||
166 | PF1(i) \ | ||
167 | PF1(i + 2) \ | ||
168 | LD(i, 0) \ | ||
169 | LD(i + 1, 1) \ | ||
170 | LD(i + 2, 2) \ | ||
171 | LD(i + 3, 3) \ | ||
172 | PF2(i) \ | ||
173 | PF2(i + 2) \ | ||
174 | PF0(i + 4) \ | ||
175 | PF0(i + 6) \ | ||
176 | XO1(i, 0) \ | ||
177 | XO1(i + 1, 1) \ | ||
178 | XO1(i + 2, 2) \ | ||
179 | XO1(i + 3, 3) \ | ||
180 | XO2(i, 0) \ | ||
181 | XO2(i + 1, 1) \ | ||
182 | XO2(i + 2, 2) \ | ||
183 | XO2(i + 3, 3) \ | ||
184 | ST(i, 0) \ | ||
185 | ST(i + 1, 1) \ | ||
186 | ST(i + 2, 2) \ | ||
187 | ST(i + 3, 3) \ | ||
188 | |||
189 | |||
190 | PF0(0) | ||
191 | PF0(2) | ||
192 | |||
193 | " .align 32 ;\n" | ||
194 | " 1: ;\n" | ||
195 | |||
196 | BLOCK(0) | ||
197 | BLOCK(4) | ||
198 | BLOCK(8) | ||
199 | BLOCK(12) | ||
200 | |||
201 | " add %[inc], %[p1] ;\n" | ||
202 | " add %[inc], %[p2] ;\n" | ||
203 | " add %[inc], %[p3] ;\n" | ||
204 | " dec %[cnt] ;\n" | ||
205 | " jnz 1b ;\n" | ||
206 | : [cnt] "+r" (lines), | ||
207 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) | ||
208 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
209 | : "memory"); | ||
210 | |||
211 | kernel_fpu_end(); | ||
212 | } | ||
213 | |||
214 | static void | ||
215 | xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
216 | unsigned long *p3) | ||
217 | { | ||
218 | unsigned long lines = bytes >> 8; | ||
219 | |||
220 | kernel_fpu_begin(); | ||
221 | |||
222 | asm volatile( | ||
223 | #undef BLOCK | ||
224 | #define BLOCK(i) \ | ||
225 | BLK64(PF0, LD, i) \ | ||
226 | BLK64(PF1, XO1, i) \ | ||
227 | BLK64(PF2, XO2, i) \ | ||
228 | BLK64(NOP, ST, i) \ | ||
229 | |||
230 | " .align 32 ;\n" | ||
231 | " 1: ;\n" | ||
232 | |||
233 | BLOCK(0) | ||
234 | BLOCK(4) | ||
235 | BLOCK(8) | ||
236 | BLOCK(12) | ||
237 | |||
238 | " add %[inc], %[p1] ;\n" | ||
239 | " add %[inc], %[p2] ;\n" | ||
240 | " add %[inc], %[p3] ;\n" | ||
241 | " dec %[cnt] ;\n" | ||
242 | " jnz 1b ;\n" | ||
243 | : [cnt] "+r" (lines), | ||
244 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) | ||
245 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
246 | : "memory"); | ||
247 | |||
248 | kernel_fpu_end(); | ||
249 | } | ||
250 | |||
251 | static void | ||
252 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
253 | unsigned long *p3, unsigned long *p4) | ||
254 | { | ||
255 | unsigned long lines = bytes >> 8; | ||
256 | |||
257 | kernel_fpu_begin(); | ||
258 | |||
259 | asm volatile( | ||
260 | #undef BLOCK | ||
261 | #define BLOCK(i) \ | ||
262 | PF1(i) \ | ||
263 | PF1(i + 2) \ | ||
264 | LD(i, 0) \ | ||
265 | LD(i + 1, 1) \ | ||
266 | LD(i + 2, 2) \ | ||
267 | LD(i + 3, 3) \ | ||
268 | PF2(i) \ | ||
269 | PF2(i + 2) \ | ||
270 | XO1(i, 0) \ | ||
271 | XO1(i + 1, 1) \ | ||
272 | XO1(i + 2, 2) \ | ||
273 | XO1(i + 3, 3) \ | ||
274 | PF3(i) \ | ||
275 | PF3(i + 2) \ | ||
276 | PF0(i + 4) \ | ||
277 | PF0(i + 6) \ | ||
278 | XO2(i, 0) \ | ||
279 | XO2(i + 1, 1) \ | ||
280 | XO2(i + 2, 2) \ | ||
281 | XO2(i + 3, 3) \ | ||
282 | XO3(i, 0) \ | ||
283 | XO3(i + 1, 1) \ | ||
284 | XO3(i + 2, 2) \ | ||
285 | XO3(i + 3, 3) \ | ||
286 | ST(i, 0) \ | ||
287 | ST(i + 1, 1) \ | ||
288 | ST(i + 2, 2) \ | ||
289 | ST(i + 3, 3) \ | ||
290 | |||
291 | |||
292 | PF0(0) | ||
293 | PF0(2) | ||
294 | |||
295 | " .align 32 ;\n" | ||
296 | " 1: ;\n" | ||
297 | |||
298 | BLOCK(0) | ||
299 | BLOCK(4) | ||
300 | BLOCK(8) | ||
301 | BLOCK(12) | ||
302 | |||
303 | " add %[inc], %[p1] ;\n" | ||
304 | " add %[inc], %[p2] ;\n" | ||
305 | " add %[inc], %[p3] ;\n" | ||
306 | " add %[inc], %[p4] ;\n" | ||
307 | " dec %[cnt] ;\n" | ||
308 | " jnz 1b ;\n" | ||
309 | : [cnt] "+r" (lines), [p1] "+r" (p1), | ||
310 | [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) | ||
311 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
312 | : "memory"); | ||
313 | |||
314 | kernel_fpu_end(); | ||
315 | } | ||
316 | |||
317 | static void | ||
318 | xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
319 | unsigned long *p3, unsigned long *p4) | ||
320 | { | ||
321 | unsigned long lines = bytes >> 8; | ||
322 | |||
323 | kernel_fpu_begin(); | ||
324 | |||
325 | asm volatile( | ||
326 | #undef BLOCK | ||
327 | #define BLOCK(i) \ | ||
328 | BLK64(PF0, LD, i) \ | ||
329 | BLK64(PF1, XO1, i) \ | ||
330 | BLK64(PF2, XO2, i) \ | ||
331 | BLK64(PF3, XO3, i) \ | ||
332 | BLK64(NOP, ST, i) \ | ||
333 | |||
334 | " .align 32 ;\n" | ||
335 | " 1: ;\n" | ||
336 | |||
337 | BLOCK(0) | ||
338 | BLOCK(4) | ||
339 | BLOCK(8) | ||
340 | BLOCK(12) | ||
341 | |||
342 | " add %[inc], %[p1] ;\n" | ||
343 | " add %[inc], %[p2] ;\n" | ||
344 | " add %[inc], %[p3] ;\n" | ||
345 | " add %[inc], %[p4] ;\n" | ||
346 | " dec %[cnt] ;\n" | ||
347 | " jnz 1b ;\n" | ||
348 | : [cnt] "+r" (lines), [p1] "+r" (p1), | ||
349 | [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) | ||
350 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
351 | : "memory"); | ||
352 | |||
353 | kernel_fpu_end(); | ||
354 | } | ||
355 | |||
356 | static void | ||
357 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
358 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
359 | { | ||
360 | unsigned long lines = bytes >> 8; | ||
361 | |||
362 | kernel_fpu_begin(); | ||
363 | |||
364 | asm volatile( | ||
365 | #undef BLOCK | ||
366 | #define BLOCK(i) \ | ||
367 | PF1(i) \ | ||
368 | PF1(i + 2) \ | ||
369 | LD(i, 0) \ | ||
370 | LD(i + 1, 1) \ | ||
371 | LD(i + 2, 2) \ | ||
372 | LD(i + 3, 3) \ | ||
373 | PF2(i) \ | ||
374 | PF2(i + 2) \ | ||
375 | XO1(i, 0) \ | ||
376 | XO1(i + 1, 1) \ | ||
377 | XO1(i + 2, 2) \ | ||
378 | XO1(i + 3, 3) \ | ||
379 | PF3(i) \ | ||
380 | PF3(i + 2) \ | ||
381 | XO2(i, 0) \ | ||
382 | XO2(i + 1, 1) \ | ||
383 | XO2(i + 2, 2) \ | ||
384 | XO2(i + 3, 3) \ | ||
385 | PF4(i) \ | ||
386 | PF4(i + 2) \ | ||
387 | PF0(i + 4) \ | ||
388 | PF0(i + 6) \ | ||
389 | XO3(i, 0) \ | ||
390 | XO3(i + 1, 1) \ | ||
391 | XO3(i + 2, 2) \ | ||
392 | XO3(i + 3, 3) \ | ||
393 | XO4(i, 0) \ | ||
394 | XO4(i + 1, 1) \ | ||
395 | XO4(i + 2, 2) \ | ||
396 | XO4(i + 3, 3) \ | ||
397 | ST(i, 0) \ | ||
398 | ST(i + 1, 1) \ | ||
399 | ST(i + 2, 2) \ | ||
400 | ST(i + 3, 3) \ | ||
401 | |||
402 | |||
403 | PF0(0) | ||
404 | PF0(2) | ||
405 | |||
406 | " .align 32 ;\n" | ||
407 | " 1: ;\n" | ||
408 | |||
409 | BLOCK(0) | ||
410 | BLOCK(4) | ||
411 | BLOCK(8) | ||
412 | BLOCK(12) | ||
413 | |||
414 | " add %[inc], %[p1] ;\n" | ||
415 | " add %[inc], %[p2] ;\n" | ||
416 | " add %[inc], %[p3] ;\n" | ||
417 | " add %[inc], %[p4] ;\n" | ||
418 | " add %[inc], %[p5] ;\n" | ||
419 | " dec %[cnt] ;\n" | ||
420 | " jnz 1b ;\n" | ||
421 | : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2), | ||
422 | [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5) | ||
423 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
424 | : "memory"); | ||
425 | |||
426 | kernel_fpu_end(); | ||
427 | } | ||
428 | |||
429 | static void | ||
430 | xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
431 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
432 | { | ||
433 | unsigned long lines = bytes >> 8; | ||
434 | |||
435 | kernel_fpu_begin(); | ||
436 | |||
437 | asm volatile( | ||
438 | #undef BLOCK | ||
439 | #define BLOCK(i) \ | ||
440 | BLK64(PF0, LD, i) \ | ||
441 | BLK64(PF1, XO1, i) \ | ||
442 | BLK64(PF2, XO2, i) \ | ||
443 | BLK64(PF3, XO3, i) \ | ||
444 | BLK64(PF4, XO4, i) \ | ||
445 | BLK64(NOP, ST, i) \ | ||
446 | |||
447 | " .align 32 ;\n" | ||
448 | " 1: ;\n" | ||
449 | |||
450 | BLOCK(0) | ||
451 | BLOCK(4) | ||
452 | BLOCK(8) | ||
453 | BLOCK(12) | ||
454 | |||
455 | " add %[inc], %[p1] ;\n" | ||
456 | " add %[inc], %[p2] ;\n" | ||
457 | " add %[inc], %[p3] ;\n" | ||
458 | " add %[inc], %[p4] ;\n" | ||
459 | " add %[inc], %[p5] ;\n" | ||
460 | " dec %[cnt] ;\n" | ||
461 | " jnz 1b ;\n" | ||
462 | : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2), | ||
463 | [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5) | ||
464 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
465 | : "memory"); | ||
466 | |||
467 | kernel_fpu_end(); | ||
468 | } | ||
469 | |||
470 | static struct xor_block_template xor_block_sse_pf64 = { | ||
471 | .name = "prefetch64-sse", | ||
472 | .do_2 = xor_sse_2_pf64, | ||
473 | .do_3 = xor_sse_3_pf64, | ||
474 | .do_4 = xor_sse_4_pf64, | ||
475 | .do_5 = xor_sse_5_pf64, | ||
476 | }; | ||
477 | |||
478 | #undef LD | ||
479 | #undef XO1 | ||
480 | #undef XO2 | ||
481 | #undef XO3 | ||
482 | #undef XO4 | ||
483 | #undef ST | ||
484 | #undef NOP | ||
485 | #undef BLK64 | ||
486 | #undef BLOCK | ||
487 | |||
488 | #undef XOR_CONSTANT_CONSTRAINT | ||
489 | |||
5 | #ifdef CONFIG_X86_32 | 490 | #ifdef CONFIG_X86_32 |
6 | # include <asm/xor_32.h> | 491 | # include <asm/xor_32.h> |
7 | #else | 492 | #else |
8 | # include <asm/xor_64.h> | 493 | # include <asm/xor_64.h> |
9 | #endif | 494 | #endif |
10 | #endif | 495 | |
496 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | ||
497 | AVX_SELECT(FASTEST) | ||
498 | |||
499 | #endif /* _ASM_X86_XOR_H */ | ||
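AVX_SELECT() comes from <asm/xor_avx.h> and simply prefers the AVX template whenever the CPU supports it; XOR_SELECT_TEMPLATE() is the arch hook the generic calibration code applies to the benchmark winner. A sketch of how the pieces fit together (the AVX_SELECT shape is paraphrased from xor_avx.h, the loop from crypto/xor.c's calibrate_xor_blocks(); both are assumptions, not part of this patch):

	/* <asm/xor_avx.h>, roughly: prefer AVX when available. */
	#define AVX_SELECT(FASTEST) \
		(cpu_has_avx ? &xor_block_avx : FASTEST)

	/* crypto/xor.c, roughly: time every candidate registered via
	 * XOR_TRY_TEMPLATES, then let the arch override the winner. */
	#define xor_speed(templ)	do_xor_speed((templ), b1, b2)
	XOR_TRY_TEMPLATES;
	#undef xor_speed

	fastest = template_list;
	for (f = fastest; f; f = f->next)
		if (f->speed > fastest->speed)
			fastest = f;
	#ifdef XOR_SELECT_TEMPLATE
	fastest = XOR_SELECT_TEMPLATE(fastest);
	#endif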
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h index f79cb7ec0e06..ce05722e3c68 100644 --- a/arch/x86/include/asm/xor_32.h +++ b/arch/x86/include/asm/xor_32.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _ASM_X86_XOR_32_H | 2 | #define _ASM_X86_XOR_32_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Optimized RAID-5 checksumming functions for MMX and SSE. | 5 | * Optimized RAID-5 checksumming functions for MMX. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
@@ -529,290 +529,6 @@ static struct xor_block_template xor_block_p5_mmx = { | |||
529 | .do_5 = xor_p5_mmx_5, | 529 | .do_5 = xor_p5_mmx_5, |
530 | }; | 530 | }; |
531 | 531 | ||
532 | /* | ||
533 | * Cache avoiding checksumming functions utilizing KNI instructions | ||
534 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) | ||
535 | */ | ||
536 | |||
537 | #define OFFS(x) "16*("#x")" | ||
538 | #define PF_OFFS(x) "256+16*("#x")" | ||
539 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" | ||
540 | #define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" | ||
541 | #define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" | ||
542 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" | ||
543 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" | ||
544 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" | ||
545 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" | ||
546 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" | ||
547 | #define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" | ||
548 | #define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" | ||
549 | #define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" | ||
550 | #define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" | ||
551 | #define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" | ||
552 | |||
553 | |||
554 | static void | ||
555 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
556 | { | ||
557 | unsigned long lines = bytes >> 8; | ||
558 | |||
559 | kernel_fpu_begin(); | ||
560 | |||
561 | asm volatile( | ||
562 | #undef BLOCK | ||
563 | #define BLOCK(i) \ | ||
564 | LD(i, 0) \ | ||
565 | LD(i + 1, 1) \ | ||
566 | PF1(i) \ | ||
567 | PF1(i + 2) \ | ||
568 | LD(i + 2, 2) \ | ||
569 | LD(i + 3, 3) \ | ||
570 | PF0(i + 4) \ | ||
571 | PF0(i + 6) \ | ||
572 | XO1(i, 0) \ | ||
573 | XO1(i + 1, 1) \ | ||
574 | XO1(i + 2, 2) \ | ||
575 | XO1(i + 3, 3) \ | ||
576 | ST(i, 0) \ | ||
577 | ST(i + 1, 1) \ | ||
578 | ST(i + 2, 2) \ | ||
579 | ST(i + 3, 3) \ | ||
580 | |||
581 | |||
582 | PF0(0) | ||
583 | PF0(2) | ||
584 | |||
585 | " .align 32 ;\n" | ||
586 | " 1: ;\n" | ||
587 | |||
588 | BLOCK(0) | ||
589 | BLOCK(4) | ||
590 | BLOCK(8) | ||
591 | BLOCK(12) | ||
592 | |||
593 | " addl $256, %1 ;\n" | ||
594 | " addl $256, %2 ;\n" | ||
595 | " decl %0 ;\n" | ||
596 | " jnz 1b ;\n" | ||
597 | : "+r" (lines), | ||
598 | "+r" (p1), "+r" (p2) | ||
599 | : | ||
600 | : "memory"); | ||
601 | |||
602 | kernel_fpu_end(); | ||
603 | } | ||
604 | |||
605 | static void | ||
606 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
607 | unsigned long *p3) | ||
608 | { | ||
609 | unsigned long lines = bytes >> 8; | ||
610 | |||
611 | kernel_fpu_begin(); | ||
612 | |||
613 | asm volatile( | ||
614 | #undef BLOCK | ||
615 | #define BLOCK(i) \ | ||
616 | PF1(i) \ | ||
617 | PF1(i + 2) \ | ||
618 | LD(i,0) \ | ||
619 | LD(i + 1, 1) \ | ||
620 | LD(i + 2, 2) \ | ||
621 | LD(i + 3, 3) \ | ||
622 | PF2(i) \ | ||
623 | PF2(i + 2) \ | ||
624 | PF0(i + 4) \ | ||
625 | PF0(i + 6) \ | ||
626 | XO1(i,0) \ | ||
627 | XO1(i + 1, 1) \ | ||
628 | XO1(i + 2, 2) \ | ||
629 | XO1(i + 3, 3) \ | ||
630 | XO2(i,0) \ | ||
631 | XO2(i + 1, 1) \ | ||
632 | XO2(i + 2, 2) \ | ||
633 | XO2(i + 3, 3) \ | ||
634 | ST(i,0) \ | ||
635 | ST(i + 1, 1) \ | ||
636 | ST(i + 2, 2) \ | ||
637 | ST(i + 3, 3) \ | ||
638 | |||
639 | |||
640 | PF0(0) | ||
641 | PF0(2) | ||
642 | |||
643 | " .align 32 ;\n" | ||
644 | " 1: ;\n" | ||
645 | |||
646 | BLOCK(0) | ||
647 | BLOCK(4) | ||
648 | BLOCK(8) | ||
649 | BLOCK(12) | ||
650 | |||
651 | " addl $256, %1 ;\n" | ||
652 | " addl $256, %2 ;\n" | ||
653 | " addl $256, %3 ;\n" | ||
654 | " decl %0 ;\n" | ||
655 | " jnz 1b ;\n" | ||
656 | : "+r" (lines), | ||
657 | "+r" (p1), "+r"(p2), "+r"(p3) | ||
658 | : | ||
659 | : "memory" ); | ||
660 | |||
661 | kernel_fpu_end(); | ||
662 | } | ||
663 | |||
664 | static void | ||
665 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
666 | unsigned long *p3, unsigned long *p4) | ||
667 | { | ||
668 | unsigned long lines = bytes >> 8; | ||
669 | |||
670 | kernel_fpu_begin(); | ||
671 | |||
672 | asm volatile( | ||
673 | #undef BLOCK | ||
674 | #define BLOCK(i) \ | ||
675 | PF1(i) \ | ||
676 | PF1(i + 2) \ | ||
677 | LD(i,0) \ | ||
678 | LD(i + 1, 1) \ | ||
679 | LD(i + 2, 2) \ | ||
680 | LD(i + 3, 3) \ | ||
681 | PF2(i) \ | ||
682 | PF2(i + 2) \ | ||
683 | XO1(i,0) \ | ||
684 | XO1(i + 1, 1) \ | ||
685 | XO1(i + 2, 2) \ | ||
686 | XO1(i + 3, 3) \ | ||
687 | PF3(i) \ | ||
688 | PF3(i + 2) \ | ||
689 | PF0(i + 4) \ | ||
690 | PF0(i + 6) \ | ||
691 | XO2(i,0) \ | ||
692 | XO2(i + 1, 1) \ | ||
693 | XO2(i + 2, 2) \ | ||
694 | XO2(i + 3, 3) \ | ||
695 | XO3(i,0) \ | ||
696 | XO3(i + 1, 1) \ | ||
697 | XO3(i + 2, 2) \ | ||
698 | XO3(i + 3, 3) \ | ||
699 | ST(i,0) \ | ||
700 | ST(i + 1, 1) \ | ||
701 | ST(i + 2, 2) \ | ||
702 | ST(i + 3, 3) \ | ||
703 | |||
704 | |||
705 | PF0(0) | ||
706 | PF0(2) | ||
707 | |||
708 | " .align 32 ;\n" | ||
709 | " 1: ;\n" | ||
710 | |||
711 | BLOCK(0) | ||
712 | BLOCK(4) | ||
713 | BLOCK(8) | ||
714 | BLOCK(12) | ||
715 | |||
716 | " addl $256, %1 ;\n" | ||
717 | " addl $256, %2 ;\n" | ||
718 | " addl $256, %3 ;\n" | ||
719 | " addl $256, %4 ;\n" | ||
720 | " decl %0 ;\n" | ||
721 | " jnz 1b ;\n" | ||
722 | : "+r" (lines), | ||
723 | "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) | ||
724 | : | ||
725 | : "memory" ); | ||
726 | |||
727 | kernel_fpu_end(); | ||
728 | } | ||
729 | |||
730 | static void | ||
731 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
732 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
733 | { | ||
734 | unsigned long lines = bytes >> 8; | ||
735 | |||
736 | kernel_fpu_begin(); | ||
737 | |||
738 | /* Make sure GCC forgets anything it knows about p4 or p5, | ||
739 | such that it won't pass to the asm volatile below a | ||
740 | register that is shared with any other variable. That's | ||
741 | because we modify p4 and p5 there, but we can't mark them | ||
742 | as read/write, otherwise we'd overflow the 10-asm-operands | ||
743 | limit of GCC < 3.1. */ | ||
744 | asm("" : "+r" (p4), "+r" (p5)); | ||
745 | |||
746 | asm volatile( | ||
747 | #undef BLOCK | ||
748 | #define BLOCK(i) \ | ||
749 | PF1(i) \ | ||
750 | PF1(i + 2) \ | ||
751 | LD(i,0) \ | ||
752 | LD(i + 1, 1) \ | ||
753 | LD(i + 2, 2) \ | ||
754 | LD(i + 3, 3) \ | ||
755 | PF2(i) \ | ||
756 | PF2(i + 2) \ | ||
757 | XO1(i,0) \ | ||
758 | XO1(i + 1, 1) \ | ||
759 | XO1(i + 2, 2) \ | ||
760 | XO1(i + 3, 3) \ | ||
761 | PF3(i) \ | ||
762 | PF3(i + 2) \ | ||
763 | XO2(i,0) \ | ||
764 | XO2(i + 1, 1) \ | ||
765 | XO2(i + 2, 2) \ | ||
766 | XO2(i + 3, 3) \ | ||
767 | PF4(i) \ | ||
768 | PF4(i + 2) \ | ||
769 | PF0(i + 4) \ | ||
770 | PF0(i + 6) \ | ||
771 | XO3(i,0) \ | ||
772 | XO3(i + 1, 1) \ | ||
773 | XO3(i + 2, 2) \ | ||
774 | XO3(i + 3, 3) \ | ||
775 | XO4(i,0) \ | ||
776 | XO4(i + 1, 1) \ | ||
777 | XO4(i + 2, 2) \ | ||
778 | XO4(i + 3, 3) \ | ||
779 | ST(i,0) \ | ||
780 | ST(i + 1, 1) \ | ||
781 | ST(i + 2, 2) \ | ||
782 | ST(i + 3, 3) \ | ||
783 | |||
784 | |||
785 | PF0(0) | ||
786 | PF0(2) | ||
787 | |||
788 | " .align 32 ;\n" | ||
789 | " 1: ;\n" | ||
790 | |||
791 | BLOCK(0) | ||
792 | BLOCK(4) | ||
793 | BLOCK(8) | ||
794 | BLOCK(12) | ||
795 | |||
796 | " addl $256, %1 ;\n" | ||
797 | " addl $256, %2 ;\n" | ||
798 | " addl $256, %3 ;\n" | ||
799 | " addl $256, %4 ;\n" | ||
800 | " addl $256, %5 ;\n" | ||
801 | " decl %0 ;\n" | ||
802 | " jnz 1b ;\n" | ||
803 | : "+r" (lines), | ||
804 | "+r" (p1), "+r" (p2), "+r" (p3) | ||
805 | : "r" (p4), "r" (p5) | ||
806 | : "memory"); | ||
807 | |||
808 | /* p4 and p5 were modified, and now the variables are dead. | ||
809 | Clobber them just to be sure nobody does something stupid | ||
810 | like assuming they have some legal value. */ | ||
811 | asm("" : "=r" (p4), "=r" (p5)); | ||
812 | |||
813 | kernel_fpu_end(); | ||
814 | } | ||
815 | |||
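The deleted xor_sse_* bodies above reappear, shared between 32- and 64-bit, in <asm/xor.h>; only the operand constraints differed. Scheduling aside (prefetchnta streaming ahead of the loop, movaps/xorps in 16-byte lanes, 256 bytes per iteration), every variant computes the same thing as this scalar loop, shown for orientation only:

	/* Scalar equivalent of xor_sse_5(): p1 ^= p2 ^ p3 ^ p4 ^ p5. */
	static void
	xor_scalar_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
		     unsigned long *p3, unsigned long *p4, unsigned long *p5)
	{
		unsigned long i, words = bytes / sizeof(unsigned long);

		for (i = 0; i < words; i++)
			p1[i] ^= p2[i] ^ p3[i] ^ p4[i] ^ p5[i];
	}

The empty asm("") statements bracketing the five-source version exist only to keep GCC < 3.1 under its 10-operand limit, as the comment in the removed code explains.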
816 | static struct xor_block_template xor_block_pIII_sse = { | 532 | static struct xor_block_template xor_block_pIII_sse = { |
817 | .name = "pIII_sse", | 533 | .name = "pIII_sse", |
818 | .do_2 = xor_sse_2, | 534 | .do_2 = xor_sse_2, |
@@ -827,26 +543,25 @@ static struct xor_block_template xor_block_pIII_sse = { | |||
827 | /* Also try the generic routines. */ | 543 | /* Also try the generic routines. */ |
828 | #include <asm-generic/xor.h> | 544 | #include <asm-generic/xor.h> |
829 | 545 | ||
546 | /* We force the use of the SSE xor block because it can write around L2. | ||
547 | We may also be able to load into the L1 only depending on how the cpu | ||
548 | deals with a load to a line that is being prefetched. */ | ||
830 | #undef XOR_TRY_TEMPLATES | 549 | #undef XOR_TRY_TEMPLATES |
831 | #define XOR_TRY_TEMPLATES \ | 550 | #define XOR_TRY_TEMPLATES \ |
832 | do { \ | 551 | do { \ |
833 | xor_speed(&xor_block_8regs); \ | ||
834 | xor_speed(&xor_block_8regs_p); \ | ||
835 | xor_speed(&xor_block_32regs); \ | ||
836 | xor_speed(&xor_block_32regs_p); \ | ||
837 | AVX_XOR_SPEED; \ | 552 | AVX_XOR_SPEED; \ |
838 | if (cpu_has_xmm) \ | 553 | if (cpu_has_xmm) { \ |
839 | xor_speed(&xor_block_pIII_sse); \ | 554 | xor_speed(&xor_block_pIII_sse); \ |
840 | if (cpu_has_mmx) { \ | 555 | xor_speed(&xor_block_sse_pf64); \ |
556 | } else if (cpu_has_mmx) { \ | ||
841 | xor_speed(&xor_block_pII_mmx); \ | 557 | xor_speed(&xor_block_pII_mmx); \ |
842 | xor_speed(&xor_block_p5_mmx); \ | 558 | xor_speed(&xor_block_p5_mmx); \ |
559 | } else { \ | ||
560 | xor_speed(&xor_block_8regs); \ | ||
561 | xor_speed(&xor_block_8regs_p); \ | ||
562 | xor_speed(&xor_block_32regs); \ | ||
563 | xor_speed(&xor_block_32regs_p); \ | ||
843 | } \ | 564 | } \ |
844 | } while (0) | 565 | } while (0) |
845 | 566 | ||
846 | /* We force the use of the SSE xor block because it can write around L2. | ||
847 | We may also be able to load into the L1 only depending on how the cpu | ||
848 | deals with a load to a line that is being prefetched. */ | ||
849 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | ||
850 | AVX_SELECT(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) | ||
851 | |||
852 | #endif /* _ASM_X86_XOR_32_H */ | 567 | #endif /* _ASM_X86_XOR_32_H */ |
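The rewritten XOR_TRY_TEMPLATES benchmarks a single tier of candidates (SSE if present, else MMX, else the generic register loops) instead of timing all of them at boot. The tier tests are ordinary cpufeature checks; for reference, sketched from <asm/cpufeature.h>:

	#define cpu_has_mmx	boot_cpu_has(X86_FEATURE_MMX)
	#define cpu_has_xmm	boot_cpu_has(X86_FEATURE_XMM)	/* SSE */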
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h index 87ac522c4af5..546f1e3b87cc 100644 --- a/arch/x86/include/asm/xor_64.h +++ b/arch/x86/include/asm/xor_64.h | |||
@@ -1,301 +1,6 @@ | |||
1 | #ifndef _ASM_X86_XOR_64_H | 1 | #ifndef _ASM_X86_XOR_64_H |
2 | #define _ASM_X86_XOR_64_H | 2 | #define _ASM_X86_XOR_64_H |
3 | 3 | ||
4 | /* | ||
5 | * Optimized RAID-5 checksumming functions for MMX and SSE. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * (for example /usr/src/linux/COPYING); if not, write to the Free | ||
14 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
15 | */ | ||
16 | |||
17 | |||
18 | /* | ||
19 | * Cache avoiding checksumming functions utilizing KNI instructions | ||
20 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * Based on | ||
25 | * High-speed RAID5 checksumming functions utilizing SSE instructions. | ||
26 | * Copyright (C) 1998 Ingo Molnar. | ||
27 | */ | ||
28 | |||
29 | /* | ||
30 | * x86-64 changes / gcc fixes from Andi Kleen. | ||
31 | * Copyright 2002 Andi Kleen, SuSE Labs. | ||
32 | * | ||
33 | * This hasn't been optimized for the hammer yet, but there are likely | ||
34 | * no advantages to be gotten from x86-64 here anyways. | ||
35 | */ | ||
36 | |||
37 | #include <asm/i387.h> | ||
38 | |||
39 | #define OFFS(x) "16*("#x")" | ||
40 | #define PF_OFFS(x) "256+16*("#x")" | ||
41 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" | ||
42 | #define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" | ||
43 | #define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" | ||
44 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" | ||
45 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" | ||
46 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" | ||
47 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" | ||
48 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n" | ||
49 | #define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" | ||
50 | #define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" | ||
51 | #define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" | ||
52 | #define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" | ||
53 | #define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" | ||
54 | |||
55 | |||
56 | static void | ||
57 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
58 | { | ||
59 | unsigned int lines = bytes >> 8; | ||
60 | |||
61 | kernel_fpu_begin(); | ||
62 | |||
63 | asm volatile( | ||
64 | #undef BLOCK | ||
65 | #define BLOCK(i) \ | ||
66 | LD(i, 0) \ | ||
67 | LD(i + 1, 1) \ | ||
68 | PF1(i) \ | ||
69 | PF1(i + 2) \ | ||
70 | LD(i + 2, 2) \ | ||
71 | LD(i + 3, 3) \ | ||
72 | PF0(i + 4) \ | ||
73 | PF0(i + 6) \ | ||
74 | XO1(i, 0) \ | ||
75 | XO1(i + 1, 1) \ | ||
76 | XO1(i + 2, 2) \ | ||
77 | XO1(i + 3, 3) \ | ||
78 | ST(i, 0) \ | ||
79 | ST(i + 1, 1) \ | ||
80 | ST(i + 2, 2) \ | ||
81 | ST(i + 3, 3) \ | ||
82 | |||
83 | |||
84 | PF0(0) | ||
85 | PF0(2) | ||
86 | |||
87 | " .align 32 ;\n" | ||
88 | " 1: ;\n" | ||
89 | |||
90 | BLOCK(0) | ||
91 | BLOCK(4) | ||
92 | BLOCK(8) | ||
93 | BLOCK(12) | ||
94 | |||
95 | " addq %[inc], %[p1] ;\n" | ||
96 | " addq %[inc], %[p2] ;\n" | ||
97 | " decl %[cnt] ; jnz 1b" | ||
98 | : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines) | ||
99 | : [inc] "r" (256UL) | ||
100 | : "memory"); | ||
101 | |||
102 | kernel_fpu_end(); | ||
103 | } | ||
104 | |||
105 | static void | ||
106 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
107 | unsigned long *p3) | ||
108 | { | ||
109 | unsigned int lines = bytes >> 8; | ||
110 | |||
111 | kernel_fpu_begin(); | ||
112 | asm volatile( | ||
113 | #undef BLOCK | ||
114 | #define BLOCK(i) \ | ||
115 | PF1(i) \ | ||
116 | PF1(i + 2) \ | ||
117 | LD(i, 0) \ | ||
118 | LD(i + 1, 1) \ | ||
119 | LD(i + 2, 2) \ | ||
120 | LD(i + 3, 3) \ | ||
121 | PF2(i) \ | ||
122 | PF2(i + 2) \ | ||
123 | PF0(i + 4) \ | ||
124 | PF0(i + 6) \ | ||
125 | XO1(i, 0) \ | ||
126 | XO1(i + 1, 1) \ | ||
127 | XO1(i + 2, 2) \ | ||
128 | XO1(i + 3, 3) \ | ||
129 | XO2(i, 0) \ | ||
130 | XO2(i + 1, 1) \ | ||
131 | XO2(i + 2, 2) \ | ||
132 | XO2(i + 3, 3) \ | ||
133 | ST(i, 0) \ | ||
134 | ST(i + 1, 1) \ | ||
135 | ST(i + 2, 2) \ | ||
136 | ST(i + 3, 3) \ | ||
137 | |||
138 | |||
139 | PF0(0) | ||
140 | PF0(2) | ||
141 | |||
142 | " .align 32 ;\n" | ||
143 | " 1: ;\n" | ||
144 | |||
145 | BLOCK(0) | ||
146 | BLOCK(4) | ||
147 | BLOCK(8) | ||
148 | BLOCK(12) | ||
149 | |||
150 | " addq %[inc], %[p1] ;\n" | ||
151 | " addq %[inc], %[p2] ;\n" | ||
152 | " addq %[inc], %[p3] ;\n" | ||
153 | " decl %[cnt] ; jnz 1b" | ||
154 | : [cnt] "+r" (lines), | ||
155 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) | ||
156 | : [inc] "r" (256UL) | ||
157 | : "memory"); | ||
158 | kernel_fpu_end(); | ||
159 | } | ||
160 | |||
161 | static void | ||
162 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
163 | unsigned long *p3, unsigned long *p4) | ||
164 | { | ||
165 | unsigned int lines = bytes >> 8; | ||
166 | |||
167 | kernel_fpu_begin(); | ||
168 | |||
169 | asm volatile( | ||
170 | #undef BLOCK | ||
171 | #define BLOCK(i) \ | ||
172 | PF1(i) \ | ||
173 | PF1(i + 2) \ | ||
174 | LD(i, 0) \ | ||
175 | LD(i + 1, 1) \ | ||
176 | LD(i + 2, 2) \ | ||
177 | LD(i + 3, 3) \ | ||
178 | PF2(i) \ | ||
179 | PF2(i + 2) \ | ||
180 | XO1(i, 0) \ | ||
181 | XO1(i + 1, 1) \ | ||
182 | XO1(i + 2, 2) \ | ||
183 | XO1(i + 3, 3) \ | ||
184 | PF3(i) \ | ||
185 | PF3(i + 2) \ | ||
186 | PF0(i + 4) \ | ||
187 | PF0(i + 6) \ | ||
188 | XO2(i, 0) \ | ||
189 | XO2(i + 1, 1) \ | ||
190 | XO2(i + 2, 2) \ | ||
191 | XO2(i + 3, 3) \ | ||
192 | XO3(i, 0) \ | ||
193 | XO3(i + 1, 1) \ | ||
194 | XO3(i + 2, 2) \ | ||
195 | XO3(i + 3, 3) \ | ||
196 | ST(i, 0) \ | ||
197 | ST(i + 1, 1) \ | ||
198 | ST(i + 2, 2) \ | ||
199 | ST(i + 3, 3) \ | ||
200 | |||
201 | |||
202 | PF0(0) | ||
203 | PF0(2) | ||
204 | |||
205 | " .align 32 ;\n" | ||
206 | " 1: ;\n" | ||
207 | |||
208 | BLOCK(0) | ||
209 | BLOCK(4) | ||
210 | BLOCK(8) | ||
211 | BLOCK(12) | ||
212 | |||
213 | " addq %[inc], %[p1] ;\n" | ||
214 | " addq %[inc], %[p2] ;\n" | ||
215 | " addq %[inc], %[p3] ;\n" | ||
216 | " addq %[inc], %[p4] ;\n" | ||
217 | " decl %[cnt] ; jnz 1b" | ||
218 | : [cnt] "+c" (lines), | ||
219 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) | ||
220 | : [inc] "r" (256UL) | ||
221 | : "memory" ); | ||
222 | |||
223 | kernel_fpu_end(); | ||
224 | } | ||
225 | |||
226 | static void | ||
227 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
228 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
229 | { | ||
230 | unsigned int lines = bytes >> 8; | ||
231 | |||
232 | kernel_fpu_begin(); | ||
233 | |||
234 | asm volatile( | ||
235 | #undef BLOCK | ||
236 | #define BLOCK(i) \ | ||
237 | PF1(i) \ | ||
238 | PF1(i + 2) \ | ||
239 | LD(i, 0) \ | ||
240 | LD(i + 1, 1) \ | ||
241 | LD(i + 2, 2) \ | ||
242 | LD(i + 3, 3) \ | ||
243 | PF2(i) \ | ||
244 | PF2(i + 2) \ | ||
245 | XO1(i, 0) \ | ||
246 | XO1(i + 1, 1) \ | ||
247 | XO1(i + 2, 2) \ | ||
248 | XO1(i + 3, 3) \ | ||
249 | PF3(i) \ | ||
250 | PF3(i + 2) \ | ||
251 | XO2(i, 0) \ | ||
252 | XO2(i + 1, 1) \ | ||
253 | XO2(i + 2, 2) \ | ||
254 | XO2(i + 3, 3) \ | ||
255 | PF4(i) \ | ||
256 | PF4(i + 2) \ | ||
257 | PF0(i + 4) \ | ||
258 | PF0(i + 6) \ | ||
259 | XO3(i, 0) \ | ||
260 | XO3(i + 1, 1) \ | ||
261 | XO3(i + 2, 2) \ | ||
262 | XO3(i + 3, 3) \ | ||
263 | XO4(i, 0) \ | ||
264 | XO4(i + 1, 1) \ | ||
265 | XO4(i + 2, 2) \ | ||
266 | XO4(i + 3, 3) \ | ||
267 | ST(i, 0) \ | ||
268 | ST(i + 1, 1) \ | ||
269 | ST(i + 2, 2) \ | ||
270 | ST(i + 3, 3) \ | ||
271 | |||
272 | |||
273 | PF0(0) | ||
274 | PF0(2) | ||
275 | |||
276 | " .align 32 ;\n" | ||
277 | " 1: ;\n" | ||
278 | |||
279 | BLOCK(0) | ||
280 | BLOCK(4) | ||
281 | BLOCK(8) | ||
282 | BLOCK(12) | ||
283 | |||
284 | " addq %[inc], %[p1] ;\n" | ||
285 | " addq %[inc], %[p2] ;\n" | ||
286 | " addq %[inc], %[p3] ;\n" | ||
287 | " addq %[inc], %[p4] ;\n" | ||
288 | " addq %[inc], %[p5] ;\n" | ||
289 | " decl %[cnt] ; jnz 1b" | ||
290 | : [cnt] "+c" (lines), | ||
291 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), | ||
292 | [p5] "+r" (p5) | ||
293 | : [inc] "r" (256UL) | ||
294 | : "memory"); | ||
295 | |||
296 | kernel_fpu_end(); | ||
297 | } | ||
298 | |||
299 | static struct xor_block_template xor_block_sse = { | 4 | static struct xor_block_template xor_block_sse = { |
300 | .name = "generic_sse", | 5 | .name = "generic_sse", |
301 | .do_2 = xor_sse_2, | 6 | .do_2 = xor_sse_2, |
@@ -308,17 +13,15 @@ static struct xor_block_template xor_block_sse = { | |||
308 | /* Also try the AVX routines */ | 13 | /* Also try the AVX routines */ |
309 | #include <asm/xor_avx.h> | 14 | #include <asm/xor_avx.h> |
310 | 15 | ||
16 | /* We force the use of the SSE xor block because it can write around L2. | ||
17 | We may also be able to load into the L1 only depending on how the cpu | ||
18 | deals with a load to a line that is being prefetched. */ | ||
311 | #undef XOR_TRY_TEMPLATES | 19 | #undef XOR_TRY_TEMPLATES |
312 | #define XOR_TRY_TEMPLATES \ | 20 | #define XOR_TRY_TEMPLATES \ |
313 | do { \ | 21 | do { \ |
314 | AVX_XOR_SPEED; \ | 22 | AVX_XOR_SPEED; \ |
23 | xor_speed(&xor_block_sse_pf64); \ | ||
315 | xor_speed(&xor_block_sse); \ | 24 | xor_speed(&xor_block_sse); \ |
316 | } while (0) | 25 | } while (0) |
317 | 26 | ||
318 | /* We force the use of the SSE xor block because it can write around L2. | ||
319 | We may also be able to load into the L1 only depending on how the cpu | ||
320 | deals with a load to a line that is being prefetched. */ | ||
321 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | ||
322 | AVX_SELECT(&xor_block_sse) | ||
323 | |||
324 | #endif /* _ASM_X86_XOR_64_H */ | 27 | #endif /* _ASM_X86_XOR_64_H */ |
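Every one of these routines executes SSE instructions in kernel context, so the asm must be bracketed by kernel_fpu_begin()/kernel_fpu_end() (pulled in via <asm/i387.h> here) to save and restore user FPU/SSE state. Minimal usage pattern, as a sketch:

	#include <asm/i387.h>

	static void do_sse_work(void)
	{
		kernel_fpu_begin();	/* save user FPU state, disable preemption */
		/* movaps/xorps may now clobber %xmm0-%xmm3 safely */
		kernel_fpu_end();	/* restore state, re-enable preemption */
	}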
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 58c829871c31..a0eab85ce7b8 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h | |||
@@ -4,66 +4,6 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/ioctls.h> | 5 | #include <asm/ioctls.h> |
6 | 6 | ||
7 | /* | ||
8 | * Machine Check support for x86 | ||
9 | */ | ||
10 | |||
11 | /* MCG_CAP register defines */ | ||
12 | #define MCG_BANKCNT_MASK 0xff /* Number of Banks */ | ||
13 | #define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ | ||
14 | #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ | ||
15 | #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ | ||
16 | #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ | ||
17 | #define MCG_EXT_CNT_SHIFT 16 | ||
18 | #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) | ||
19 | #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ | ||
20 | |||
21 | /* MCG_STATUS register defines */ | ||
22 | #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ | ||
23 | #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ | ||
24 | #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ | ||
25 | |||
26 | /* MCi_STATUS register defines */ | ||
27 | #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ | ||
28 | #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ | ||
29 | #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ | ||
30 | #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ | ||
31 | #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ | ||
32 | #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ | ||
33 | #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ | ||
34 | #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ | ||
35 | #define MCI_STATUS_AR (1ULL<<55) /* Action required */ | ||
36 | #define MCACOD 0xffff /* MCA Error Code */ | ||
37 | |||
38 | /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ | ||
39 | #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ | ||
40 | #define MCACOD_SCRUBMSK 0xfff0 | ||
41 | #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ | ||
42 | #define MCACOD_DATA 0x0134 /* Data Load */ | ||
43 | #define MCACOD_INSTR 0x0150 /* Instruction Fetch */ | ||
44 | |||
45 | /* MCi_MISC register defines */ | ||
46 | #define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) | ||
47 | #define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) | ||
48 | #define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */ | ||
49 | #define MCI_MISC_ADDR_LINEAR 1 /* linear address */ | ||
50 | #define MCI_MISC_ADDR_PHYS 2 /* physical address */ | ||
51 | #define MCI_MISC_ADDR_MEM 3 /* memory address */ | ||
52 | #define MCI_MISC_ADDR_GENERIC 7 /* generic */ | ||
53 | |||
54 | /* CTL2 register defines */ | ||
55 | #define MCI_CTL2_CMCI_EN (1ULL << 30) | ||
56 | #define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL | ||
57 | |||
58 | #define MCJ_CTX_MASK 3 | ||
59 | #define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK) | ||
60 | #define MCJ_CTX_RANDOM 0 /* inject context: random */ | ||
61 | #define MCJ_CTX_PROCESS 0x1 /* inject context: process */ | ||
62 | #define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */ | ||
63 | #define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */ | ||
64 | #define MCJ_EXCEPTION 0x8 /* raise as exception */ | ||
65 | #define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */ | ||
66 | |||
67 | /* Fields are zero when not available */ | 7 | /* Fields are zero when not available */ |
68 | struct mce { | 8 | struct mce { |
69 | __u64 status; | 9 | __u64 status; |
@@ -87,35 +27,8 @@ struct mce { | |||
87 | __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ | 27 | __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ |
88 | }; | 28 | }; |
89 | 29 | ||
90 | /* | ||
91 | * This structure contains all data related to the MCE log. Also | ||
92 | * carries a signature to make it easier to find from external | ||
93 | * debugging tools. Each entry is only valid when its finished flag | ||
94 | * is set. | ||
95 | */ | ||
96 | |||
97 | #define MCE_LOG_LEN 32 | ||
98 | |||
99 | struct mce_log { | ||
100 | char signature[12]; /* "MACHINECHECK" */ | ||
101 | unsigned len; /* = MCE_LOG_LEN */ | ||
102 | unsigned next; | ||
103 | unsigned flags; | ||
104 | unsigned recordlen; /* length of struct mce */ | ||
105 | struct mce entry[MCE_LOG_LEN]; | ||
106 | }; | ||
107 | |||
108 | #define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ | ||
109 | |||
110 | #define MCE_LOG_SIGNATURE "MACHINECHECK" | ||
111 | |||
112 | #define MCE_GET_RECORD_LEN _IOR('M', 1, int) | 30 | #define MCE_GET_RECORD_LEN _IOR('M', 1, int) |
113 | #define MCE_GET_LOG_LEN _IOR('M', 2, int) | 31 | #define MCE_GET_LOG_LEN _IOR('M', 2, int) |
114 | #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) | 32 | #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) |
115 | 33 | ||
116 | /* Software defined banks */ | ||
117 | #define MCE_EXTENDED_BANK 128 | ||
118 | #define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 | ||
119 | #define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) | ||
120 | |||
121 | #endif /* _UAPI_ASM_X86_MCE_H */ | 34 | #endif /* _UAPI_ASM_X86_MCE_H */ |
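These MCA definitions are not dropped: they move from the exported uapi header into the kernel-internal <asm/mce.h>, since they describe MSR bit layout rather than user ABI. For orientation, the MCG_CAP fields are consumed roughly like this during MCE init (a sketch in the style of arch/x86/kernel/cpu/mcheck/mce.c, not quoted from it):

	u64 cap;
	unsigned int banks;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	banks = cap & MCG_BANKCNT_MASK;		/* number of reporting banks */
	if (cap & MCG_CTL_P)			/* global control register present? */
		wrmsrl(MSR_IA32_MCG_CTL, ~0ULL);	/* enable all MCA features */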
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index 433a59fb1a74..075a40255591 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h | |||
@@ -194,6 +194,8 @@ | |||
194 | /* Fam 15h MSRs */ | 194 | /* Fam 15h MSRs */ |
195 | #define MSR_F15H_PERF_CTL 0xc0010200 | 195 | #define MSR_F15H_PERF_CTL 0xc0010200 |
196 | #define MSR_F15H_PERF_CTR 0xc0010201 | 196 | #define MSR_F15H_PERF_CTR 0xc0010201 |
197 | #define MSR_F15H_NB_PERF_CTL 0xc0010240 | ||
198 | #define MSR_F15H_NB_PERF_CTR 0xc0010241 | ||
197 | 199 | ||
198 | /* Fam 10h MSRs */ | 200 | /* Fam 10h MSRs */ |
199 | #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 | 201 | #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 |
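The two new MSRs are the base addresses of the family 15h northbridge performance counter bank, consumed by the AMD uncore perf driver added elsewhere in this series. Assuming the same even/odd CTL/CTR interleave as the core counters at 0xc0010200 (an assumption, not stated in this hunk), counter i would be driven like this:

	/* Hypothetical helpers: control at base + 2*i, count at base + 2*i + 1. */
	#define NB_PERF_CTL(i)	(MSR_F15H_NB_PERF_CTL + 2 * (i))
	#define NB_PERF_CTR(i)	(MSR_F15H_NB_PERF_CTR + 2 * (i))

	wrmsrl(NB_PERF_CTL(0), evsel | ARCH_PERFMON_EVENTSEL_ENABLE);
	rdmsrl(NB_PERF_CTR(0), count);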
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 34e923a53762..ac3b3d002833 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -65,8 +65,7 @@ obj-$(CONFIG_X86_TSC) += trace_clock.o | |||
65 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o | 65 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o |
66 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o | 66 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o |
67 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o | 67 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o |
68 | obj-$(CONFIG_KPROBES) += kprobes.o | 68 | obj-y += kprobes/ |
69 | obj-$(CONFIG_OPTPROBES) += kprobes-opt.o | ||
70 | obj-$(CONFIG_MODULES) += module.o | 69 | obj-$(CONFIG_MODULES) += module.o |
71 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o | 70 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o |
72 | obj-$(CONFIG_KGDB) += kgdb.o | 71 | obj-$(CONFIG_KGDB) += kgdb.o |
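kprobes.o and kprobes-opt.o are replaced by an unconditional descent into a new arch/x86/kernel/kprobes/ directory; the per-feature conditionals move into that directory's own Makefile, presumably along these lines (the new Makefile itself is outside this hunk, so treat the file list as an assumption):

	# arch/x86/kernel/kprobes/Makefile
	obj-$(CONFIG_KPROBES)		+= core.o
	obj-$(CONFIG_OPTPROBES)		+= opt.o
	obj-$(CONFIG_KPROBES_ON_FTRACE)	+= ftrace.o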
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index b994cc84aa7e..a5b4dce1b7ac 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -1477,8 +1477,7 @@ void __init bsp_end_local_APIC_setup(void) | |||
1477 | * Now that local APIC setup is completed for BP, configure the fault | 1477 | * Now that local APIC setup is completed for BP, configure the fault |
1478 | * handling for interrupt remapping. | 1478 | * handling for interrupt remapping. |
1479 | */ | 1479 | */ |
1480 | if (irq_remapping_enabled) | 1480 | irq_remap_enable_fault_handling(); |
1481 | irq_remap_enable_fault_handling(); | ||
1482 | 1481 | ||
1483 | } | 1482 | } |
1484 | 1483 | ||
@@ -2251,8 +2250,7 @@ static int lapic_suspend(void) | |||
2251 | local_irq_save(flags); | 2250 | local_irq_save(flags); |
2252 | disable_local_APIC(); | 2251 | disable_local_APIC(); |
2253 | 2252 | ||
2254 | if (irq_remapping_enabled) | 2253 | irq_remapping_disable(); |
2255 | irq_remapping_disable(); | ||
2256 | 2254 | ||
2257 | local_irq_restore(flags); | 2255 | local_irq_restore(flags); |
2258 | return 0; | 2256 | return 0; |
@@ -2268,16 +2266,15 @@ static void lapic_resume(void) | |||
2268 | return; | 2266 | return; |
2269 | 2267 | ||
2270 | local_irq_save(flags); | 2268 | local_irq_save(flags); |
2271 | if (irq_remapping_enabled) { | 2269 | |
2272 | /* | 2270 | /* |
2273 | * IO-APIC and PIC have their own resume routines. | 2271 | * IO-APIC and PIC have their own resume routines. |
2274 | * We just mask them here to make sure the interrupt | 2272 | * We just mask them here to make sure the interrupt |
2275 | * subsystem is completely quiet while we enable x2apic | 2273 | * subsystem is completely quiet while we enable x2apic |
2276 | * and interrupt-remapping. | 2274 | * and interrupt-remapping. |
2277 | */ | 2275 | */ |
2278 | mask_ioapic_entries(); | 2276 | mask_ioapic_entries(); |
2279 | legacy_pic->mask_all(); | 2277 | legacy_pic->mask_all(); |
2280 | } | ||
2281 | 2278 | ||
2282 | if (x2apic_mode) | 2279 | if (x2apic_mode) |
2283 | enable_x2apic(); | 2280 | enable_x2apic(); |
@@ -2320,8 +2317,7 @@ static void lapic_resume(void) | |||
2320 | apic_write(APIC_ESR, 0); | 2317 | apic_write(APIC_ESR, 0); |
2321 | apic_read(APIC_ESR); | 2318 | apic_read(APIC_ESR); |
2322 | 2319 | ||
2323 | if (irq_remapping_enabled) | 2320 | irq_remapping_reenable(x2apic_mode); |
2324 | irq_remapping_reenable(x2apic_mode); | ||
2325 | 2321 | ||
2326 | local_irq_restore(flags); | 2322 | local_irq_restore(flags); |
2327 | } | 2323 | } |
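The pattern in all four apic.c hunks is the same: the irq_remapping_enabled test moves out of the caller and into the irq-remapping entry points themselves, so the APIC code can invoke them unconditionally. The callees are expected to degrade to no-ops along these lines (a sketch of the shape, not text from this patch):

	/* drivers/iommu/irq_remapping.c, sketched: bail out when remapping is off. */
	void irq_remapping_disable(void)
	{
		if (!irq_remapping_enabled || !remap_ops || !remap_ops->disable)
			return;

		remap_ops->disable();
	}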
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index b739d398bb29..9ed796ccc32c 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -68,22 +68,6 @@ | |||
68 | #define for_each_irq_pin(entry, head) \ | 68 | #define for_each_irq_pin(entry, head) \ |
69 | for (entry = head; entry; entry = entry->next) | 69 | for (entry = head; entry; entry = entry->next) |
70 | 70 | ||
71 | #ifdef CONFIG_IRQ_REMAP | ||
72 | static void irq_remap_modify_chip_defaults(struct irq_chip *chip); | ||
73 | static inline bool irq_remapped(struct irq_cfg *cfg) | ||
74 | { | ||
75 | return cfg->irq_2_iommu.iommu != NULL; | ||
76 | } | ||
77 | #else | ||
78 | static inline bool irq_remapped(struct irq_cfg *cfg) | ||
79 | { | ||
80 | return false; | ||
81 | } | ||
82 | static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip) | ||
83 | { | ||
84 | } | ||
85 | #endif | ||
86 | |||
87 | /* | 71 | /* |
88 | * Is the SiS APIC rmw bug present ? | 72 | * Is the SiS APIC rmw bug present ? |
89 | * -1 = don't know, 0 = no, 1 = yes | 73 | * -1 = don't know, 0 = no, 1 = yes |
@@ -300,9 +284,9 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) | |||
300 | return cfg; | 284 | return cfg; |
301 | } | 285 | } |
302 | 286 | ||
303 | static int alloc_irq_from(unsigned int from, int node) | 287 | static int alloc_irqs_from(unsigned int from, unsigned int count, int node) |
304 | { | 288 | { |
305 | return irq_alloc_desc_from(from, node); | 289 | return irq_alloc_descs_from(from, count, node); |
306 | } | 290 | } |
307 | 291 | ||
308 | static void free_irq_at(unsigned int at, struct irq_cfg *cfg) | 292 | static void free_irq_at(unsigned int at, struct irq_cfg *cfg) |
@@ -326,7 +310,7 @@ static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) | |||
326 | + (mpc_ioapic_addr(idx) & ~PAGE_MASK); | 310 | + (mpc_ioapic_addr(idx) & ~PAGE_MASK); |
327 | } | 311 | } |
328 | 312 | ||
329 | static inline void io_apic_eoi(unsigned int apic, unsigned int vector) | 313 | void io_apic_eoi(unsigned int apic, unsigned int vector) |
330 | { | 314 | { |
331 | struct io_apic __iomem *io_apic = io_apic_base(apic); | 315 | struct io_apic __iomem *io_apic = io_apic_base(apic); |
332 | writel(vector, &io_apic->eoi); | 316 | writel(vector, &io_apic->eoi); |
@@ -573,19 +557,10 @@ static void unmask_ioapic_irq(struct irq_data *data) | |||
573 | * Otherwise, we simulate the EOI message manually by changing the trigger | 557 | * Otherwise, we simulate the EOI message manually by changing the trigger |
574 | * mode to edge and then back to level, with RTE being masked during this. | 558 | * mode to edge and then back to level, with RTE being masked during this. |
575 | */ | 559 | */ |
576 | static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg) | 560 | void native_eoi_ioapic_pin(int apic, int pin, int vector) |
577 | { | 561 | { |
578 | if (mpc_ioapic_ver(apic) >= 0x20) { | 562 | if (mpc_ioapic_ver(apic) >= 0x20) { |
579 | /* | 563 | io_apic_eoi(apic, vector); |
580 | * Intr-remapping uses pin number as the virtual vector | ||
581 | * in the RTE. Actual vector is programmed in | ||
582 | * intr-remapping table entry. Hence for the io-apic | ||
583 | * EOI we use the pin number. | ||
584 | */ | ||
585 | if (cfg && irq_remapped(cfg)) | ||
586 | io_apic_eoi(apic, pin); | ||
587 | else | ||
588 | io_apic_eoi(apic, vector); | ||
589 | } else { | 564 | } else { |
590 | struct IO_APIC_route_entry entry, entry1; | 565 | struct IO_APIC_route_entry entry, entry1; |
591 | 566 | ||
@@ -606,14 +581,15 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg) | |||
606 | } | 581 | } |
607 | } | 582 | } |
608 | 583 | ||
609 | static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | 584 | void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) |
610 | { | 585 | { |
611 | struct irq_pin_list *entry; | 586 | struct irq_pin_list *entry; |
612 | unsigned long flags; | 587 | unsigned long flags; |
613 | 588 | ||
614 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 589 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
615 | for_each_irq_pin(entry, cfg->irq_2_pin) | 590 | for_each_irq_pin(entry, cfg->irq_2_pin) |
616 | __eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg); | 591 | x86_io_apic_ops.eoi_ioapic_pin(entry->apic, entry->pin, |
592 | cfg->vector); | ||
617 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 593 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
618 | } | 594 | } |
619 | 595 | ||
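io_apic_eoi() loses its static here, and __eoi_ioapic_pin() becomes native_eoi_ioapic_pin(), because both turn into default members of the x86_io_apic_ops indirection table that the interrupt-remapping code can override; the cfg-based irq_remapped() special case then disappears from the native path. Over the series the ops structure in <asm/x86_init.h> grows to approximately this shape (approximate; only the members used in this file are certain from this section):

	struct x86_io_apic_ops {
		void		(*init)(void);
		unsigned int	(*read)(unsigned int apic, unsigned int reg);
		void		(*write)(unsigned int apic, unsigned int reg,
					 unsigned int value);
		void		(*modify)(unsigned int apic, unsigned int reg,
					  unsigned int value);
		void		(*disable)(void);
		void		(*print_entries)(unsigned int apic,
						 unsigned int nr_entries);
		int		(*setup_entry)(int irq,
					       struct IO_APIC_route_entry *entry,
					       unsigned int destination,
					       int vector,
					       struct io_apic_irq_attr *attr);
		void		(*eoi_ioapic_pin)(int apic, int pin, int vector);
		int		(*set_affinity)(struct irq_data *data,
						const struct cpumask *mask,
						bool force);
	};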
@@ -650,7 +626,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | |||
650 | } | 626 | } |
651 | 627 | ||
652 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 628 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
653 | __eoi_ioapic_pin(apic, pin, entry.vector, NULL); | 629 | x86_io_apic_ops.eoi_ioapic_pin(apic, pin, entry.vector); |
654 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 630 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
655 | } | 631 | } |
656 | 632 | ||
@@ -1304,25 +1280,18 @@ static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg, | |||
1304 | fasteoi = false; | 1280 | fasteoi = false; |
1305 | } | 1281 | } |
1306 | 1282 | ||
1307 | if (irq_remapped(cfg)) { | 1283 | if (setup_remapped_irq(irq, cfg, chip)) |
1308 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | ||
1309 | irq_remap_modify_chip_defaults(chip); | ||
1310 | fasteoi = trigger != 0; | 1284 | fasteoi = trigger != 0; |
1311 | } | ||
1312 | 1285 | ||
1313 | hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; | 1286 | hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; |
1314 | irq_set_chip_and_handler_name(irq, chip, hdl, | 1287 | irq_set_chip_and_handler_name(irq, chip, hdl, |
1315 | fasteoi ? "fasteoi" : "edge"); | 1288 | fasteoi ? "fasteoi" : "edge"); |
1316 | } | 1289 | } |
1317 | 1290 | ||
1318 | static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, | 1291 | int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, |
1319 | unsigned int destination, int vector, | 1292 | unsigned int destination, int vector, |
1320 | struct io_apic_irq_attr *attr) | 1293 | struct io_apic_irq_attr *attr) |
1321 | { | 1294 | { |
1322 | if (irq_remapping_enabled) | ||
1323 | return setup_ioapic_remapped_entry(irq, entry, destination, | ||
1324 | vector, attr); | ||
1325 | |||
1326 | memset(entry, 0, sizeof(*entry)); | 1295 | memset(entry, 0, sizeof(*entry)); |
1327 | 1296 | ||
1328 | entry->delivery_mode = apic->irq_delivery_mode; | 1297 | entry->delivery_mode = apic->irq_delivery_mode; |
@@ -1370,8 +1339,8 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, | |||
1370 | attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin, | 1339 | attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin, |
1371 | cfg->vector, irq, attr->trigger, attr->polarity, dest); | 1340 | cfg->vector, irq, attr->trigger, attr->polarity, dest); |
1372 | 1341 | ||
1373 | if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) { | 1342 | if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) { |
1374 | pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", | 1343 | pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", |
1375 | mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); | 1344 | mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); |
1376 | __clear_irq_vector(irq, cfg); | 1345 | __clear_irq_vector(irq, cfg); |
1377 | 1346 | ||
@@ -1479,9 +1448,6 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx, | |||
1479 | struct IO_APIC_route_entry entry; | 1448 | struct IO_APIC_route_entry entry; |
1480 | unsigned int dest; | 1449 | unsigned int dest; |
1481 | 1450 | ||
1482 | if (irq_remapping_enabled) | ||
1483 | return; | ||
1484 | |||
1485 | memset(&entry, 0, sizeof(entry)); | 1451 | memset(&entry, 0, sizeof(entry)); |
1486 | 1452 | ||
1487 | /* | 1453 | /* |
@@ -1513,9 +1479,63 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx, | |||
1513 | ioapic_write_entry(ioapic_idx, pin, entry); | 1479 | ioapic_write_entry(ioapic_idx, pin, entry); |
1514 | } | 1480 | } |
1515 | 1481 | ||
1516 | __apicdebuginit(void) print_IO_APIC(int ioapic_idx) | 1482 | void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries) |
1517 | { | 1483 | { |
1518 | int i; | 1484 | int i; |
1485 | |||
1486 | pr_debug(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n"); | ||
1487 | |||
1488 | for (i = 0; i <= nr_entries; i++) { | ||
1489 | struct IO_APIC_route_entry entry; | ||
1490 | |||
1491 | entry = ioapic_read_entry(apic, i); | ||
1492 | |||
1493 | pr_debug(" %02x %02X ", i, entry.dest); | ||
1494 | pr_cont("%1d %1d %1d %1d %1d " | ||
1495 | "%1d %1d %02X\n", | ||
1496 | entry.mask, | ||
1497 | entry.trigger, | ||
1498 | entry.irr, | ||
1499 | entry.polarity, | ||
1500 | entry.delivery_status, | ||
1501 | entry.dest_mode, | ||
1502 | entry.delivery_mode, | ||
1503 | entry.vector); | ||
1504 | } | ||
1505 | } | ||
1506 | |||
1507 | void intel_ir_io_apic_print_entries(unsigned int apic, | ||
1508 | unsigned int nr_entries) | ||
1509 | { | ||
1510 | int i; | ||
1511 | |||
1512 | pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n"); | ||
1513 | |||
1514 | for (i = 0; i <= nr_entries; i++) { | ||
1515 | struct IR_IO_APIC_route_entry *ir_entry; | ||
1516 | struct IO_APIC_route_entry entry; | ||
1517 | |||
1518 | entry = ioapic_read_entry(apic, i); | ||
1519 | |||
1520 | ir_entry = (struct IR_IO_APIC_route_entry *)&entry; | ||
1521 | |||
1522 | pr_debug(" %02x %04X ", i, ir_entry->index); | ||
1523 | pr_cont("%1d %1d %1d %1d %1d " | ||
1524 | "%1d %1d %X %02X\n", | ||
1525 | ir_entry->format, | ||
1526 | ir_entry->mask, | ||
1527 | ir_entry->trigger, | ||
1528 | ir_entry->irr, | ||
1529 | ir_entry->polarity, | ||
1530 | ir_entry->delivery_status, | ||
1531 | ir_entry->index2, | ||
1532 | ir_entry->zero, | ||
1533 | ir_entry->vector); | ||
1534 | } | ||
1535 | } | ||
1536 | |||
1537 | __apicdebuginit(void) print_IO_APIC(int ioapic_idx) | ||
1538 | { | ||
1519 | union IO_APIC_reg_00 reg_00; | 1539 | union IO_APIC_reg_00 reg_00; |
1520 | union IO_APIC_reg_01 reg_01; | 1540 | union IO_APIC_reg_01 reg_01; |
1521 | union IO_APIC_reg_02 reg_02; | 1541 | union IO_APIC_reg_02 reg_02; |
@@ -1568,58 +1588,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx) | |||
1568 | 1588 | ||
1569 | printk(KERN_DEBUG ".... IRQ redirection table:\n"); | 1589 | printk(KERN_DEBUG ".... IRQ redirection table:\n"); |
1570 | 1590 | ||
1571 | if (irq_remapping_enabled) { | 1591 | x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries); |
1572 | printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR" | ||
1573 | " Pol Stat Indx2 Zero Vect:\n"); | ||
1574 | } else { | ||
1575 | printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" | ||
1576 | " Stat Dmod Deli Vect:\n"); | ||
1577 | } | ||
1578 | |||
1579 | for (i = 0; i <= reg_01.bits.entries; i++) { | ||
1580 | if (irq_remapping_enabled) { | ||
1581 | struct IO_APIC_route_entry entry; | ||
1582 | struct IR_IO_APIC_route_entry *ir_entry; | ||
1583 | |||
1584 | entry = ioapic_read_entry(ioapic_idx, i); | ||
1585 | ir_entry = (struct IR_IO_APIC_route_entry *) &entry; | ||
1586 | printk(KERN_DEBUG " %02x %04X ", | ||
1587 | i, | ||
1588 | ir_entry->index | ||
1589 | ); | ||
1590 | pr_cont("%1d %1d %1d %1d %1d " | ||
1591 | "%1d %1d %X %02X\n", | ||
1592 | ir_entry->format, | ||
1593 | ir_entry->mask, | ||
1594 | ir_entry->trigger, | ||
1595 | ir_entry->irr, | ||
1596 | ir_entry->polarity, | ||
1597 | ir_entry->delivery_status, | ||
1598 | ir_entry->index2, | ||
1599 | ir_entry->zero, | ||
1600 | ir_entry->vector | ||
1601 | ); | ||
1602 | } else { | ||
1603 | struct IO_APIC_route_entry entry; | ||
1604 | |||
1605 | entry = ioapic_read_entry(ioapic_idx, i); | ||
1606 | printk(KERN_DEBUG " %02x %02X ", | ||
1607 | i, | ||
1608 | entry.dest | ||
1609 | ); | ||
1610 | pr_cont("%1d %1d %1d %1d %1d " | ||
1611 | "%1d %1d %02X\n", | ||
1612 | entry.mask, | ||
1613 | entry.trigger, | ||
1614 | entry.irr, | ||
1615 | entry.polarity, | ||
1616 | entry.delivery_status, | ||
1617 | entry.dest_mode, | ||
1618 | entry.delivery_mode, | ||
1619 | entry.vector | ||
1620 | ); | ||
1621 | } | ||
1622 | } | ||
1623 | } | 1592 | } |
1624 | 1593 | ||
1625 | __apicdebuginit(void) print_IO_APICs(void) | 1594 | __apicdebuginit(void) print_IO_APICs(void) |
@@ -1921,30 +1890,14 @@ void __init enable_IO_APIC(void) | |||
1921 | clear_IO_APIC(); | 1890 | clear_IO_APIC(); |
1922 | } | 1891 | } |
1923 | 1892 | ||
1924 | /* | 1893 | void native_disable_io_apic(void) |
1925 | * Not an __init, needed by the reboot code | ||
1926 | */ | ||
1927 | void disable_IO_APIC(void) | ||
1928 | { | 1894 | { |
1929 | /* | 1895 | /* |
1930 | * Clear the IO-APIC before rebooting: | ||
1931 | */ | ||
1932 | clear_IO_APIC(); | ||
1933 | |||
1934 | if (!legacy_pic->nr_legacy_irqs) | ||
1935 | return; | ||
1936 | |||
1937 | /* | ||
1938 | * If the i8259 is routed through an IOAPIC | 1896 | * If the i8259 is routed through an IOAPIC |
1939 | * Put that IOAPIC in virtual wire mode | 1897 | * Put that IOAPIC in virtual wire mode |
1940 | * so legacy interrupts can be delivered. | 1898 | * so legacy interrupts can be delivered. |
1941 | * | ||
1942 | * With interrupt-remapping, for now we will use virtual wire A mode, | ||
1943 | * as virtual wire B is little complex (need to configure both | ||
1944 | * IOAPIC RTE as well as interrupt-remapping table entry). | ||
1945 | * As this gets called during crash dump, keep this simple for now. | ||
1946 | */ | 1899 | */ |
1947 | if (ioapic_i8259.pin != -1 && !irq_remapping_enabled) { | 1900 | if (ioapic_i8259.pin != -1) { |
1948 | struct IO_APIC_route_entry entry; | 1901 | struct IO_APIC_route_entry entry; |
1949 | 1902 | ||
1950 | memset(&entry, 0, sizeof(entry)); | 1903 | memset(&entry, 0, sizeof(entry)); |
@@ -1964,12 +1917,25 @@ void disable_IO_APIC(void) | |||
1964 | ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); | 1917 | ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); |
1965 | } | 1918 | } |
1966 | 1919 | ||
1920 | if (cpu_has_apic || apic_from_smp_config()) | ||
1921 | disconnect_bsp_APIC(ioapic_i8259.pin != -1); | ||
1922 | |||
1923 | } | ||
1924 | |||
1925 | /* | ||
1926 | * Not an __init, needed by the reboot code | ||
1927 | */ | ||
1928 | void disable_IO_APIC(void) | ||
1929 | { | ||
1967 | /* | 1930 | /* |
1968 | * Use virtual wire A mode when interrupt remapping is enabled. | 1931 | * Clear the IO-APIC before rebooting: |
1969 | */ | 1932 | */ |
1970 | if (cpu_has_apic || apic_from_smp_config()) | 1933 | clear_IO_APIC(); |
1971 | disconnect_bsp_APIC(!irq_remapping_enabled && | 1934 | |
1972 | ioapic_i8259.pin != -1); | 1935 | if (!legacy_pic->nr_legacy_irqs) |
1936 | return; | ||
1937 | |||
1938 | x86_io_apic_ops.disable(); | ||
1973 | } | 1939 | } |
1974 | 1940 | ||
1975 | #ifdef CONFIG_X86_32 | 1941 | #ifdef CONFIG_X86_32 |
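disable_IO_APIC() is split so that only the virtual-wire handover lives behind the new x86_io_apic_ops.disable() hook; the old irq_remapping_enabled special case (fall back to virtual wire mode A) can then be expressed by installing a different hook at remapping-init time instead of branching here. Conceptually (hypothetical function name, not text from this patch):

	/* During irq-remapping setup, override the native disable path: */
	x86_io_apic_ops.disable = irq_remapping_disable_io_apic;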
@@ -2322,12 +2288,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2322 | 2288 | ||
2323 | apic = entry->apic; | 2289 | apic = entry->apic; |
2324 | pin = entry->pin; | 2290 | pin = entry->pin; |
2325 | /* | 2291 | |
2326 | * With interrupt-remapping, destination information comes | 2292 | io_apic_write(apic, 0x11 + pin*2, dest); |
2327 | * from interrupt-remapping table entry. | ||
2328 | */ | ||
2329 | if (!irq_remapped(cfg)) | ||
2330 | io_apic_write(apic, 0x11 + pin*2, dest); | ||
2331 | reg = io_apic_read(apic, 0x10 + pin*2); | 2293 | reg = io_apic_read(apic, 0x10 + pin*2); |
2332 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 2294 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
2333 | reg |= vector; | 2295 | reg |= vector; |
@@ -2369,9 +2331,10 @@ int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
2369 | return 0; | 2331 | return 0; |
2370 | } | 2332 | } |
2371 | 2333 | ||
2372 | static int | 2334 | |
2373 | ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, | 2335 | int native_ioapic_set_affinity(struct irq_data *data, |
2374 | bool force) | 2336 | const struct cpumask *mask, |
2337 | bool force) | ||
2375 | { | 2338 | { |
2376 | unsigned int dest, irq = data->irq; | 2339 | unsigned int dest, irq = data->irq; |
2377 | unsigned long flags; | 2340 | unsigned long flags; |
@@ -2548,33 +2511,6 @@ static void ack_apic_level(struct irq_data *data) | |||
2548 | ioapic_irqd_unmask(data, cfg, masked); | 2511 | ioapic_irqd_unmask(data, cfg, masked); |
2549 | } | 2512 | } |
2550 | 2513 | ||
2551 | #ifdef CONFIG_IRQ_REMAP | ||
2552 | static void ir_ack_apic_edge(struct irq_data *data) | ||
2553 | { | ||
2554 | ack_APIC_irq(); | ||
2555 | } | ||
2556 | |||
2557 | static void ir_ack_apic_level(struct irq_data *data) | ||
2558 | { | ||
2559 | ack_APIC_irq(); | ||
2560 | eoi_ioapic_irq(data->irq, data->chip_data); | ||
2561 | } | ||
2562 | |||
2563 | static void ir_print_prefix(struct irq_data *data, struct seq_file *p) | ||
2564 | { | ||
2565 | seq_printf(p, " IR-%s", data->chip->name); | ||
2566 | } | ||
2567 | |||
2568 | static void irq_remap_modify_chip_defaults(struct irq_chip *chip) | ||
2569 | { | ||
2570 | chip->irq_print_chip = ir_print_prefix; | ||
2571 | chip->irq_ack = ir_ack_apic_edge; | ||
2572 | chip->irq_eoi = ir_ack_apic_level; | ||
2573 | |||
2574 | chip->irq_set_affinity = set_remapped_irq_affinity; | ||
2575 | } | ||
2576 | #endif /* CONFIG_IRQ_REMAP */ | ||
2577 | |||
2578 | static struct irq_chip ioapic_chip __read_mostly = { | 2514 | static struct irq_chip ioapic_chip __read_mostly = { |
2579 | .name = "IO-APIC", | 2515 | .name = "IO-APIC", |
2580 | .irq_startup = startup_ioapic_irq, | 2516 | .irq_startup = startup_ioapic_irq, |
@@ -2582,7 +2518,7 @@ static struct irq_chip ioapic_chip __read_mostly = { | |||
2582 | .irq_unmask = unmask_ioapic_irq, | 2518 | .irq_unmask = unmask_ioapic_irq, |
2583 | .irq_ack = ack_apic_edge, | 2519 | .irq_ack = ack_apic_edge, |
2584 | .irq_eoi = ack_apic_level, | 2520 | .irq_eoi = ack_apic_level, |
2585 | .irq_set_affinity = ioapic_set_affinity, | 2521 | .irq_set_affinity = native_ioapic_set_affinity, |
2586 | .irq_retrigger = ioapic_retrigger_irq, | 2522 | .irq_retrigger = ioapic_retrigger_irq, |
2587 | }; | 2523 | }; |
2588 | 2524 | ||
@@ -2781,8 +2717,7 @@ static inline void __init check_timer(void) | |||
2781 | * 8259A. | 2717 | * 8259A. |
2782 | */ | 2718 | */ |
2783 | if (pin1 == -1) { | 2719 | if (pin1 == -1) { |
2784 | if (irq_remapping_enabled) | 2720 | panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC"); |
2785 | panic("BIOS bug: timer not connected to IO-APIC"); | ||
2786 | pin1 = pin2; | 2721 | pin1 = pin2; |
2787 | apic1 = apic2; | 2722 | apic1 = apic2; |
2788 | no_pin1 = 1; | 2723 | no_pin1 = 1; |
@@ -2814,8 +2749,7 @@ static inline void __init check_timer(void) | |||
2814 | clear_IO_APIC_pin(0, pin1); | 2749 | clear_IO_APIC_pin(0, pin1); |
2815 | goto out; | 2750 | goto out; |
2816 | } | 2751 | } |
2817 | if (irq_remapping_enabled) | 2752 | panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC"); |
2818 | panic("timer doesn't work through Interrupt-remapped IO-APIC"); | ||
2819 | local_irq_disable(); | 2753 | local_irq_disable(); |
2820 | clear_IO_APIC_pin(apic1, pin1); | 2754 | clear_IO_APIC_pin(apic1, pin1); |
2821 | if (!no_pin1) | 2755 | if (!no_pin1) |
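panic_if_irq_remap() is a small helper introduced alongside these changes so callers no longer open-code the guard. Its expected definition in <asm/irq_remapping.h>, sketched:

	static inline void panic_if_irq_remap(const char *msg)
	{
		if (irq_remapping_enabled)
			panic("%s", msg);
	}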
@@ -2982,37 +2916,58 @@ device_initcall(ioapic_init_ops); | |||
2982 | /* | 2916 | /* |
2983 | * Dynamic irq allocate and deallocation | 2917 | * Dynamic irq allocate and deallocation |
2984 | */ | 2918 | */ |
2985 | unsigned int create_irq_nr(unsigned int from, int node) | 2919 | unsigned int __create_irqs(unsigned int from, unsigned int count, int node) |
2986 | { | 2920 | { |
2987 | struct irq_cfg *cfg; | 2921 | struct irq_cfg **cfg; |
2988 | unsigned long flags; | 2922 | unsigned long flags; |
2989 | unsigned int ret = 0; | 2923 | int irq, i; |
2990 | int irq; | ||
2991 | 2924 | ||
2992 | if (from < nr_irqs_gsi) | 2925 | if (from < nr_irqs_gsi) |
2993 | from = nr_irqs_gsi; | 2926 | from = nr_irqs_gsi; |
2994 | 2927 | ||
2995 | irq = alloc_irq_from(from, node); | 2928 | cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node); |
2996 | if (irq < 0) | 2929 | if (!cfg) |
2997 | return 0; | ||
2998 | cfg = alloc_irq_cfg(irq, node); | ||
2999 | if (!cfg) { | ||
3000 | free_irq_at(irq, NULL); | ||
3001 | return 0; | 2930 | return 0; |
2931 | |||
2932 | irq = alloc_irqs_from(from, count, node); | ||
2933 | if (irq < 0) | ||
2934 | goto out_cfgs; | ||
2935 | |||
2936 | for (i = 0; i < count; i++) { | ||
2937 | cfg[i] = alloc_irq_cfg(irq + i, node); | ||
2938 | if (!cfg[i]) | ||
2939 | goto out_irqs; | ||
3002 | } | 2940 | } |
3003 | 2941 | ||
3004 | raw_spin_lock_irqsave(&vector_lock, flags); | 2942 | raw_spin_lock_irqsave(&vector_lock, flags); |
3005 | if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) | 2943 | for (i = 0; i < count; i++) |
3006 | ret = irq; | 2944 | if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus())) |
2945 | goto out_vecs; | ||
3007 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 2946 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
3008 | 2947 | ||
3009 | if (ret) { | 2948 | for (i = 0; i < count; i++) { |
3010 | irq_set_chip_data(irq, cfg); | 2949 | irq_set_chip_data(irq + i, cfg[i]); |
3011 | irq_clear_status_flags(irq, IRQ_NOREQUEST); | 2950 | irq_clear_status_flags(irq + i, IRQ_NOREQUEST); |
3012 | } else { | ||
3013 | free_irq_at(irq, cfg); | ||
3014 | } | 2951 | } |
3015 | return ret; | 2952 | |
2953 | kfree(cfg); | ||
2954 | return irq; | ||
2955 | |||
2956 | out_vecs: | ||
2957 | for (i--; i >= 0; i--) | ||
2958 | __clear_irq_vector(irq + i, cfg[i]); | ||
2959 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
2960 | out_irqs: | ||
2961 | for (i = 0; i < count; i++) | ||
2962 | free_irq_at(irq + i, cfg[i]); | ||
2963 | out_cfgs: | ||
2964 | kfree(cfg); | ||
2965 | return 0; | ||
2966 | } | ||
2967 | |||
2968 | unsigned int create_irq_nr(unsigned int from, int node) | ||
2969 | { | ||
2970 | return __create_irqs(from, 1, node); | ||
3016 | } | 2971 | } |
3017 | 2972 | ||
3018 | int create_irq(void) | 2973 | int create_irq(void) |
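__create_irqs() generalizes create_irq_nr() to a contiguous range, which multi-vector MSI needs (consecutive IRQs backed by a consecutive IRTE block), with a three-stage unwind when vector assignment fails partway. irq_alloc_descs_from() is the existing genirq helper for grabbing such a range; for reference, its definition in <linux/irq.h> is a thin wrapper (sketched):

	#define irq_alloc_descs_from(from, cnt, node)	\
		irq_alloc_descs(-1, from, cnt, node)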
@@ -3037,48 +2992,35 @@ void destroy_irq(unsigned int irq) | |||
3037 | 2992 | ||
3038 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); | 2993 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); |
3039 | 2994 | ||
3040 | if (irq_remapped(cfg)) | 2995 | free_remapped_irq(irq); |
3041 | free_remapped_irq(irq); | 2996 | |
3042 | raw_spin_lock_irqsave(&vector_lock, flags); | 2997 | raw_spin_lock_irqsave(&vector_lock, flags); |
3043 | __clear_irq_vector(irq, cfg); | 2998 | __clear_irq_vector(irq, cfg); |
3044 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 2999 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
3045 | free_irq_at(irq, cfg); | 3000 | free_irq_at(irq, cfg); |
3046 | } | 3001 | } |
3047 | 3002 | ||
3003 | void destroy_irqs(unsigned int irq, unsigned int count) | ||
3004 | { | ||
3005 | unsigned int i; | ||
3006 | |||
3007 | for (i = 0; i < count; i++) | ||
3008 | destroy_irq(irq + i); | ||
3009 | } | ||
3010 | |||
3048 | /* | 3011 | /* |
3049 | * MSI message composition | 3012 | * MSI message composition |
3050 | */ | 3013 | */ |
3051 | #ifdef CONFIG_PCI_MSI | 3014 | void native_compose_msi_msg(struct pci_dev *pdev, |
3052 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | 3015 | unsigned int irq, unsigned int dest, |
3053 | struct msi_msg *msg, u8 hpet_id) | 3016 | struct msi_msg *msg, u8 hpet_id) |
3054 | { | 3017 | { |
3055 | struct irq_cfg *cfg; | 3018 | struct irq_cfg *cfg = irq_cfg(irq); |
3056 | int err; | ||
3057 | unsigned dest; | ||
3058 | |||
3059 | if (disable_apic) | ||
3060 | return -ENXIO; | ||
3061 | |||
3062 | cfg = irq_cfg(irq); | ||
3063 | err = assign_irq_vector(irq, cfg, apic->target_cpus()); | ||
3064 | if (err) | ||
3065 | return err; | ||
3066 | 3019 | ||
3067 | err = apic->cpu_mask_to_apicid_and(cfg->domain, | 3020 | msg->address_hi = MSI_ADDR_BASE_HI; |
3068 | apic->target_cpus(), &dest); | ||
3069 | if (err) | ||
3070 | return err; | ||
3071 | |||
3072 | if (irq_remapped(cfg)) { | ||
3073 | compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id); | ||
3074 | return err; | ||
3075 | } | ||
3076 | 3021 | ||
3077 | if (x2apic_enabled()) | 3022 | if (x2apic_enabled()) |
3078 | msg->address_hi = MSI_ADDR_BASE_HI | | 3023 | msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest); |
3079 | MSI_ADDR_EXT_DEST_ID(dest); | ||
3080 | else | ||
3081 | msg->address_hi = MSI_ADDR_BASE_HI; | ||
3082 | 3024 | ||
3083 | msg->address_lo = | 3025 | msg->address_lo = |
3084 | MSI_ADDR_BASE_LO | | 3026 | MSI_ADDR_BASE_LO | |
@@ -3097,8 +3039,32 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
3097 | MSI_DATA_DELIVERY_FIXED: | 3039 | MSI_DATA_DELIVERY_FIXED: |
3098 | MSI_DATA_DELIVERY_LOWPRI) | | 3040 | MSI_DATA_DELIVERY_LOWPRI) | |
3099 | MSI_DATA_VECTOR(cfg->vector); | 3041 | MSI_DATA_VECTOR(cfg->vector); |
3042 | } | ||
3100 | 3043 | ||
3101 | return err; | 3044 | #ifdef CONFIG_PCI_MSI |
3045 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | ||
3046 | struct msi_msg *msg, u8 hpet_id) | ||
3047 | { | ||
3048 | struct irq_cfg *cfg; | ||
3049 | int err; | ||
3050 | unsigned dest; | ||
3051 | |||
3052 | if (disable_apic) | ||
3053 | return -ENXIO; | ||
3054 | |||
3055 | cfg = irq_cfg(irq); | ||
3056 | err = assign_irq_vector(irq, cfg, apic->target_cpus()); | ||
3057 | if (err) | ||
3058 | return err; | ||
3059 | |||
3060 | err = apic->cpu_mask_to_apicid_and(cfg->domain, | ||
3061 | apic->target_cpus(), &dest); | ||
3062 | if (err) | ||
3063 | return err; | ||
3064 | |||
3065 | x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id); | ||
3066 | |||
3067 | return 0; | ||
3102 | } | 3068 | } |
3103 | 3069 | ||
3104 | static int | 3070 | static int |
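msi_compose_msg() keeps the vector assignment and destination lookup but delegates the actual message encoding to x86_msi.compose_msi_msg, so the remapping code can substitute its own encoder without the irq_remapped() branch. The relevant slice of struct x86_msi_ops in <asm/x86_init.h> looks roughly like this (roughly; only compose_msi_msg and setup_msi_irqs are evidenced in this section):

	struct x86_msi_ops {
		int  (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
		void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
					unsigned int dest, struct msi_msg *msg,
					u8 hpet_id);
		void (*teardown_msi_irq)(unsigned int irq);
		void (*teardown_msi_irqs)(struct pci_dev *dev);
		int  (*setup_hpet_msi)(unsigned int irq, unsigned int id);
	};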
@@ -3136,23 +3102,28 @@ static struct irq_chip msi_chip = { | |||
3136 | .irq_retrigger = ioapic_retrigger_irq, | 3102 | .irq_retrigger = ioapic_retrigger_irq, |
3137 | }; | 3103 | }; |
3138 | 3104 | ||
3139 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | 3105 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
3106 | unsigned int irq_base, unsigned int irq_offset) | ||
3140 | { | 3107 | { |
3141 | struct irq_chip *chip = &msi_chip; | 3108 | struct irq_chip *chip = &msi_chip; |
3142 | struct msi_msg msg; | 3109 | struct msi_msg msg; |
3110 | unsigned int irq = irq_base + irq_offset; | ||
3143 | int ret; | 3111 | int ret; |
3144 | 3112 | ||
3145 | ret = msi_compose_msg(dev, irq, &msg, -1); | 3113 | ret = msi_compose_msg(dev, irq, &msg, -1); |
3146 | if (ret < 0) | 3114 | if (ret < 0) |
3147 | return ret; | 3115 | return ret; |
3148 | 3116 | ||
3149 | irq_set_msi_desc(irq, msidesc); | 3117 | irq_set_msi_desc_off(irq_base, irq_offset, msidesc); |
3150 | write_msi_msg(irq, &msg); | ||
3151 | 3118 | ||
3152 | if (irq_remapped(irq_get_chip_data(irq))) { | 3119 | /* |
3153 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3120 | * MSI-X message is written per-IRQ, the offset is always 0. |
3154 | irq_remap_modify_chip_defaults(chip); | 3121 | * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. |
3155 | } | 3122 | */ |
3123 | if (!irq_offset) | ||
3124 | write_msi_msg(irq, &msg); | ||
3125 | |||
3126 | setup_remapped_irq(irq, irq_get_chip_data(irq), chip); | ||
3156 | 3127 | ||
3157 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | 3128 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); |
3158 | 3129 | ||
@@ -3163,46 +3134,26 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
3163 | 3134 | ||
3164 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 3135 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
3165 | { | 3136 | { |
3166 | int node, ret, sub_handle, index = 0; | ||
3167 | unsigned int irq, irq_want; | 3137 | unsigned int irq, irq_want; |
3168 | struct msi_desc *msidesc; | 3138 | struct msi_desc *msidesc; |
3139 | int node, ret; | ||
3169 | 3140 | ||
3170 | /* x86 doesn't support multiple MSI yet */ | 3141 | /* Multiple MSI vectors only supported with interrupt remapping */ |
3171 | if (type == PCI_CAP_ID_MSI && nvec > 1) | 3142 | if (type == PCI_CAP_ID_MSI && nvec > 1) |
3172 | return 1; | 3143 | return 1; |
3173 | 3144 | ||
3174 | node = dev_to_node(&dev->dev); | 3145 | node = dev_to_node(&dev->dev); |
3175 | irq_want = nr_irqs_gsi; | 3146 | irq_want = nr_irqs_gsi; |
3176 | sub_handle = 0; | ||
3177 | list_for_each_entry(msidesc, &dev->msi_list, list) { | 3147 | list_for_each_entry(msidesc, &dev->msi_list, list) { |
3178 | irq = create_irq_nr(irq_want, node); | 3148 | irq = create_irq_nr(irq_want, node); |
3179 | if (irq == 0) | 3149 | if (irq == 0) |
3180 | return -1; | 3150 | return -ENOSPC; |
3151 | |||
3181 | irq_want = irq + 1; | 3152 | irq_want = irq + 1; |
3182 | if (!irq_remapping_enabled) | ||
3183 | goto no_ir; | ||
3184 | 3153 | ||
3185 | if (!sub_handle) { | 3154 | ret = setup_msi_irq(dev, msidesc, irq, 0); |
3186 | /* | ||
3187 | * allocate the consecutive block of IRTE's | ||
3188 | * for 'nvec' | ||
3189 | */ | ||
3190 | index = msi_alloc_remapped_irq(dev, irq, nvec); | ||
3191 | if (index < 0) { | ||
3192 | ret = index; | ||
3193 | goto error; | ||
3194 | } | ||
3195 | } else { | ||
3196 | ret = msi_setup_remapped_irq(dev, irq, index, | ||
3197 | sub_handle); | ||
3198 | if (ret < 0) | ||
3199 | goto error; | ||
3200 | } | ||
3201 | no_ir: | ||
3202 | ret = setup_msi_irq(dev, msidesc, irq); | ||
3203 | if (ret < 0) | 3155 | if (ret < 0) |
3204 | goto error; | 3156 | goto error; |
3205 | sub_handle++; | ||
3206 | } | 3157 | } |
3207 | return 0; | 3158 | return 0; |
3208 | 3159 | ||
@@ -3298,26 +3249,19 @@ static struct irq_chip hpet_msi_type = { | |||
3298 | .irq_retrigger = ioapic_retrigger_irq, | 3249 | .irq_retrigger = ioapic_retrigger_irq, |
3299 | }; | 3250 | }; |
3300 | 3251 | ||
3301 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | 3252 | int default_setup_hpet_msi(unsigned int irq, unsigned int id) |
3302 | { | 3253 | { |
3303 | struct irq_chip *chip = &hpet_msi_type; | 3254 | struct irq_chip *chip = &hpet_msi_type; |
3304 | struct msi_msg msg; | 3255 | struct msi_msg msg; |
3305 | int ret; | 3256 | int ret; |
3306 | 3257 | ||
3307 | if (irq_remapping_enabled) { | ||
3308 | ret = setup_hpet_msi_remapped(irq, id); | ||
3309 | if (ret) | ||
3310 | return ret; | ||
3311 | } | ||
3312 | |||
3313 | ret = msi_compose_msg(NULL, irq, &msg, id); | 3258 | ret = msi_compose_msg(NULL, irq, &msg, id); |
3314 | if (ret < 0) | 3259 | if (ret < 0) |
3315 | return ret; | 3260 | return ret; |
3316 | 3261 | ||
3317 | hpet_msi_write(irq_get_handler_data(irq), &msg); | 3262 | hpet_msi_write(irq_get_handler_data(irq), &msg); |
3318 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3263 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3319 | if (irq_remapped(irq_get_chip_data(irq))) | 3264 | setup_remapped_irq(irq, irq_get_chip_data(irq), chip); |
3320 | irq_remap_modify_chip_defaults(chip); | ||
3321 | 3265 | ||
3322 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | 3266 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); |
3323 | return 0; | 3267 | return 0; |
@@ -3683,10 +3627,7 @@ void __init setup_ioapic_dest(void) | |||
3683 | else | 3627 | else |
3684 | mask = apic->target_cpus(); | 3628 | mask = apic->target_cpus(); |
3685 | 3629 | ||
3686 | if (irq_remapping_enabled) | 3630 | x86_io_apic_ops.set_affinity(idata, mask, false); |
3687 | set_remapped_irq_affinity(idata, mask, false); | ||
3688 | else | ||
3689 | ioapic_set_affinity(idata, mask, false); | ||
3690 | } | 3631 | } |
3691 | 3632 | ||
3692 | } | 3633 | } |
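
The common thread in the io_apic.c hunks above: open-coded "if (irq_remapping_enabled)" / "if (irq_remapped(cfg))" branches give way to calls through overridable function-pointer tables (x86_msi.compose_msi_msg, x86_msi.setup_hpet_msi, x86_io_apic_ops.set_affinity), which the interrupt-remapping code can repoint once at init time. A minimal sketch of the pattern, with hypothetical names:

    /* Hypothetical names; the real tables are x86_msi and x86_io_apic_ops. */
    struct msi_ops {
            void (*compose)(unsigned int irq, unsigned int dest);
    };

    static void native_compose(unsigned int irq, unsigned int dest)   { /* ... */ }
    static void remapped_compose(unsigned int irq, unsigned int dest) { /* ... */ }

    static struct msi_ops msi_ops = { .compose = native_compose };

    static void irq_remapping_init(void)
    {
            /* one repoint at boot replaces a runtime test in every caller */
            msi_ops.compose = remapped_compose;
    }
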
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index cce91bf26676..7434d8556d09 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c | |||
@@ -106,7 +106,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) | |||
106 | unsigned long mask = cpumask_bits(cpumask)[0]; | 106 | unsigned long mask = cpumask_bits(cpumask)[0]; |
107 | unsigned long flags; | 107 | unsigned long flags; |
108 | 108 | ||
109 | if (WARN_ONCE(!mask, "empty IPI mask")) | 109 | if (!mask) |
110 | return; | 110 | return; |
111 | 111 | ||
112 | local_irq_save(flags); | 112 | local_irq_save(flags); |
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index e03a1e180e81..562a76d433c8 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c | |||
@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg) | |||
20 | } | 20 | } |
21 | early_param("x2apic_phys", set_x2apic_phys_mode); | 21 | early_param("x2apic_phys", set_x2apic_phys_mode); |
22 | 22 | ||
23 | static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 23 | static bool x2apic_fadt_phys(void) |
24 | { | 24 | { |
25 | if (x2apic_phys) | 25 | if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && |
26 | return x2apic_enabled(); | 26 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { |
27 | else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && | ||
28 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) && | ||
29 | x2apic_enabled()) { | ||
30 | printk(KERN_DEBUG "System requires x2apic physical mode\n"); | 27 | printk(KERN_DEBUG "System requires x2apic physical mode\n"); |
31 | return 1; | 28 | return true; |
32 | } | 29 | } |
33 | else | 30 | return false; |
34 | return 0; | 31 | } |
32 | |||
33 | static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
34 | { | ||
35 | return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys()); | ||
35 | } | 36 | } |
36 | 37 | ||
37 | static void | 38 | static void |
@@ -82,7 +83,7 @@ static void init_x2apic_ldr(void) | |||
82 | 83 | ||
83 | static int x2apic_phys_probe(void) | 84 | static int x2apic_phys_probe(void) |
84 | { | 85 | { |
85 | if (x2apic_mode && x2apic_phys) | 86 | if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys())) |
86 | return 1; | 87 | return 1; |
87 | 88 | ||
88 | return apic == &apic_x2apic_phys; | 89 | return apic == &apic_x2apic_phys; |
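
The x2apic_fadt_phys() helper factors one ACPI test out of two call sites, so the MADT OEM check and the probe share a single predicate for "firmware demands physical destination mode". The underlying FADT test, sketched standalone (flag bit and revision per my reading of ACPICA, where ACPI_FADT_APIC_PHYSICAL is bit 19 and FADT2_REVISION_ID is 3; illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    static bool fadt_forces_physical_apic(uint8_t fadt_revision, uint32_t fadt_flags)
    {
            /* "Force APIC physical destination mode" is only defined
             * for FADT revision 3 (ACPI 2.0) and later */
            return fadt_revision >= 3 && (fadt_flags & (1u << 19));
    }
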
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index d65464e43503..8d7012b7f402 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -899,6 +899,7 @@ static void apm_cpu_idle(void) | |||
899 | static int use_apm_idle; /* = 0 */ | 899 | static int use_apm_idle; /* = 0 */ |
900 | static unsigned int last_jiffies; /* = 0 */ | 900 | static unsigned int last_jiffies; /* = 0 */ |
901 | static unsigned int last_stime; /* = 0 */ | 901 | static unsigned int last_stime; /* = 0 */ |
902 | cputime_t stime; | ||
902 | 903 | ||
903 | int apm_idle_done = 0; | 904 | int apm_idle_done = 0; |
904 | unsigned int jiffies_since_last_check = jiffies - last_jiffies; | 905 | unsigned int jiffies_since_last_check = jiffies - last_jiffies; |
@@ -906,23 +907,23 @@ static void apm_cpu_idle(void) | |||
906 | 907 | ||
907 | WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012"); | 908 | WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012"); |
908 | recalc: | 909 | recalc: |
910 | task_cputime(current, NULL, &stime); | ||
909 | if (jiffies_since_last_check > IDLE_CALC_LIMIT) { | 911 | if (jiffies_since_last_check > IDLE_CALC_LIMIT) { |
910 | use_apm_idle = 0; | 912 | use_apm_idle = 0; |
911 | last_jiffies = jiffies; | ||
912 | last_stime = current->stime; | ||
913 | } else if (jiffies_since_last_check > idle_period) { | 913 | } else if (jiffies_since_last_check > idle_period) { |
914 | unsigned int idle_percentage; | 914 | unsigned int idle_percentage; |
915 | 915 | ||
916 | idle_percentage = current->stime - last_stime; | 916 | idle_percentage = stime - last_stime; |
917 | idle_percentage *= 100; | 917 | idle_percentage *= 100; |
918 | idle_percentage /= jiffies_since_last_check; | 918 | idle_percentage /= jiffies_since_last_check; |
919 | use_apm_idle = (idle_percentage > idle_threshold); | 919 | use_apm_idle = (idle_percentage > idle_threshold); |
920 | if (apm_info.forbid_idle) | 920 | if (apm_info.forbid_idle) |
921 | use_apm_idle = 0; | 921 | use_apm_idle = 0; |
922 | last_jiffies = jiffies; | ||
923 | last_stime = current->stime; | ||
924 | } | 922 | } |
925 | 923 | ||
924 | last_jiffies = jiffies; | ||
925 | last_stime = stime; | ||
926 | |||
926 | bucket = IDLE_LEAKY_MAX; | 927 | bucket = IDLE_LEAKY_MAX; |
927 | 928 | ||
928 | while (!need_resched()) { | 929 | while (!need_resched()) { |
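
The apm_32.c change swaps direct reads of current->stime for the task_cputime() accessor, which keeps working once utime/stime are no longer stored as raw fields. The idle heuristic it feeds, reduced to a sketch (same arithmetic as the hunk above; kernel context assumed):

    #include <linux/sched.h>
    #include <linux/jiffies.h>

    static unsigned int idle_pct_since(unsigned long last_jiffies, cputime_t last_stime)
    {
            cputime_t stime;
            unsigned long elapsed = jiffies - last_jiffies;

            task_cputime(current, NULL, &stime);    /* utime unused: pass NULL */
            return elapsed ? (stime - last_stime) * 100 / elapsed : 0;
    }
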
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index a8f8fa9769d6..1e7e84a02eba 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c | |||
@@ -79,3 +79,10 @@ void __init init_hypervisor_platform(void) | |||
79 | if (x86_hyper->init_platform) | 79 | if (x86_hyper->init_platform) |
80 | x86_hyper->init_platform(); | 80 | x86_hyper->init_platform(); |
81 | } | 81 | } |
82 | |||
83 | bool __init hypervisor_x2apic_available(void) | ||
84 | { | ||
85 | return x86_hyper && | ||
86 | x86_hyper->x2apic_available && | ||
87 | x86_hyper->x2apic_available(); | ||
88 | } | ||
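
hypervisor_x2apic_available() gives each guest platform driver an optional vote: a driver that can guarantee working x2APIC without VT-d interrupt remapping sets the new callback, everyone else leaves it NULL and the NULL-checked chain above evaluates false. Registration side, sketched with a hypothetical driver (struct layout as in the vmware.c hunk later in this diff):

    static bool __init example_x2apic_available(void)
    {
            return false;   /* conservative default for this sketch */
    }

    static const struct hypervisor_x86 x86_hyper_example = {
            .name             = "Example",
            .detect           = example_detect,           /* hypothetical */
            .x2apic_available = example_x2apic_available,
    };
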
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index fe9edec6698a..84c1309c4c0c 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -298,8 +298,7 @@ struct _cache_attr { | |||
298 | unsigned int); | 298 | unsigned int); |
299 | }; | 299 | }; |
300 | 300 | ||
301 | #ifdef CONFIG_AMD_NB | 301 | #if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS) |
302 | |||
303 | /* | 302 | /* |
304 | * L3 cache descriptors | 303 | * L3 cache descriptors |
305 | */ | 304 | */ |
@@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, | |||
524 | static struct _cache_attr subcaches = | 523 | static struct _cache_attr subcaches = |
525 | __ATTR(subcaches, 0644, show_subcaches, store_subcaches); | 524 | __ATTR(subcaches, 0644, show_subcaches, store_subcaches); |
526 | 525 | ||
527 | #else /* CONFIG_AMD_NB */ | 526 | #else |
528 | #define amd_init_l3_cache(x, y) | 527 | #define amd_init_l3_cache(x, y) |
529 | #endif /* CONFIG_AMD_NB */ | 528 | #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ |
530 | 529 | ||
531 | static int | 530 | static int |
532 | __cpuinit cpuid4_cache_lookup_regs(int index, | 531 | __cpuinit cpuid4_cache_lookup_regs(int index, |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 6774c17a5576..bf0f01aea994 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -829,7 +829,7 @@ static inline void x86_assign_hw_event(struct perf_event *event, | |||
829 | } else { | 829 | } else { |
830 | hwc->config_base = x86_pmu_config_addr(hwc->idx); | 830 | hwc->config_base = x86_pmu_config_addr(hwc->idx); |
831 | hwc->event_base = x86_pmu_event_addr(hwc->idx); | 831 | hwc->event_base = x86_pmu_event_addr(hwc->idx); |
832 | hwc->event_base_rdpmc = hwc->idx; | 832 | hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx); |
833 | } | 833 | } |
834 | } | 834 | } |
835 | 835 | ||
@@ -1310,11 +1310,6 @@ static struct attribute_group x86_pmu_format_group = { | |||
1310 | .attrs = NULL, | 1310 | .attrs = NULL, |
1311 | }; | 1311 | }; |
1312 | 1312 | ||
1313 | struct perf_pmu_events_attr { | ||
1314 | struct device_attribute attr; | ||
1315 | u64 id; | ||
1316 | }; | ||
1317 | |||
1318 | /* | 1313 | /* |
1319 | * Remove all undefined events (x86_pmu.event_map(id) == 0) | 1314 | * Remove all undefined events (x86_pmu.event_map(id) == 0) |
1320 | * out of events_attr attributes. | 1315 | * out of events_attr attributes. |
@@ -1348,11 +1343,9 @@ static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *at | |||
1348 | #define EVENT_VAR(_id) event_attr_##_id | 1343 | #define EVENT_VAR(_id) event_attr_##_id |
1349 | #define EVENT_PTR(_id) &event_attr_##_id.attr.attr | 1344 | #define EVENT_PTR(_id) &event_attr_##_id.attr.attr |
1350 | 1345 | ||
1351 | #define EVENT_ATTR(_name, _id) \ | 1346 | #define EVENT_ATTR(_name, _id) \ |
1352 | static struct perf_pmu_events_attr EVENT_VAR(_id) = { \ | 1347 | PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \ |
1353 | .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ | 1348 | events_sysfs_show) |
1354 | .id = PERF_COUNT_HW_##_id, \ | ||
1355 | }; | ||
1356 | 1349 | ||
1357 | EVENT_ATTR(cpu-cycles, CPU_CYCLES ); | 1350 | EVENT_ATTR(cpu-cycles, CPU_CYCLES ); |
1358 | EVENT_ATTR(instructions, INSTRUCTIONS ); | 1351 | EVENT_ATTR(instructions, INSTRUCTIONS ); |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 115c1ea97746..7f5c75c2afdd 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -325,6 +325,8 @@ struct x86_pmu { | |||
325 | int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); | 325 | int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); |
326 | unsigned eventsel; | 326 | unsigned eventsel; |
327 | unsigned perfctr; | 327 | unsigned perfctr; |
328 | int (*addr_offset)(int index, bool eventsel); | ||
329 | int (*rdpmc_index)(int index); | ||
328 | u64 (*event_map)(int); | 330 | u64 (*event_map)(int); |
329 | int max_events; | 331 | int max_events; |
330 | int num_counters; | 332 | int num_counters; |
@@ -446,28 +448,21 @@ extern u64 __read_mostly hw_cache_extra_regs | |||
446 | 448 | ||
447 | u64 x86_perf_event_update(struct perf_event *event); | 449 | u64 x86_perf_event_update(struct perf_event *event); |
448 | 450 | ||
449 | static inline int x86_pmu_addr_offset(int index) | 451 | static inline unsigned int x86_pmu_config_addr(int index) |
450 | { | 452 | { |
451 | int offset; | 453 | return x86_pmu.eventsel + (x86_pmu.addr_offset ? |
452 | 454 | x86_pmu.addr_offset(index, true) : index); | |
453 | /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */ | ||
454 | alternative_io(ASM_NOP2, | ||
455 | "shll $1, %%eax", | ||
456 | X86_FEATURE_PERFCTR_CORE, | ||
457 | "=a" (offset), | ||
458 | "a" (index)); | ||
459 | |||
460 | return offset; | ||
461 | } | 455 | } |
462 | 456 | ||
463 | static inline unsigned int x86_pmu_config_addr(int index) | 457 | static inline unsigned int x86_pmu_event_addr(int index) |
464 | { | 458 | { |
465 | return x86_pmu.eventsel + x86_pmu_addr_offset(index); | 459 | return x86_pmu.perfctr + (x86_pmu.addr_offset ? |
460 | x86_pmu.addr_offset(index, false) : index); | ||
466 | } | 461 | } |
467 | 462 | ||
468 | static inline unsigned int x86_pmu_event_addr(int index) | 463 | static inline int x86_pmu_rdpmc_index(int index) |
469 | { | 464 | { |
470 | return x86_pmu.perfctr + x86_pmu_addr_offset(index); | 465 | return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index; |
471 | } | 466 | } |
472 | 467 | ||
473 | int x86_setup_perfctr(struct perf_event *event); | 468 | int x86_setup_perfctr(struct perf_event *event); |
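
With the two new hooks, the generic helpers fall back to base + index when a PMU installs nothing, and defer to the PMU otherwise. A worked example using the AMD values from perf_event_amd.c below (MSR numbers as I recall them from the MSR list; illustrative):

    /*
     * Core-extension AMD CPU: eventsel = MSR_F15H_PERF_CTL (0xc0010200),
     * addr_offset(3, true) = 3 << 1 = 6
     *   => x86_pmu_config_addr(3) = 0xc0010200 + 6 = 0xc0010206
     *
     * Legacy AMD CPU installs no addr_offset:
     *   => x86_pmu_config_addr(3) = MSR_K7_EVNTSEL0 (0xc0010000) + 3 = 0xc0010003
     */
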
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index c93bc4e813a0..dfdab42aed27 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -132,21 +132,102 @@ static u64 amd_pmu_event_map(int hw_event) | |||
132 | return amd_perfmon_event_map[hw_event]; | 132 | return amd_perfmon_event_map[hw_event]; |
133 | } | 133 | } |
134 | 134 | ||
135 | static int amd_pmu_hw_config(struct perf_event *event) | 135 | static struct event_constraint *amd_nb_event_constraint; |
136 | |||
137 | /* | ||
138 | * Previously calculated offsets | ||
139 | */ | ||
140 | static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly; | ||
141 | static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly; | ||
142 | static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly; | ||
143 | |||
144 | /* | ||
145 | * Legacy CPUs: | ||
146 | * 4 counters starting at 0xc0010000 each offset by 1 | ||
147 | * | ||
148 | * CPUs with core performance counter extensions: | ||
149 | * 6 counters starting at 0xc0010200 each offset by 2 | ||
150 | * | ||
151 | * CPUs with north bridge performance counter extensions: | ||
152 | * 4 additional counters starting at 0xc0010240 each offset by 2 | ||
153 | * (indexed right above either one of the above core counters) | ||
154 | */ | ||
155 | static inline int amd_pmu_addr_offset(int index, bool eventsel) | ||
136 | { | 156 | { |
137 | int ret; | 157 | int offset, first, base; |
138 | 158 | ||
139 | /* pass precise event sampling to ibs: */ | 159 | if (!index) |
140 | if (event->attr.precise_ip && get_ibs_caps()) | 160 | return index; |
141 | return -ENOENT; | 161 | |
162 | if (eventsel) | ||
163 | offset = event_offsets[index]; | ||
164 | else | ||
165 | offset = count_offsets[index]; | ||
166 | |||
167 | if (offset) | ||
168 | return offset; | ||
169 | |||
170 | if (amd_nb_event_constraint && | ||
171 | test_bit(index, amd_nb_event_constraint->idxmsk)) { | ||
172 | /* | ||
173 | * calculate the offset of NB counters with respect to | ||
174 | * base eventsel or perfctr | ||
175 | */ | ||
176 | |||
177 | first = find_first_bit(amd_nb_event_constraint->idxmsk, | ||
178 | X86_PMC_IDX_MAX); | ||
179 | |||
180 | if (eventsel) | ||
181 | base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel; | ||
182 | else | ||
183 | base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr; | ||
184 | |||
185 | offset = base + ((index - first) << 1); | ||
186 | } else if (!cpu_has_perfctr_core) | ||
187 | offset = index; | ||
188 | else | ||
189 | offset = index << 1; | ||
190 | |||
191 | if (eventsel) | ||
192 | event_offsets[index] = offset; | ||
193 | else | ||
194 | count_offsets[index] = offset; | ||
195 | |||
196 | return offset; | ||
197 | } | ||
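
A worked pass through amd_pmu_addr_offset() for a north bridge counter may help. Assume the core-extension case, where the NB constraint mask is 0x3C0 (counter indices 6-9, per amd_NBPMC96 below) and the MSRs are MSR_F15H_PERF_CTL = 0xc0010200, MSR_F15H_NB_PERF_CTL = 0xc0010240 (values as I recall them; illustrative):

    /* index = 7, eventsel = true */
    first  = 6;                               /* lowest bit set in 0x3C0 */
    base   = 0xc0010240 - 0xc0010200;         /* = 0x40, NB CTL vs core CTL */
    offset = 0x40 + ((7 - 6) << 1);           /* = 0x42 */
    /* => config MSR = 0xc0010200 + 0x42 = 0xc0010242, i.e. NB counter 1 */
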
198 | |||
199 | static inline int amd_pmu_rdpmc_index(int index) | ||
200 | { | ||
201 | int ret, first; | ||
202 | |||
203 | if (!index) | ||
204 | return index; | ||
205 | |||
206 | ret = rdpmc_indexes[index]; | ||
142 | 207 | ||
143 | ret = x86_pmu_hw_config(event); | ||
144 | if (ret) | 208 | if (ret) |
145 | return ret; | 209 | return ret; |
146 | 210 | ||
147 | if (has_branch_stack(event)) | 211 | if (amd_nb_event_constraint && |
148 | return -EOPNOTSUPP; | 212 | test_bit(index, amd_nb_event_constraint->idxmsk)) { |
213 | /* | ||
214 | * according to the manual, the ECX value of the NB counters is | ||
215 | * the index of the NB counter (0, 1, 2 or 3) plus 6 | ||
216 | */ | ||
217 | |||
218 | first = find_first_bit(amd_nb_event_constraint->idxmsk, | ||
219 | X86_PMC_IDX_MAX); | ||
220 | ret = index - first + 6; | ||
221 | } else | ||
222 | ret = index; | ||
223 | |||
224 | rdpmc_indexes[index] = ret; | ||
225 | |||
226 | return ret; | ||
227 | } | ||
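
The RDPMC mapping follows the same split: core counters pass straight through, NB counters are renumbered into the ECX range the instruction expects (NB counter n reads as ECX = n + 6). Using the non-core-extension mask 0xF0 (indices 4-7, per amd_NBPMC74 below):

    amd_pmu_rdpmc_index(4) == 4 - 4 + 6 == 6; /* NB counter 0 -> ECX 6 */
    amd_pmu_rdpmc_index(7) == 7 - 4 + 6 == 9; /* NB counter 3 -> ECX 9 */
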
149 | 228 | ||
229 | static int amd_core_hw_config(struct perf_event *event) | ||
230 | { | ||
150 | if (event->attr.exclude_host && event->attr.exclude_guest) | 231 | if (event->attr.exclude_host && event->attr.exclude_guest) |
151 | /* | 232 | /* |
152 | * When HO == GO == 1 the hardware treats that as GO == HO == 0 | 233 | * When HO == GO == 1 the hardware treats that as GO == HO == 0 |
@@ -156,14 +237,37 @@ static int amd_pmu_hw_config(struct perf_event *event) | |||
156 | event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | | 237 | event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | |
157 | ARCH_PERFMON_EVENTSEL_OS); | 238 | ARCH_PERFMON_EVENTSEL_OS); |
158 | else if (event->attr.exclude_host) | 239 | else if (event->attr.exclude_host) |
159 | event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY; | 240 | event->hw.config |= AMD64_EVENTSEL_GUESTONLY; |
160 | else if (event->attr.exclude_guest) | 241 | else if (event->attr.exclude_guest) |
161 | event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY; | 242 | event->hw.config |= AMD64_EVENTSEL_HOSTONLY; |
243 | |||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * NB counters do not support the following event select bits: | ||
249 | * Host/Guest only | ||
250 | * Counter mask | ||
251 | * Invert counter mask | ||
252 | * Edge detect | ||
253 | * OS/User mode | ||
254 | */ | ||
255 | static int amd_nb_hw_config(struct perf_event *event) | ||
256 | { | ||
257 | /* for NB, we only allow system wide counting mode */ | ||
258 | if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) | ||
259 | return -EINVAL; | ||
260 | |||
261 | if (event->attr.exclude_user || event->attr.exclude_kernel || | ||
262 | event->attr.exclude_host || event->attr.exclude_guest) | ||
263 | return -EINVAL; | ||
162 | 264 | ||
163 | if (event->attr.type != PERF_TYPE_RAW) | 265 | event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | |
164 | return 0; | 266 | ARCH_PERFMON_EVENTSEL_OS); |
165 | 267 | ||
166 | event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; | 268 | if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB | |
269 | ARCH_PERFMON_EVENTSEL_INT)) | ||
270 | return -EINVAL; | ||
167 | 271 | ||
168 | return 0; | 272 | return 0; |
169 | } | 273 | } |
@@ -181,6 +285,11 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc) | |||
181 | return (hwc->config & 0xe0) == 0xe0; | 285 | return (hwc->config & 0xe0) == 0xe0; |
182 | } | 286 | } |
183 | 287 | ||
288 | static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc) | ||
289 | { | ||
290 | return amd_nb_event_constraint && amd_is_nb_event(hwc); | ||
291 | } | ||
292 | |||
184 | static inline int amd_has_nb(struct cpu_hw_events *cpuc) | 293 | static inline int amd_has_nb(struct cpu_hw_events *cpuc) |
185 | { | 294 | { |
186 | struct amd_nb *nb = cpuc->amd_nb; | 295 | struct amd_nb *nb = cpuc->amd_nb; |
@@ -188,20 +297,37 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc) | |||
188 | return nb && nb->nb_id != -1; | 297 | return nb && nb->nb_id != -1; |
189 | } | 298 | } |
190 | 299 | ||
191 | static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | 300 | static int amd_pmu_hw_config(struct perf_event *event) |
192 | struct perf_event *event) | 301 | { |
302 | int ret; | ||
303 | |||
304 | /* pass precise event sampling to ibs: */ | ||
305 | if (event->attr.precise_ip && get_ibs_caps()) | ||
306 | return -ENOENT; | ||
307 | |||
308 | if (has_branch_stack(event)) | ||
309 | return -EOPNOTSUPP; | ||
310 | |||
311 | ret = x86_pmu_hw_config(event); | ||
312 | if (ret) | ||
313 | return ret; | ||
314 | |||
315 | if (event->attr.type == PERF_TYPE_RAW) | ||
316 | event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; | ||
317 | |||
318 | if (amd_is_perfctr_nb_event(&event->hw)) | ||
319 | return amd_nb_hw_config(event); | ||
320 | |||
321 | return amd_core_hw_config(event); | ||
322 | } | ||
323 | |||
324 | static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc, | ||
325 | struct perf_event *event) | ||
193 | { | 326 | { |
194 | struct hw_perf_event *hwc = &event->hw; | ||
195 | struct amd_nb *nb = cpuc->amd_nb; | 327 | struct amd_nb *nb = cpuc->amd_nb; |
196 | int i; | 328 | int i; |
197 | 329 | ||
198 | /* | 330 | /* |
199 | * only care about NB events | ||
200 | */ | ||
201 | if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) | ||
202 | return; | ||
203 | |||
204 | /* | ||
205 | * need to scan whole list because event may not have | 331 | * need to scan whole list because event may not have |
206 | * been assigned during scheduling | 332 | * been assigned during scheduling |
207 | * | 333 | * |
@@ -215,6 +341,19 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | |||
215 | } | 341 | } |
216 | } | 342 | } |
217 | 343 | ||
344 | static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc) | ||
345 | { | ||
346 | int core_id = cpu_data(smp_processor_id()).cpu_core_id; | ||
347 | |||
348 | /* deliver interrupts only to this core */ | ||
349 | if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) { | ||
350 | hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE; | ||
351 | hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK; | ||
352 | hwc->config |= (u64)(core_id) << | ||
353 | AMD64_EVENTSEL_INT_CORE_SEL_SHIFT; | ||
354 | } | ||
355 | } | ||
356 | |||
218 | /* | 357 | /* |
219 | * AMD64 NorthBridge events need special treatment because | 358 | * AMD64 NorthBridge events need special treatment because |
220 | * counter access needs to be synchronized across all cores | 359 | * counter access needs to be synchronized across all cores |
@@ -247,24 +386,24 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | |||
247 | * | 386 | * |
248 | * Given that resources are allocated (cmpxchg), they must be | 387 | * Given that resources are allocated (cmpxchg), they must be |
249 | * eventually freed for others to use. This is accomplished by | 388 | * eventually freed for others to use. This is accomplished by |
250 | * calling amd_put_event_constraints(). | 389 | * calling __amd_put_nb_event_constraints() |
251 | * | 390 | * |
252 | * Non NB events are not impacted by this restriction. | 391 | * Non NB events are not impacted by this restriction. |
253 | */ | 392 | */ |
254 | static struct event_constraint * | 393 | static struct event_constraint * |
255 | amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | 394 | __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, |
395 | struct event_constraint *c) | ||
256 | { | 396 | { |
257 | struct hw_perf_event *hwc = &event->hw; | 397 | struct hw_perf_event *hwc = &event->hw; |
258 | struct amd_nb *nb = cpuc->amd_nb; | 398 | struct amd_nb *nb = cpuc->amd_nb; |
259 | struct perf_event *old = NULL; | 399 | struct perf_event *old; |
260 | int max = x86_pmu.num_counters; | 400 | int idx, new = -1; |
261 | int i, j, k = -1; | ||
262 | 401 | ||
263 | /* | 402 | if (!c) |
264 | * if not NB event or no NB, then no constraints | 403 | c = &unconstrained; |
265 | */ | 404 | |
266 | if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) | 405 | if (cpuc->is_fake) |
267 | return &unconstrained; | 406 | return c; |
268 | 407 | ||
269 | /* | 408 | /* |
270 | * detect if already present, if so reuse | 409 | * detect if already present, if so reuse |
@@ -276,48 +415,36 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | |||
276 | * because of successive calls to x86_schedule_events() from | 415 | * because of successive calls to x86_schedule_events() from |
277 | * hw_perf_group_sched_in() without hw_perf_enable() | 416 | * hw_perf_group_sched_in() without hw_perf_enable() |
278 | */ | 417 | */ |
279 | for (i = 0; i < max; i++) { | 418 | for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) { |
280 | /* | 419 | if (new == -1 || hwc->idx == idx) |
281 | * keep track of first free slot | 420 | /* assign free slot, prefer hwc->idx */ |
282 | */ | 421 | old = cmpxchg(nb->owners + idx, NULL, event); |
283 | if (k == -1 && !nb->owners[i]) | 422 | else if (nb->owners[idx] == event) |
284 | k = i; | 423 | /* event already present */ |
424 | old = event; | ||
425 | else | ||
426 | continue; | ||
427 | |||
428 | if (old && old != event) | ||
429 | continue; | ||
430 | |||
431 | /* reassign to this slot */ | ||
432 | if (new != -1) | ||
433 | cmpxchg(nb->owners + new, event, NULL); | ||
434 | new = idx; | ||
285 | 435 | ||
286 | /* already present, reuse */ | 436 | /* already present, reuse */ |
287 | if (nb->owners[i] == event) | 437 | if (old == event) |
288 | goto done; | ||
289 | } | ||
290 | /* | ||
291 | * not present, so grab a new slot | ||
292 | * starting either at: | ||
293 | */ | ||
294 | if (hwc->idx != -1) { | ||
295 | /* previous assignment */ | ||
296 | i = hwc->idx; | ||
297 | } else if (k != -1) { | ||
298 | /* start from free slot found */ | ||
299 | i = k; | ||
300 | } else { | ||
301 | /* | ||
302 | * event not found, no slot found in | ||
303 | * first pass, try again from the | ||
304 | * beginning | ||
305 | */ | ||
306 | i = 0; | ||
307 | } | ||
308 | j = i; | ||
309 | do { | ||
310 | old = cmpxchg(nb->owners+i, NULL, event); | ||
311 | if (!old) | ||
312 | break; | 438 | break; |
313 | if (++i == max) | 439 | } |
314 | i = 0; | 440 | |
315 | } while (i != j); | 441 | if (new == -1) |
316 | done: | 442 | return &emptyconstraint; |
317 | if (!old) | 443 | |
318 | return &nb->event_constraints[i]; | 444 | if (amd_is_perfctr_nb_event(hwc)) |
319 | 445 | amd_nb_interrupt_hw_config(hwc); | |
320 | return &emptyconstraint; | 446 | |
447 | return &nb->event_constraints[new]; | ||
321 | } | 448 | } |
322 | 449 | ||
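
The rewritten allocation loop in __amd_get_nb_event_constraints() folds the old two-phase scan (find a free slot, then cmpxchg from a preferred start) into a single pass over the constraint's index mask: claim a slot with cmpxchg(), prefer hwc->idx, and release a previously claimed slot when a better one turns up. The core lock-free claim idiom, reduced to a standalone sketch (C11 atomics stand in for the kernel's cmpxchg(); the preferred-index handling is omitted):

    #include <stdatomic.h>
    #include <stddef.h>

    static int claim_slot(_Atomic(void *) *owners, int nslots, void *ev)
    {
            for (int i = 0; i < nslots; i++) {
                    void *expected = NULL;
                    /* claim iff free; a slot owned by someone else is skipped */
                    if (atomic_compare_exchange_strong(&owners[i], &expected, ev)
                        || expected == ev)
                            return i;       /* claimed, or already ours */
            }
            return -1;                      /* every slot owned by others */
    }
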
323 | static struct amd_nb *amd_alloc_nb(int cpu) | 450 | static struct amd_nb *amd_alloc_nb(int cpu) |
@@ -364,7 +491,7 @@ static void amd_pmu_cpu_starting(int cpu) | |||
364 | struct amd_nb *nb; | 491 | struct amd_nb *nb; |
365 | int i, nb_id; | 492 | int i, nb_id; |
366 | 493 | ||
367 | cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; | 494 | cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; |
368 | 495 | ||
369 | if (boot_cpu_data.x86_max_cores < 2) | 496 | if (boot_cpu_data.x86_max_cores < 2) |
370 | return; | 497 | return; |
@@ -407,6 +534,26 @@ static void amd_pmu_cpu_dead(int cpu) | |||
407 | } | 534 | } |
408 | } | 535 | } |
409 | 536 | ||
537 | static struct event_constraint * | ||
538 | amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
539 | { | ||
540 | /* | ||
541 | * if not NB event or no NB, then no constraints | ||
542 | */ | ||
543 | if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))) | ||
544 | return &unconstrained; | ||
545 | |||
546 | return __amd_get_nb_event_constraints(cpuc, event, | ||
547 | amd_nb_event_constraint); | ||
548 | } | ||
549 | |||
550 | static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | ||
551 | struct perf_event *event) | ||
552 | { | ||
553 | if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)) | ||
554 | __amd_put_nb_event_constraints(cpuc, event); | ||
555 | } | ||
556 | |||
410 | PMU_FORMAT_ATTR(event, "config:0-7,32-35"); | 557 | PMU_FORMAT_ATTR(event, "config:0-7,32-35"); |
411 | PMU_FORMAT_ATTR(umask, "config:8-15" ); | 558 | PMU_FORMAT_ATTR(umask, "config:8-15" ); |
412 | PMU_FORMAT_ATTR(edge, "config:18" ); | 559 | PMU_FORMAT_ATTR(edge, "config:18" ); |
@@ -496,6 +643,9 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, | |||
496 | static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); | 643 | static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); |
497 | static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); | 644 | static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); |
498 | 645 | ||
646 | static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0); | ||
647 | static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0); | ||
648 | |||
499 | static struct event_constraint * | 649 | static struct event_constraint * |
500 | amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) | 650 | amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) |
501 | { | 651 | { |
@@ -561,8 +711,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev | |||
561 | return &amd_f15_PMC20; | 711 | return &amd_f15_PMC20; |
562 | } | 712 | } |
563 | case AMD_EVENT_NB: | 713 | case AMD_EVENT_NB: |
564 | /* not yet implemented */ | 714 | return __amd_get_nb_event_constraints(cpuc, event, |
565 | return &emptyconstraint; | 715 | amd_nb_event_constraint); |
566 | default: | 716 | default: |
567 | return &emptyconstraint; | 717 | return &emptyconstraint; |
568 | } | 718 | } |
@@ -587,6 +737,8 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
587 | .schedule_events = x86_schedule_events, | 737 | .schedule_events = x86_schedule_events, |
588 | .eventsel = MSR_K7_EVNTSEL0, | 738 | .eventsel = MSR_K7_EVNTSEL0, |
589 | .perfctr = MSR_K7_PERFCTR0, | 739 | .perfctr = MSR_K7_PERFCTR0, |
740 | .addr_offset = amd_pmu_addr_offset, | ||
741 | .rdpmc_index = amd_pmu_rdpmc_index, | ||
590 | .event_map = amd_pmu_event_map, | 742 | .event_map = amd_pmu_event_map, |
591 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | 743 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), |
592 | .num_counters = AMD64_NUM_COUNTERS, | 744 | .num_counters = AMD64_NUM_COUNTERS, |
@@ -608,7 +760,7 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
608 | 760 | ||
609 | static int setup_event_constraints(void) | 761 | static int setup_event_constraints(void) |
610 | { | 762 | { |
611 | if (boot_cpu_data.x86 >= 0x15) | 763 | if (boot_cpu_data.x86 == 0x15) |
612 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; | 764 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; |
613 | return 0; | 765 | return 0; |
614 | } | 766 | } |
@@ -638,6 +790,23 @@ static int setup_perfctr_core(void) | |||
638 | return 0; | 790 | return 0; |
639 | } | 791 | } |
640 | 792 | ||
793 | static int setup_perfctr_nb(void) | ||
794 | { | ||
795 | if (!cpu_has_perfctr_nb) | ||
796 | return -ENODEV; | ||
797 | |||
798 | x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB; | ||
799 | |||
800 | if (cpu_has_perfctr_core) | ||
801 | amd_nb_event_constraint = &amd_NBPMC96; | ||
802 | else | ||
803 | amd_nb_event_constraint = &amd_NBPMC74; | ||
804 | |||
805 | printk(KERN_INFO "perf: AMD northbridge performance counters detected\n"); | ||
806 | |||
807 | return 0; | ||
808 | } | ||
809 | |||
641 | __init int amd_pmu_init(void) | 810 | __init int amd_pmu_init(void) |
642 | { | 811 | { |
643 | /* Performance-monitoring supported from K7 and later: */ | 812 | /* Performance-monitoring supported from K7 and later: */ |
@@ -648,6 +817,7 @@ __init int amd_pmu_init(void) | |||
648 | 817 | ||
649 | setup_event_constraints(); | 818 | setup_event_constraints(); |
650 | setup_perfctr_core(); | 819 | setup_perfctr_core(); |
820 | setup_perfctr_nb(); | ||
651 | 821 | ||
652 | /* Events are common for all AMDs */ | 822 | /* Events are common for all AMDs */ |
653 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, | 823 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, |
@@ -678,7 +848,7 @@ void amd_pmu_disable_virt(void) | |||
678 | * SVM is disabled the Guest-only bits still gets set and the counter | 848 | * SVM is disabled the Guest-only bits still gets set and the counter |
679 | * will not count anything. | 849 | * will not count anything. |
680 | */ | 850 | */ |
681 | cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; | 851 | cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; |
682 | 852 | ||
683 | /* Reload all events */ | 853 | /* Reload all events */ |
684 | x86_pmu_disable_all(); | 854 | x86_pmu_disable_all(); |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 93b9e1181f83..4914e94ad6e8 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void) | |||
2019 | break; | 2019 | break; |
2020 | 2020 | ||
2021 | case 28: /* Atom */ | 2021 | case 28: /* Atom */ |
2022 | case 54: /* Cedariew */ | 2022 | case 38: /* Lincroft */ |
2023 | case 39: /* Penwell */ | ||
2024 | case 53: /* Cloverview */ | ||
2025 | case 54: /* Cedarview */ | ||
2023 | memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, | 2026 | memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, |
2024 | sizeof(hw_cache_event_ids)); | 2027 | sizeof(hw_cache_event_ids)); |
2025 | 2028 | ||
@@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void) | |||
2084 | pr_cont("SandyBridge events, "); | 2087 | pr_cont("SandyBridge events, "); |
2085 | break; | 2088 | break; |
2086 | case 58: /* IvyBridge */ | 2089 | case 58: /* IvyBridge */ |
2090 | case 62: /* IvyBridge EP */ | ||
2087 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | 2091 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
2088 | sizeof(hw_cache_event_ids)); | 2092 | sizeof(hw_cache_event_ids)); |
2089 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, | 2093 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, |
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index f2af39f5dc3d..4820c232a0b9 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c | |||
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] = | |||
19 | 19 | ||
20 | }; | 20 | }; |
21 | 21 | ||
22 | static __initconst u64 p6_hw_cache_event_ids | 22 | static u64 p6_hw_cache_event_ids |
23 | [PERF_COUNT_HW_CACHE_MAX] | 23 | [PERF_COUNT_HW_CACHE_MAX] |
24 | [PERF_COUNT_HW_CACHE_OP_MAX] | 24 | [PERF_COUNT_HW_CACHE_OP_MAX] |
25 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 25 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index d22d0c4edcfd..03a36321ec54 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
@@ -33,6 +33,9 @@ | |||
33 | 33 | ||
34 | #define VMWARE_PORT_CMD_GETVERSION 10 | 34 | #define VMWARE_PORT_CMD_GETVERSION 10 |
35 | #define VMWARE_PORT_CMD_GETHZ 45 | 35 | #define VMWARE_PORT_CMD_GETHZ 45 |
36 | #define VMWARE_PORT_CMD_GETVCPU_INFO 68 | ||
37 | #define VMWARE_PORT_CMD_LEGACY_X2APIC 3 | ||
38 | #define VMWARE_PORT_CMD_VCPU_RESERVED 31 | ||
36 | 39 | ||
37 | #define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \ | 40 | #define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \ |
38 | __asm__("inl (%%dx)" : \ | 41 | __asm__("inl (%%dx)" : \ |
@@ -125,10 +128,20 @@ static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c) | |||
125 | set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); | 128 | set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); |
126 | } | 129 | } |
127 | 130 | ||
131 | /* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */ | ||
132 | static bool __init vmware_legacy_x2apic_available(void) | ||
133 | { | ||
134 | uint32_t eax, ebx, ecx, edx; | ||
135 | VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx); | ||
136 | return (eax & (1 << VMWARE_PORT_CMD_VCPU_RESERVED)) == 0 && | ||
137 | (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0; | ||
138 | } | ||
139 | |||
128 | const __refconst struct hypervisor_x86 x86_hyper_vmware = { | 140 | const __refconst struct hypervisor_x86 x86_hyper_vmware = { |
129 | .name = "VMware", | 141 | .name = "VMware", |
130 | .detect = vmware_platform, | 142 | .detect = vmware_platform, |
131 | .set_cpu_features = vmware_set_cpu_features, | 143 | .set_cpu_features = vmware_set_cpu_features, |
132 | .init_platform = vmware_platform_setup, | 144 | .init_platform = vmware_platform_setup, |
145 | .x2apic_available = vmware_legacy_x2apic_available, | ||
133 | }; | 146 | }; |
134 | EXPORT_SYMBOL(x86_hyper_vmware); | 147 | EXPORT_SYMBOL(x86_hyper_vmware); |
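
On the consumer side, generic APIC code can now ask the hypervisor layer whether x2APIC is safe in the absence of VT-d interrupt remapping; the VMware backdoor probe above is the only driver feeding it in this series. Call-site sketch (try_x2apic() and the enable path are hypothetical; hypervisor_x2apic_available() is the real helper from the hypervisor.c hunk above):

    static void try_x2apic(void)
    {
            if (x2apic_supported() &&
                (irq_remapping_enabled || hypervisor_x2apic_available()))
                    enable_x2apic();        /* hypothetical enable path */
    }
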
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 07a7a04529bc..cb3c591339aa 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -1781,6 +1781,7 @@ first_nmi: | |||
1781 | * Leave room for the "copied" frame | 1781 | * Leave room for the "copied" frame |
1782 | */ | 1782 | */ |
1783 | subq $(5*8), %rsp | 1783 | subq $(5*8), %rsp |
1784 | CFI_ADJUST_CFA_OFFSET 5*8 | ||
1784 | 1785 | ||
1785 | /* Copy the stack frame to the Saved frame */ | 1786 | /* Copy the stack frame to the Saved frame */ |
1786 | .rept 5 | 1787 | .rept 5 |
@@ -1863,10 +1864,8 @@ end_repeat_nmi: | |||
1863 | nmi_swapgs: | 1864 | nmi_swapgs: |
1864 | SWAPGS_UNSAFE_STACK | 1865 | SWAPGS_UNSAFE_STACK |
1865 | nmi_restore: | 1866 | nmi_restore: |
1866 | RESTORE_ALL 8 | 1867 | /* Pop the extra iret frame at once */ |
1867 | 1868 | RESTORE_ALL 6*8 | |
1868 | /* Pop the extra iret frame */ | ||
1869 | addq $(5*8), %rsp | ||
1870 | 1869 | ||
1871 | /* Clear the NMI executing stack variable */ | 1870 | /* Clear the NMI executing stack variable */ |
1872 | movq $0, 5*8(%rsp) | 1871 | movq $0, 5*8(%rsp) |
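
The entry_64.S change is pure stack arithmetic: RESTORE_ALL's argument is the number of extra bytes to discard after the register pops, so the old 8-byte skip plus the separate addq of the 5*8-byte copied NMI frame collapse into one adjustment:

    /* 8 + 5*8 = 48 = 6*8 bytes, hence RESTORE_ALL 6*8 */
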
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 8e7f6556028f..3c3f58a0808f 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -300,37 +300,52 @@ ENTRY(startup_32_smp) | |||
300 | leal -__PAGE_OFFSET(%ecx),%esp | 300 | leal -__PAGE_OFFSET(%ecx),%esp |
301 | 301 | ||
302 | default_entry: | 302 | default_entry: |
303 | #define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ | ||
304 | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ | ||
305 | X86_CR0_PG) | ||
306 | movl $(CR0_STATE & ~X86_CR0_PG),%eax | ||
307 | movl %eax,%cr0 | ||
308 | |||
309 | /* | ||
310 | * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave | ||
311 | * bits like NT set. This would confuse the debugger if this code is traced. So | ||
312 | * initialize them properly now before switching to protected mode. That means | ||
313 | * DF in particular (even though we have cleared it earlier after copying the | ||
314 | * command line) because GCC expects it. | ||
315 | */ | ||
316 | pushl $0 | ||
317 | popfl | ||
318 | |||
303 | /* | 319 | /* |
304 | * New page tables may be in 4Mbyte page mode and may | 320 | * New page tables may be in 4Mbyte page mode and may be using the global pages. |
305 | * be using the global pages. | ||
306 | * | 321 | * |
307 | * NOTE! If we are on a 486 we may have no cr4 at all! | 322 | * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists |
308 | * Specifically, cr4 exists if and only if CPUID exists | 323 | * if and only if CPUID exists and has flags other than the FPU flag set. |
309 | * and has flags other than the FPU flag set. | ||
310 | */ | 324 | */ |
325 | movl $-1,pa(X86_CPUID) # preset CPUID level | ||
311 | movl $X86_EFLAGS_ID,%ecx | 326 | movl $X86_EFLAGS_ID,%ecx |
312 | pushl %ecx | 327 | pushl %ecx |
313 | popfl | 328 | popfl # set EFLAGS=ID |
314 | pushfl | 329 | pushfl |
315 | popl %eax | 330 | popl %eax # get EFLAGS |
316 | pushl $0 | 331 | testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remain set? |
317 | popfl | 332 | jz enable_paging # hw disallowed setting of ID bit |
318 | pushfl | 333 | # which means no CPUID and no CR4 |
319 | popl %edx | 334 | |
320 | xorl %edx,%eax | 335 | xorl %eax,%eax |
321 | testl %ecx,%eax | 336 | cpuid |
322 | jz 6f # No ID flag = no CPUID = no CR4 | 337 | movl %eax,pa(X86_CPUID) # save largest std CPUID function |
323 | 338 | ||
324 | movl $1,%eax | 339 | movl $1,%eax |
325 | cpuid | 340 | cpuid |
326 | andl $~1,%edx # Ignore CPUID.FPU | 341 | andl $~1,%edx # Ignore CPUID.FPU |
327 | jz 6f # No flags or only CPUID.FPU = no CR4 | 342 | jz enable_paging # No flags or only CPUID.FPU = no CR4 |
328 | 343 | ||
329 | movl pa(mmu_cr4_features),%eax | 344 | movl pa(mmu_cr4_features),%eax |
330 | movl %eax,%cr4 | 345 | movl %eax,%cr4 |
331 | 346 | ||
332 | testb $X86_CR4_PAE, %al # check if PAE is enabled | 347 | testb $X86_CR4_PAE, %al # check if PAE is enabled |
333 | jz 6f | 348 | jz enable_paging |
334 | 349 | ||
335 | /* Check if extended functions are implemented */ | 350 | /* Check if extended functions are implemented */ |
336 | movl $0x80000000, %eax | 351 | movl $0x80000000, %eax |
@@ -338,7 +353,7 @@ default_entry: | |||
338 | /* Value must be in the range 0x80000001 to 0x8000ffff */ | 353 | /* Value must be in the range 0x80000001 to 0x8000ffff */ |
339 | subl $0x80000001, %eax | 354 | subl $0x80000001, %eax |
340 | cmpl $(0x8000ffff-0x80000001), %eax | 355 | cmpl $(0x8000ffff-0x80000001), %eax |
341 | ja 6f | 356 | ja enable_paging |
342 | 357 | ||
343 | /* Clear bogus XD_DISABLE bits */ | 358 | /* Clear bogus XD_DISABLE bits */ |
344 | call verify_cpu | 359 | call verify_cpu |
@@ -347,7 +362,7 @@ default_entry: | |||
347 | cpuid | 362 | cpuid |
348 | /* Execute Disable bit supported? */ | 363 | /* Execute Disable bit supported? */ |
349 | btl $(X86_FEATURE_NX & 31), %edx | 364 | btl $(X86_FEATURE_NX & 31), %edx |
350 | jnc 6f | 365 | jnc enable_paging |
351 | 366 | ||
352 | /* Setup EFER (Extended Feature Enable Register) */ | 367 | /* Setup EFER (Extended Feature Enable Register) */ |
353 | movl $MSR_EFER, %ecx | 368 | movl $MSR_EFER, %ecx |
@@ -357,15 +372,14 @@ default_entry: | |||
357 | /* Make changes effective */ | 372 | /* Make changes effective */ |
358 | wrmsr | 373 | wrmsr |
359 | 374 | ||
360 | 6: | 375 | enable_paging: |
361 | 376 | ||
362 | /* | 377 | /* |
363 | * Enable paging | 378 | * Enable paging |
364 | */ | 379 | */ |
365 | movl $pa(initial_page_table), %eax | 380 | movl $pa(initial_page_table), %eax |
366 | movl %eax,%cr3 /* set the page table pointer.. */ | 381 | movl %eax,%cr3 /* set the page table pointer.. */ |
367 | movl %cr0,%eax | 382 | movl $CR0_STATE,%eax |
368 | orl $X86_CR0_PG,%eax | ||
369 | movl %eax,%cr0 /* ..and set paging (PG) bit */ | 383 | movl %eax,%cr0 /* ..and set paging (PG) bit */ |
370 | ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ | 384 | ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ |
371 | 1: | 385 | 1: |
@@ -373,14 +387,6 @@ default_entry: | |||
373 | addl $__PAGE_OFFSET, %esp | 387 | addl $__PAGE_OFFSET, %esp |
374 | 388 | ||
375 | /* | 389 | /* |
376 | * Initialize eflags. Some BIOS's leave bits like NT set. This would | ||
377 | * confuse the debugger if this code is traced. | ||
378 | * XXX - best to initialize before switching to protected mode. | ||
379 | */ | ||
380 | pushl $0 | ||
381 | popfl | ||
382 | |||
383 | /* | ||
384 | * start system 32-bit setup. We need to re-do some of the things done | 390 | * start system 32-bit setup. We need to re-do some of the things done |
385 | * in 16-bit mode for the "real" operations. | 391 | * in 16-bit mode for the "real" operations. |
386 | */ | 392 | */ |
@@ -389,31 +395,11 @@ default_entry: | |||
389 | jz 1f # Did we do this already? | 395 | jz 1f # Did we do this already? |
390 | call *%eax | 396 | call *%eax |
391 | 1: | 397 | 1: |
392 | 398 | ||
393 | /* check if it is 486 or 386. */ | ||
394 | /* | 399 | /* |
395 | * XXX - this does a lot of unnecessary setup. Alignment checks don't | 400 | * Check if it is 486 |
396 | * apply at our cpl of 0 and the stack ought to be aligned already, and | ||
397 | * we don't need to preserve eflags. | ||
398 | */ | 401 | */ |
399 | movl $-1,X86_CPUID # -1 for no CPUID initially | 402 | cmpl $-1,X86_CPUID |
400 | movb $3,X86 # at least 386 | ||
401 | pushfl # push EFLAGS | ||
402 | popl %eax # get EFLAGS | ||
403 | movl %eax,%ecx # save original EFLAGS | ||
404 | xorl $0x240000,%eax # flip AC and ID bits in EFLAGS | ||
405 | pushl %eax # copy to EFLAGS | ||
406 | popfl # set EFLAGS | ||
407 | pushfl # get new EFLAGS | ||
408 | popl %eax # put it in eax | ||
409 | xorl %ecx,%eax # change in flags | ||
410 | pushl %ecx # restore original EFLAGS | ||
411 | popfl | ||
412 | testl $0x40000,%eax # check if AC bit changed | ||
413 | je is386 | ||
414 | |||
415 | movb $4,X86 # at least 486 | ||
416 | testl $0x200000,%eax # check if ID bit changed | ||
417 | je is486 | 403 | je is486 |
418 | 404 | ||
419 | /* get vendor info */ | 405 | /* get vendor info */ |
@@ -439,11 +425,10 @@ default_entry: | |||
439 | movb %cl,X86_MASK | 425 | movb %cl,X86_MASK |
440 | movl %edx,X86_CAPABILITY | 426 | movl %edx,X86_CAPABILITY |
441 | 427 | ||
442 | is486: movl $0x50022,%ecx # set AM, WP, NE and MP | 428 | is486: |
443 | jmp 2f | 429 | movb $4,X86 |
444 | 430 | movl $0x50022,%ecx # set AM, WP, NE and MP | |
445 | is386: movl $2,%ecx # set MP | 431 | movl %cr0,%eax |
446 | 2: movl %cr0,%eax | ||
447 | andl $0x80000011,%eax # Save PG,PE,ET | 432 | andl $0x80000011,%eax # Save PG,PE,ET |
448 | orl %ecx,%eax | 433 | orl %ecx,%eax |
449 | movl %eax,%cr0 | 434 | movl %eax,%cr0 |
@@ -468,7 +453,6 @@ is386: movl $2,%ecx # set MP | |||
468 | xorl %eax,%eax # Clear LDT | 453 | xorl %eax,%eax # Clear LDT |
469 | lldt %ax | 454 | lldt %ax |
470 | 455 | ||
471 | cld # gcc2 wants the direction flag cleared at all times | ||
472 | pushl $0 # fake return address for unwinder | 456 | pushl $0 # fake return address for unwinder |
473 | jmp *(initial_code) | 457 | jmp *(initial_code) |
474 | 458 | ||
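
The rewritten head_32.S probe is the classic EFLAGS.ID test: only CPUs with CPUID allow the ID bit (bit 21) to be set, so writing EFLAGS = ID and reading it back suffices, and the old four-pushf dance that also toggled AC to tell a 386 from a 486 disappears along with 386 support. The same probe sketched in C with inline asm (i386, boot context with interrupts already off; illustrative only):

    /* Returns nonzero when the CPU implements CPUID. */
    static int have_cpuid(void)
    {
            unsigned long eflags;
            unsigned long id = 0x200000;    /* X86_EFLAGS_ID */

            __asm__ __volatile__(
                    "pushl %1\n\t"
                    "popfl\n\t"             /* try EFLAGS = ID */
                    "pushfl\n\t"
                    "popl %0"               /* read it back */
                    : "=r" (eflags)
                    : "r" (id)
                    : "cc");
            return eflags & id;             /* ID stuck set => CPUID exists */
    }
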
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index e28670f9a589..da85a8e830a1 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -478,7 +478,7 @@ static int hpet_msi_next_event(unsigned long delta, | |||
478 | 478 | ||
479 | static int hpet_setup_msi_irq(unsigned int irq) | 479 | static int hpet_setup_msi_irq(unsigned int irq) |
480 | { | 480 | { |
481 | if (arch_setup_hpet_msi(irq, hpet_blockid)) { | 481 | if (x86_msi.setup_hpet_msi(irq, hpet_blockid)) { |
482 | destroy_irq(irq); | 482 | destroy_irq(irq); |
483 | return -EINVAL; | 483 | return -EINVAL; |
484 | } | 484 | } |
diff --git a/arch/x86/kernel/kprobes/Makefile b/arch/x86/kernel/kprobes/Makefile new file mode 100644 index 000000000000..0d33169cc1a2 --- /dev/null +++ b/arch/x86/kernel/kprobes/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | # | ||
2 | # Makefile for kernel probes | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_KPROBES) += core.o | ||
6 | obj-$(CONFIG_OPTPROBES) += opt.o | ||
7 | obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o | ||
diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes/common.h index 3230b68ef29a..2e9d4b5af036 100644 --- a/arch/x86/kernel/kprobes-common.h +++ b/arch/x86/kernel/kprobes/common.h | |||
@@ -99,4 +99,15 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig | |||
99 | return addr; | 99 | return addr; |
100 | } | 100 | } |
101 | #endif | 101 | #endif |
102 | |||
103 | #ifdef CONFIG_KPROBES_ON_FTRACE | ||
104 | extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
105 | struct kprobe_ctlblk *kcb); | ||
106 | #else | ||
107 | static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
108 | struct kprobe_ctlblk *kcb) | ||
109 | { | ||
110 | return 0; | ||
111 | } | ||
112 | #endif | ||
102 | #endif | 113 | #endif |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes/core.c index 57916c0d3cf6..e124554598ee 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -58,7 +58,7 @@ | |||
58 | #include <asm/insn.h> | 58 | #include <asm/insn.h> |
59 | #include <asm/debugreg.h> | 59 | #include <asm/debugreg.h> |
60 | 60 | ||
61 | #include "kprobes-common.h" | 61 | #include "common.h" |
62 | 62 | ||
63 | void jprobe_return_end(void); | 63 | void jprobe_return_end(void); |
64 | 64 | ||
@@ -78,7 +78,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | |||
78 | * Groups, and some special opcodes can not boost. | 78 | * Groups, and some special opcodes can not boost. |
79 | * This is non-const and volatile to keep gcc from statically | 79 | * This is non-const and volatile to keep gcc from statically |
80 | * optimizing it out, as variable_test_bit makes gcc think only | 80 | * optimizing it out, as variable_test_bit makes gcc think only |
81 | * *(unsigned long*) is used. | 81 | * *(unsigned long*) is used. |
82 | */ | 82 | */ |
83 | static volatile u32 twobyte_is_boostable[256 / 32] = { | 83 | static volatile u32 twobyte_is_boostable[256 / 32] = { |
84 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ | 84 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ |
@@ -117,7 +117,7 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) | |||
117 | struct __arch_relative_insn { | 117 | struct __arch_relative_insn { |
118 | u8 op; | 118 | u8 op; |
119 | s32 raddr; | 119 | s32 raddr; |
120 | } __attribute__((packed)) *insn; | 120 | } __packed *insn; |
121 | 121 | ||
122 | insn = (struct __arch_relative_insn *)from; | 122 | insn = (struct __arch_relative_insn *)from; |
123 | insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); | 123 | insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); |
@@ -541,23 +541,6 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb | |||
541 | return 1; | 541 | return 1; |
542 | } | 542 | } |
543 | 543 | ||
544 | #ifdef KPROBES_CAN_USE_FTRACE | ||
545 | static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
546 | struct kprobe_ctlblk *kcb) | ||
547 | { | ||
548 | /* | ||
549 | * Emulate singlestep (and also recover regs->ip) | ||
550 | * as if there is a 5byte nop | ||
551 | */ | ||
552 | regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; | ||
553 | if (unlikely(p->post_handler)) { | ||
554 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
555 | p->post_handler(p, regs, 0); | ||
556 | } | ||
557 | __this_cpu_write(current_kprobe, NULL); | ||
558 | } | ||
559 | #endif | ||
560 | |||
561 | /* | 544 | /* |
562 | * Interrupts are disabled on entry as trap3 is an interrupt gate and they | 545 | * Interrupts are disabled on entry as trap3 is an interrupt gate and they |
563 | * remain disabled throughout this function. | 546 | * remain disabled throughout this function. |
@@ -616,13 +599,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
616 | } else if (kprobe_running()) { | 599 | } else if (kprobe_running()) { |
617 | p = __this_cpu_read(current_kprobe); | 600 | p = __this_cpu_read(current_kprobe); |
618 | if (p->break_handler && p->break_handler(p, regs)) { | 601 | if (p->break_handler && p->break_handler(p, regs)) { |
619 | #ifdef KPROBES_CAN_USE_FTRACE | 602 | if (!skip_singlestep(p, regs, kcb)) |
620 | if (kprobe_ftrace(p)) { | 603 | setup_singlestep(p, regs, kcb, 0); |
621 | skip_singlestep(p, regs, kcb); | ||
622 | return 1; | ||
623 | } | ||
624 | #endif | ||
625 | setup_singlestep(p, regs, kcb, 0); | ||
626 | return 1; | 604 | return 1; |
627 | } | 605 | } |
628 | } /* else: not a kprobe fault; let the kernel handle it */ | 606 | } /* else: not a kprobe fault; let the kernel handle it */ |
@@ -1075,50 +1053,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
1075 | return 0; | 1053 | return 0; |
1076 | } | 1054 | } |
1077 | 1055 | ||
1078 | #ifdef KPROBES_CAN_USE_FTRACE | ||
1079 | /* Ftrace callback handler for kprobes */ | ||
1080 | void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, | ||
1081 | struct ftrace_ops *ops, struct pt_regs *regs) | ||
1082 | { | ||
1083 | struct kprobe *p; | ||
1084 | struct kprobe_ctlblk *kcb; | ||
1085 | unsigned long flags; | ||
1086 | |||
1087 | /* Disable irq for emulating a breakpoint and avoiding preempt */ | ||
1088 | local_irq_save(flags); | ||
1089 | |||
1090 | p = get_kprobe((kprobe_opcode_t *)ip); | ||
1091 | if (unlikely(!p) || kprobe_disabled(p)) | ||
1092 | goto end; | ||
1093 | |||
1094 | kcb = get_kprobe_ctlblk(); | ||
1095 | if (kprobe_running()) { | ||
1096 | kprobes_inc_nmissed_count(p); | ||
1097 | } else { | ||
1098 | /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ | ||
1099 | regs->ip = ip + sizeof(kprobe_opcode_t); | ||
1100 | |||
1101 | __this_cpu_write(current_kprobe, p); | ||
1102 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
1103 | if (!p->pre_handler || !p->pre_handler(p, regs)) | ||
1104 | skip_singlestep(p, regs, kcb); | ||
1105 | /* | ||
1106 | * If pre_handler returns !0, it sets regs->ip and | ||
1107 | * resets current kprobe. | ||
1108 | */ | ||
1109 | } | ||
1110 | end: | ||
1111 | local_irq_restore(flags); | ||
1112 | } | ||
1113 | |||
1114 | int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) | ||
1115 | { | ||
1116 | p->ainsn.insn = NULL; | ||
1117 | p->ainsn.boostable = -1; | ||
1118 | return 0; | ||
1119 | } | ||
1120 | #endif | ||
1121 | |||
1122 | int __init arch_init_kprobes(void) | 1056 | int __init arch_init_kprobes(void) |
1123 | { | 1057 | { |
1124 | return arch_init_optprobes(); | 1058 | return arch_init_optprobes(); |
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c new file mode 100644 index 000000000000..23ef5c556f06 --- /dev/null +++ b/arch/x86/kernel/kprobes/ftrace.c | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * Dynamic Ftrace based Kprobes Optimization | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) Hitachi Ltd., 2012 | ||
19 | */ | ||
20 | #include <linux/kprobes.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <linux/hardirq.h> | ||
23 | #include <linux/preempt.h> | ||
24 | #include <linux/ftrace.h> | ||
25 | |||
26 | #include "common.h" | ||
27 | |||
28 | static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
29 | struct kprobe_ctlblk *kcb) | ||
30 | { | ||
31 | /* | ||
32 | * Emulate singlestep (and also recover regs->ip) | ||
33 | * as if there is a 5byte nop | ||
34 | */ | ||
35 | regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; | ||
36 | if (unlikely(p->post_handler)) { | ||
37 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
38 | p->post_handler(p, regs, 0); | ||
39 | } | ||
40 | __this_cpu_write(current_kprobe, NULL); | ||
41 | return 1; | ||
42 | } | ||
43 | |||
44 | int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
45 | struct kprobe_ctlblk *kcb) | ||
46 | { | ||
47 | if (kprobe_ftrace(p)) | ||
48 | return __skip_singlestep(p, regs, kcb); | ||
49 | else | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | /* Ftrace callback handler for kprobes */ | ||
54 | void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, | ||
55 | struct ftrace_ops *ops, struct pt_regs *regs) | ||
56 | { | ||
57 | struct kprobe *p; | ||
58 | struct kprobe_ctlblk *kcb; | ||
59 | unsigned long flags; | ||
60 | |||
61 | /* Disable irq for emulating a breakpoint and avoiding preempt */ | ||
62 | local_irq_save(flags); | ||
63 | |||
64 | p = get_kprobe((kprobe_opcode_t *)ip); | ||
65 | if (unlikely(!p) || kprobe_disabled(p)) | ||
66 | goto end; | ||
67 | |||
68 | kcb = get_kprobe_ctlblk(); | ||
69 | if (kprobe_running()) { | ||
70 | kprobes_inc_nmissed_count(p); | ||
71 | } else { | ||
72 | /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ | ||
73 | regs->ip = ip + sizeof(kprobe_opcode_t); | ||
74 | |||
75 | __this_cpu_write(current_kprobe, p); | ||
76 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
77 | if (!p->pre_handler || !p->pre_handler(p, regs)) | ||
78 | __skip_singlestep(p, regs, kcb); | ||
79 | /* | ||
80 | * If pre_handler returns !0, it sets regs->ip and | ||
81 | * resets current kprobe. | ||
82 | */ | ||
83 | } | ||
84 | end: | ||
85 | local_irq_restore(flags); | ||
86 | } | ||
87 | |||
88 | int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) | ||
89 | { | ||
90 | p->ainsn.insn = NULL; | ||
91 | p->ainsn.boostable = -1; | ||
92 | return 0; | ||
93 | } | ||
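Nothing changes for kprobes users: a probe whose address lands on an ftrace-patched function entry is dispatched through kprobe_ftrace_handler() above instead of an int3 breakpoint, and the pre/post handlers fire as usual. A minimal module sketch, with do_fork as an arbitrary example symbol:

#include <linux/module.h>
#include <linux/kprobes.h>

/* runs with a regs snapshot as if a breakpoint had fired at the probe */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("probe hit at %p, ip=%lx\n", p->addr, regs->ip);
	return 0;	/* 0: let kprobes run the (emulated) single-step */
}

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* arbitrary example symbol */
	.pre_handler = handler_pre,
};

static int __init kp_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");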
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes/opt.c index c5e410eed403..76dc6f095724 100644 --- a/arch/x86/kernel/kprobes-opt.c +++ b/arch/x86/kernel/kprobes/opt.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <asm/insn.h> | 37 | #include <asm/insn.h> |
38 | #include <asm/debugreg.h> | 38 | #include <asm/debugreg.h> |
39 | 39 | ||
40 | #include "kprobes-common.h" | 40 | #include "common.h" |
41 | 41 | ||
42 | unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) | 42 | unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) |
43 | { | 43 | { |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 9c2bd8bd4b4c..2b44ea5f269d 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -505,6 +505,7 @@ static bool __init kvm_detect(void) | |||
505 | const struct hypervisor_x86 x86_hyper_kvm __refconst = { | 505 | const struct hypervisor_x86 x86_hyper_kvm __refconst = { |
506 | .name = "KVM", | 506 | .name = "KVM", |
507 | .detect = kvm_detect, | 507 | .detect = kvm_detect, |
508 | .x2apic_available = kvm_para_available, | ||
508 | }; | 509 | }; |
509 | EXPORT_SYMBOL_GPL(x86_hyper_kvm); | 510 | EXPORT_SYMBOL_GPL(x86_hyper_kvm); |
510 | 511 | ||
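Both hypervisor descriptors touched by this series (KVM here, Xen HVM further down) gain an optional .x2apic_available callback; descriptors that leave it NULL must keep working, so the caller presumably checks the pointer before invoking it. A user-space analogue of that optional-callback ops pattern:

#include <stdio.h>
#include <stdbool.h>

struct hyper_ops {
	const char *name;
	bool (*x2apic_available)(void);	/* optional, may be NULL */
};

static bool para_available(void) { return true; }

/* mirrors how an optional op is guarded at the call site */
static bool x2apic_avail(const struct hyper_ops *h)
{
	return h->x2apic_available ? h->x2apic_available() : false;
}

int main(void)
{
	struct hyper_ops kvm_like = { "KVM", para_available };
	struct hyper_ops legacy = { "legacy", NULL };

	printf("%s: %d, %s: %d\n", kvm_like.name, x2apic_avail(&kvm_like),
	       legacy.name, x2apic_avail(&legacy));
	return 0;
}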
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index a7c5661f8496..4929502c1372 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -174,6 +174,9 @@ static int msr_open(struct inode *inode, struct file *file) | |||
174 | unsigned int cpu; | 174 | unsigned int cpu; |
175 | struct cpuinfo_x86 *c; | 175 | struct cpuinfo_x86 *c; |
176 | 176 | ||
177 | if (!capable(CAP_SYS_RAWIO)) | ||
178 | return -EPERM; | ||
179 | |||
177 | cpu = iminor(file->f_path.dentry->d_inode); | 180 | cpu = iminor(file->f_path.dentry->d_inode); |
178 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) | 181 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) |
179 | return -ENXIO; /* No such CPU */ | 182 | return -ENXIO; /* No such CPU */ |
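The new check makes CAP_SYS_RAWIO a hard requirement for opening the MSR device, independent of the file mode. The read interface itself is unchanged: the file offset is interpreted as the MSR index and reads are 8 bytes wide. A user-space sketch reading IA32_TSC (MSR 0x10) on CPU 0:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {	/* now -EPERM without CAP_SYS_RAWIO */
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* the msr driver treats the file offset as the MSR address */
	if (pread(fd, &val, sizeof(val), 0x10) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("IA32_TSC = %#llx\n", (unsigned long long)val);
	close(fd);
	return 0;
}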
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 0f5dec5c80e0..872079a67e4d 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -56,7 +56,7 @@ struct device x86_dma_fallback_dev = { | |||
56 | EXPORT_SYMBOL(x86_dma_fallback_dev); | 56 | EXPORT_SYMBOL(x86_dma_fallback_dev); |
57 | 57 | ||
58 | /* Number of entries preallocated for DMA-API debugging */ | 58 | /* Number of entries preallocated for DMA-API debugging */ |
59 | #define PREALLOC_DMA_DEBUG_ENTRIES 32768 | 59 | #define PREALLOC_DMA_DEBUG_ENTRIES 65536 |
60 | 60 | ||
61 | int dma_set_mask(struct device *dev, u64 mask) | 61 | int dma_set_mask(struct device *dev, u64 mask) |
62 | { | 62 | { |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 4e8ba39eaf0f..76fa1e9a2b39 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -584,7 +584,7 @@ static void native_machine_emergency_restart(void) | |||
584 | break; | 584 | break; |
585 | 585 | ||
586 | case BOOT_EFI: | 586 | case BOOT_EFI: |
587 | if (efi_enabled) | 587 | if (efi_enabled(EFI_RUNTIME_SERVICES)) |
588 | efi.reset_system(reboot_mode ? | 588 | efi.reset_system(reboot_mode ? |
589 | EFI_RESET_WARM : | 589 | EFI_RESET_WARM : |
590 | EFI_RESET_COLD, | 590 | EFI_RESET_COLD, |
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 801602b5d745..2e8f3d3b5641 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -149,7 +149,6 @@ unsigned long mach_get_cmos_time(void) | |||
149 | if (century) { | 149 | if (century) { |
150 | century = bcd2bin(century); | 150 | century = bcd2bin(century); |
151 | year += century * 100; | 151 | year += century * 100; |
152 | printk(KERN_INFO "Extended CMOS year: %d\n", century * 100); | ||
153 | } else | 152 | } else |
154 | year += CMOS_YEARS_OFFS; | 153 | year += CMOS_YEARS_OFFS; |
155 | 154 | ||
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 00f6c1472b85..8b24289cc10c 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -807,15 +807,15 @@ void __init setup_arch(char **cmdline_p) | |||
807 | #ifdef CONFIG_EFI | 807 | #ifdef CONFIG_EFI |
808 | if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, | 808 | if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, |
809 | "EL32", 4)) { | 809 | "EL32", 4)) { |
810 | efi_enabled = 1; | 810 | set_bit(EFI_BOOT, &x86_efi_facility); |
811 | efi_64bit = false; | ||
812 | } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, | 811 | } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, |
813 | "EL64", 4)) { | 812 | "EL64", 4)) { |
814 | efi_enabled = 1; | 813 | set_bit(EFI_BOOT, &x86_efi_facility); |
815 | efi_64bit = true; | 814 | set_bit(EFI_64BIT, &x86_efi_facility); |
816 | } | 815 | } |
817 | if (efi_enabled && efi_memblock_x86_reserve_range()) | 816 | |
818 | efi_enabled = 0; | 817 | if (efi_enabled(EFI_BOOT)) |
818 | efi_memblock_x86_reserve_range(); | ||
819 | #endif | 819 | #endif |
820 | 820 | ||
821 | x86_init.oem.arch_setup(); | 821 | x86_init.oem.arch_setup(); |
@@ -888,7 +888,7 @@ void __init setup_arch(char **cmdline_p) | |||
888 | 888 | ||
889 | finish_e820_parsing(); | 889 | finish_e820_parsing(); |
890 | 890 | ||
891 | if (efi_enabled) | 891 | if (efi_enabled(EFI_BOOT)) |
892 | efi_init(); | 892 | efi_init(); |
893 | 893 | ||
894 | dmi_scan_machine(); | 894 | dmi_scan_machine(); |
@@ -971,7 +971,7 @@ void __init setup_arch(char **cmdline_p) | |||
971 | * The EFI specification says that boot service code won't be called | 971 | * The EFI specification says that boot service code won't be called |
972 | * after ExitBootServices(). This is, in fact, a lie. | 972 | * after ExitBootServices(). This is, in fact, a lie. |
973 | */ | 973 | */ |
974 | if (efi_enabled) | 974 | if (efi_enabled(EFI_MEMMAP)) |
975 | efi_reserve_boot_services(); | 975 | efi_reserve_boot_services(); |
976 | 976 | ||
977 | /* preallocate 4k for mptable mpc */ | 977 | /* preallocate 4k for mptable mpc */ |
@@ -1114,7 +1114,7 @@ void __init setup_arch(char **cmdline_p) | |||
1114 | 1114 | ||
1115 | #ifdef CONFIG_VT | 1115 | #ifdef CONFIG_VT |
1116 | #if defined(CONFIG_VGA_CONSOLE) | 1116 | #if defined(CONFIG_VGA_CONSOLE) |
1117 | if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) | 1117 | if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) |
1118 | conswitchp = &vga_con; | 1118 | conswitchp = &vga_con; |
1119 | #elif defined(CONFIG_DUMMY_CONSOLE) | 1119 | #elif defined(CONFIG_DUMMY_CONSOLE) |
1120 | conswitchp = &dummy_con; | 1120 | conswitchp = &dummy_con; |
@@ -1131,14 +1131,14 @@ void __init setup_arch(char **cmdline_p) | |||
1131 | register_refined_jiffies(CLOCK_TICK_RATE); | 1131 | register_refined_jiffies(CLOCK_TICK_RATE); |
1132 | 1132 | ||
1133 | #ifdef CONFIG_EFI | 1133 | #ifdef CONFIG_EFI |
1134 | /* Once setup is done above, disable efi_enabled on mismatched | 1134 | /* Once setup is done above, unmap the EFI memory map on |
1135 | * firmware/kernel archtectures since there is no support for | 1135 | * mismatched firmware/kernel archtectures since there is no |
1136 | * runtime services. | 1136 | * support for runtime services. |
1137 | */ | 1137 | */ |
1138 | if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) { | 1138 | if (efi_enabled(EFI_BOOT) && |
1139 | IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) { | ||
1139 | pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); | 1140 | pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); |
1140 | efi_unmap_memmap(); | 1141 | efi_unmap_memmap(); |
1141 | efi_enabled = 0; | ||
1142 | } | 1142 | } |
1143 | #endif | 1143 | #endif |
1144 | } | 1144 | } |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 97ef74b88e0f..dbded5aedb81 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -157,7 +157,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
157 | if (flags & MAP_FIXED) | 157 | if (flags & MAP_FIXED) |
158 | return addr; | 158 | return addr; |
159 | 159 | ||
160 | /* for MAP_32BIT mappings we force the legact mmap base */ | 160 | /* for MAP_32BIT mappings we force the legacy mmap base */ |
161 | if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) | 161 | if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) |
162 | goto bottomup; | 162 | goto bottomup; |
163 | 163 | ||
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 06ccb5073a3f..4b9ea101fe3b 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -623,7 +623,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | |||
623 | ns_now = __cycles_2_ns(tsc_now); | 623 | ns_now = __cycles_2_ns(tsc_now); |
624 | 624 | ||
625 | if (cpu_khz) { | 625 | if (cpu_khz) { |
626 | *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; | 626 | *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) + |
627 | cpu_khz / 2) / cpu_khz; | ||
627 | *offset = ns_now - mult_frac(tsc_now, *scale, | 628 | *offset = ns_now - mult_frac(tsc_now, *scale, |
628 | (1UL << CYC2NS_SCALE_FACTOR)); | 629 | (1UL << CYC2NS_SCALE_FACTOR)); |
629 | } | 630 | } |
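The old expression truncated, so the cycles-to-ns scale was biased low for almost every cpu_khz value; adding cpu_khz/2 before dividing rounds to nearest instead. A quick arithmetic check, assuming the CYC2NS_SCALE_FACTOR of 10 from asm/timer.h and a hypothetical 1.862 GHz part:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10	/* assumed, per asm/timer.h of this era */
#define NSEC_PER_MSEC 1000000UL

int main(void)
{
	unsigned long cpu_khz = 1862000;	/* hypothetical clock */
	unsigned long trunc = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;
	unsigned long round = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
			       cpu_khz / 2) / cpu_khz;

	/* exact quotient is 549.946...: truncation loses nearly a unit */
	printf("truncated=%lu rounded=%lu\n", trunc, round);
	return 0;
}

which prints truncated=549 rounded=550.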
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index c71025b67462..0ba4cfb4f412 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c | |||
@@ -680,8 +680,10 @@ static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) | |||
680 | if (auprobe->insn[i] == 0x66) | 680 | if (auprobe->insn[i] == 0x66) |
681 | continue; | 681 | continue; |
682 | 682 | ||
683 | if (auprobe->insn[i] == 0x90) | 683 | if (auprobe->insn[i] == 0x90) { |
684 | regs->ip += i + 1; | ||
684 | return true; | 685 | return true; |
686 | } | ||
685 | 687 | ||
686 | break; | 688 | break; |
687 | } | 689 | } |
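The bug being fixed: for a prefix-padded nop such as 66 66 90 the scan exits at i == 2, and since the step is being declared handled, regs->ip must advance past all i + 1 bytes; before this change the ip was left pointing at the probed instruction. The scan, isolated into a runnable sketch:

#include <stdio.h>
#include <stdint.h>

/* mirrors __skip_sstep(): 0x66 operand-size prefixes, then a 0x90 nop */
static int nop_len(const uint8_t *insn, int max)
{
	int i;

	for (i = 0; i < max; i++) {
		if (insn[i] == 0x66)
			continue;
		if (insn[i] == 0x90)
			return i + 1;	/* bytes to add to regs->ip */
		break;
	}
	return 0;	/* not a (prefixed) nop: cannot skip */
}

int main(void)
{
	uint8_t osp_nop[] = { 0x66, 0x66, 0x90 };
	printf("advance ip by %d bytes\n", nop_len(osp_nop, 3));
	return 0;
}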
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 7a3d075a814a..d065d67c2672 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/time.h> | 19 | #include <asm/time.h> |
20 | #include <asm/irq.h> | 20 | #include <asm/irq.h> |
21 | #include <asm/io_apic.h> | 21 | #include <asm/io_apic.h> |
22 | #include <asm/hpet.h> | ||
22 | #include <asm/pat.h> | 23 | #include <asm/pat.h> |
23 | #include <asm/tsc.h> | 24 | #include <asm/tsc.h> |
24 | #include <asm/iommu.h> | 25 | #include <asm/iommu.h> |
@@ -111,15 +112,22 @@ struct x86_platform_ops x86_platform = { | |||
111 | 112 | ||
112 | EXPORT_SYMBOL_GPL(x86_platform); | 113 | EXPORT_SYMBOL_GPL(x86_platform); |
113 | struct x86_msi_ops x86_msi = { | 114 | struct x86_msi_ops x86_msi = { |
114 | .setup_msi_irqs = native_setup_msi_irqs, | 115 | .setup_msi_irqs = native_setup_msi_irqs, |
115 | .teardown_msi_irq = native_teardown_msi_irq, | 116 | .compose_msi_msg = native_compose_msi_msg, |
116 | .teardown_msi_irqs = default_teardown_msi_irqs, | 117 | .teardown_msi_irq = native_teardown_msi_irq, |
117 | .restore_msi_irqs = default_restore_msi_irqs, | 118 | .teardown_msi_irqs = default_teardown_msi_irqs, |
119 | .restore_msi_irqs = default_restore_msi_irqs, | ||
120 | .setup_hpet_msi = default_setup_hpet_msi, | ||
118 | }; | 121 | }; |
119 | 122 | ||
120 | struct x86_io_apic_ops x86_io_apic_ops = { | 123 | struct x86_io_apic_ops x86_io_apic_ops = { |
121 | .init = native_io_apic_init_mappings, | 124 | .init = native_io_apic_init_mappings, |
122 | .read = native_io_apic_read, | 125 | .read = native_io_apic_read, |
123 | .write = native_io_apic_write, | 126 | .write = native_io_apic_write, |
124 | .modify = native_io_apic_modify, | 127 | .modify = native_io_apic_modify, |
128 | .disable = native_disable_io_apic, | ||
129 | .print_entries = native_io_apic_print_entries, | ||
130 | .set_affinity = native_ioapic_set_affinity, | ||
131 | .setup_entry = native_setup_ioapic_entry, | ||
132 | .eoi_ioapic_pin = native_eoi_ioapic_pin, | ||
125 | }; | 133 | }; |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 027088f2f7dd..fb674fd3fc22 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |||
748 | return; | 748 | return; |
749 | } | 749 | } |
750 | #endif | 750 | #endif |
751 | /* Kernel addresses are always protection faults: */ | ||
752 | if (address >= TASK_SIZE) | ||
753 | error_code |= PF_PROT; | ||
751 | 754 | ||
752 | if (unlikely(show_unhandled_signals)) | 755 | if (likely(show_unhandled_signals)) |
753 | show_signal_msg(regs, error_code, address, tsk); | 756 | show_signal_msg(regs, error_code, address, tsk); |
754 | 757 | ||
755 | /* Kernel addresses are always protection faults: */ | ||
756 | tsk->thread.cr2 = address; | 758 | tsk->thread.cr2 = address; |
757 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); | 759 | tsk->thread.error_code = error_code; |
758 | tsk->thread.trap_nr = X86_TRAP_PF; | 760 | tsk->thread.trap_nr = X86_TRAP_PF; |
759 | 761 | ||
760 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); | 762 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); |
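Setting the named PF_PROT bit explicitly for kernel addresses, instead of OR-ing a bare boolean into tsk->thread.error_code at the assignment, also means the adjusted value is what show_signal_msg() reports. A small decode of the relevant error-code bits, with an illustrative user/kernel split:

#include <stdio.h>

/* x86 page-fault error code bits, as named in the fault handler */
#define PF_PROT  (1UL << 0)	/* protection violation, not not-present */
#define PF_WRITE (1UL << 1)
#define PF_USER  (1UL << 2)

#define TASK_SIZE_DEMO 0x00007ffffffff000UL	/* illustrative split */

int main(void)
{
	unsigned long address = 0xffffffff81000000UL;	/* kernel text */
	unsigned long error_code = PF_USER | PF_WRITE;

	if (address >= TASK_SIZE_DEMO)	/* kernel addresses: always PROT */
		error_code |= PF_PROT;

	printf("error_code=%#lx\n", error_code);
	return 0;
}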
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 2ead3c8a4c84..d6eeead43758 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -605,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start, | |||
605 | } | 605 | } |
606 | 606 | ||
607 | if (pgd_changed) | 607 | if (pgd_changed) |
608 | sync_global_pgds(addr, end); | 608 | sync_global_pgds(addr, end - 1); |
609 | 609 | ||
610 | __flush_tlb_all(); | 610 | __flush_tlb_all(); |
611 | 611 | ||
@@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr) | |||
831 | if (pud_none(*pud)) | 831 | if (pud_none(*pud)) |
832 | return 0; | 832 | return 0; |
833 | 833 | ||
834 | if (pud_large(*pud)) | ||
835 | return pfn_valid(pud_pfn(*pud)); | ||
836 | |||
834 | pmd = pmd_offset(pud, addr); | 837 | pmd = pmd_offset(pud, addr); |
835 | if (pmd_none(*pmd)) | 838 | if (pmd_none(*pmd)) |
836 | return 0; | 839 | return 0; |
@@ -981,7 +984,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node) | |||
981 | } | 984 | } |
982 | 985 | ||
983 | } | 986 | } |
984 | sync_global_pgds((unsigned long)start_page, end); | 987 | sync_global_pgds((unsigned long)start_page, end - 1); |
985 | return 0; | 988 | return 0; |
986 | } | 989 | } |
987 | 990 | ||
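sync_global_pgds() presumably walks pgd slots from pgd_index(start) through pgd_index(end); with an exclusive, PGDIR-aligned end the old callers synced one slot too many, hence the switch to passing end - 1. The off-by-one in index arithmetic, with x86-64 constants:

#include <stdio.h>

#define PGDIR_SHIFT 39	/* x86-64: one pgd entry spans 512 GB */
#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & 511)

int main(void)
{
	unsigned long start = 0xffff880000000000UL;	/* direct-map base */
	unsigned long end = start + (1UL << PGDIR_SHIFT);	/* exactly 1 slot */

	printf("start=%lu end=%lu end-1=%lu\n",
	       pgd_index(start), pgd_index(end), pgd_index(end - 1));
	return 0;
}

which prints start=272 end=273 end-1=272: the exclusive bound names a slot the range never touches.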
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index ad4439145f85..928bf837040a 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -51,9 +51,6 @@ | |||
51 | 51 | ||
52 | #define EFI_DEBUG 1 | 52 | #define EFI_DEBUG 1 |
53 | 53 | ||
54 | int efi_enabled; | ||
55 | EXPORT_SYMBOL(efi_enabled); | ||
56 | |||
57 | struct efi __read_mostly efi = { | 54 | struct efi __read_mostly efi = { |
58 | .mps = EFI_INVALID_TABLE_ADDR, | 55 | .mps = EFI_INVALID_TABLE_ADDR, |
59 | .acpi = EFI_INVALID_TABLE_ADDR, | 56 | .acpi = EFI_INVALID_TABLE_ADDR, |
@@ -69,19 +66,28 @@ EXPORT_SYMBOL(efi); | |||
69 | 66 | ||
70 | struct efi_memory_map memmap; | 67 | struct efi_memory_map memmap; |
71 | 68 | ||
72 | bool efi_64bit; | ||
73 | |||
74 | static struct efi efi_phys __initdata; | 69 | static struct efi efi_phys __initdata; |
75 | static efi_system_table_t efi_systab __initdata; | 70 | static efi_system_table_t efi_systab __initdata; |
76 | 71 | ||
77 | static inline bool efi_is_native(void) | 72 | static inline bool efi_is_native(void) |
78 | { | 73 | { |
79 | return IS_ENABLED(CONFIG_X86_64) == efi_64bit; | 74 | return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT); |
75 | } | ||
76 | |||
77 | unsigned long x86_efi_facility; | ||
78 | |||
79 | /* | ||
80 | * Returns 1 if 'facility' is enabled, 0 otherwise. | ||
81 | */ | ||
82 | int efi_enabled(int facility) | ||
83 | { | ||
84 | return test_bit(facility, &x86_efi_facility) != 0; | ||
80 | } | 85 | } |
86 | EXPORT_SYMBOL(efi_enabled); | ||
81 | 87 | ||
82 | static int __init setup_noefi(char *arg) | 88 | static int __init setup_noefi(char *arg) |
83 | { | 89 | { |
84 | efi_enabled = 0; | 90 | clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); |
85 | return 0; | 91 | return 0; |
86 | } | 92 | } |
87 | early_param("noefi", setup_noefi); | 93 | early_param("noefi", setup_noefi); |
@@ -426,6 +432,7 @@ void __init efi_reserve_boot_services(void) | |||
426 | 432 | ||
427 | void __init efi_unmap_memmap(void) | 433 | void __init efi_unmap_memmap(void) |
428 | { | 434 | { |
435 | clear_bit(EFI_MEMMAP, &x86_efi_facility); | ||
429 | if (memmap.map) { | 436 | if (memmap.map) { |
430 | early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); | 437 | early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); |
431 | memmap.map = NULL; | 438 | memmap.map = NULL; |
@@ -460,7 +467,7 @@ void __init efi_free_boot_services(void) | |||
460 | 467 | ||
461 | static int __init efi_systab_init(void *phys) | 468 | static int __init efi_systab_init(void *phys) |
462 | { | 469 | { |
463 | if (efi_64bit) { | 470 | if (efi_enabled(EFI_64BIT)) { |
464 | efi_system_table_64_t *systab64; | 471 | efi_system_table_64_t *systab64; |
465 | u64 tmp = 0; | 472 | u64 tmp = 0; |
466 | 473 | ||
@@ -552,7 +559,7 @@ static int __init efi_config_init(u64 tables, int nr_tables) | |||
552 | void *config_tables, *tablep; | 559 | void *config_tables, *tablep; |
553 | int i, sz; | 560 | int i, sz; |
554 | 561 | ||
555 | if (efi_64bit) | 562 | if (efi_enabled(EFI_64BIT)) |
556 | sz = sizeof(efi_config_table_64_t); | 563 | sz = sizeof(efi_config_table_64_t); |
557 | else | 564 | else |
558 | sz = sizeof(efi_config_table_32_t); | 565 | sz = sizeof(efi_config_table_32_t); |
@@ -572,7 +579,7 @@ static int __init efi_config_init(u64 tables, int nr_tables) | |||
572 | efi_guid_t guid; | 579 | efi_guid_t guid; |
573 | unsigned long table; | 580 | unsigned long table; |
574 | 581 | ||
575 | if (efi_64bit) { | 582 | if (efi_enabled(EFI_64BIT)) { |
576 | u64 table64; | 583 | u64 table64; |
577 | guid = ((efi_config_table_64_t *)tablep)->guid; | 584 | guid = ((efi_config_table_64_t *)tablep)->guid; |
578 | table64 = ((efi_config_table_64_t *)tablep)->table; | 585 | table64 = ((efi_config_table_64_t *)tablep)->table; |
@@ -684,7 +691,6 @@ void __init efi_init(void) | |||
684 | if (boot_params.efi_info.efi_systab_hi || | 691 | if (boot_params.efi_info.efi_systab_hi || |
685 | boot_params.efi_info.efi_memmap_hi) { | 692 | boot_params.efi_info.efi_memmap_hi) { |
686 | pr_info("Table located above 4GB, disabling EFI.\n"); | 693 | pr_info("Table located above 4GB, disabling EFI.\n"); |
687 | efi_enabled = 0; | ||
688 | return; | 694 | return; |
689 | } | 695 | } |
690 | efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; | 696 | efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; |
@@ -694,10 +700,10 @@ void __init efi_init(void) | |||
694 | ((__u64)boot_params.efi_info.efi_systab_hi<<32)); | 700 | ((__u64)boot_params.efi_info.efi_systab_hi<<32)); |
695 | #endif | 701 | #endif |
696 | 702 | ||
697 | if (efi_systab_init(efi_phys.systab)) { | 703 | if (efi_systab_init(efi_phys.systab)) |
698 | efi_enabled = 0; | ||
699 | return; | 704 | return; |
700 | } | 705 | |
706 | set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); | ||
701 | 707 | ||
702 | /* | 708 | /* |
703 | * Show what we know for posterity | 709 | * Show what we know for posterity |
@@ -715,10 +721,10 @@ void __init efi_init(void) | |||
715 | efi.systab->hdr.revision >> 16, | 721 | efi.systab->hdr.revision >> 16, |
716 | efi.systab->hdr.revision & 0xffff, vendor); | 722 | efi.systab->hdr.revision & 0xffff, vendor); |
717 | 723 | ||
718 | if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) { | 724 | if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) |
719 | efi_enabled = 0; | ||
720 | return; | 725 | return; |
721 | } | 726 | |
727 | set_bit(EFI_CONFIG_TABLES, &x86_efi_facility); | ||
722 | 728 | ||
723 | /* | 729 | /* |
724 | * Note: We currently don't support runtime services on an EFI | 730 | * Note: We currently don't support runtime services on an EFI |
@@ -727,15 +733,17 @@ void __init efi_init(void) | |||
727 | 733 | ||
728 | if (!efi_is_native()) | 734 | if (!efi_is_native()) |
729 | pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); | 735 | pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); |
730 | else if (efi_runtime_init()) { | 736 | else { |
731 | efi_enabled = 0; | 737 | if (efi_runtime_init()) |
732 | return; | 738 | return; |
739 | set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); | ||
733 | } | 740 | } |
734 | 741 | ||
735 | if (efi_memmap_init()) { | 742 | if (efi_memmap_init()) |
736 | efi_enabled = 0; | ||
737 | return; | 743 | return; |
738 | } | 744 | |
745 | set_bit(EFI_MEMMAP, &x86_efi_facility); | ||
746 | |||
739 | #ifdef CONFIG_X86_32 | 747 | #ifdef CONFIG_X86_32 |
740 | if (efi_is_native()) { | 748 | if (efi_is_native()) { |
741 | x86_platform.get_wallclock = efi_get_time; | 749 | x86_platform.get_wallclock = efi_get_time; |
@@ -941,7 +949,7 @@ void __init efi_enter_virtual_mode(void) | |||
941 | * | 949 | * |
942 | * Call EFI services through wrapper functions. | 950 | * Call EFI services through wrapper functions. |
943 | */ | 951 | */ |
944 | efi.runtime_version = efi_systab.fw_revision; | 952 | efi.runtime_version = efi_systab.hdr.revision; |
945 | efi.get_time = virt_efi_get_time; | 953 | efi.get_time = virt_efi_get_time; |
946 | efi.set_time = virt_efi_set_time; | 954 | efi.set_time = virt_efi_set_time; |
947 | efi.get_wakeup_time = virt_efi_get_wakeup_time; | 955 | efi.get_wakeup_time = virt_efi_get_wakeup_time; |
@@ -969,6 +977,9 @@ u32 efi_mem_type(unsigned long phys_addr) | |||
969 | efi_memory_desc_t *md; | 977 | efi_memory_desc_t *md; |
970 | void *p; | 978 | void *p; |
971 | 979 | ||
980 | if (!efi_enabled(EFI_MEMMAP)) | ||
981 | return 0; | ||
982 | |||
972 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 983 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
973 | md = p; | 984 | md = p; |
974 | if ((md->phys_addr <= phys_addr) && | 985 | if ((md->phys_addr <= phys_addr) && |
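The big change in this file: the single global efi_enabled flag becomes a per-facility bitmask, so dropping one capability (efi_unmap_memmap() clearing EFI_MEMMAP, say) no longer disables EFI wholesale, and each facility is set only once its init step succeeds. A user-space analogue of the test_bit/set_bit pattern, with hypothetical facility numbering:

#include <stdio.h>

/* hypothetical numbering; the real values live in linux/efi.h */
enum { EFI_BOOT, EFI_64BIT, EFI_SYSTEM_TABLES, EFI_CONFIG_TABLES,
       EFI_RUNTIME_SERVICES, EFI_MEMMAP };

static unsigned long x86_efi_facility;

static void efi_set(int f)    { x86_efi_facility |= 1UL << f; }
static void efi_clear(int f)  { x86_efi_facility &= ~(1UL << f); }
static int efi_enabled(int f) { return (x86_efi_facility >> f) & 1; }

int main(void)
{
	efi_set(EFI_BOOT);
	efi_set(EFI_MEMMAP);
	efi_clear(EFI_MEMMAP);	/* e.g. after efi_unmap_memmap() */

	printf("boot=%d memmap=%d runtime=%d\n", efi_enabled(EFI_BOOT),
	       efi_enabled(EFI_MEMMAP), efi_enabled(EFI_RUNTIME_SERVICES));
	return 0;
}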
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 95fd505dfeb6..2b2003860615 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #include <asm/cacheflush.h> | 38 | #include <asm/cacheflush.h> |
39 | #include <asm/fixmap.h> | 39 | #include <asm/fixmap.h> |
40 | 40 | ||
41 | static pgd_t save_pgd __initdata; | 41 | static pgd_t *save_pgd __initdata; |
42 | static unsigned long efi_flags __initdata; | 42 | static unsigned long efi_flags __initdata; |
43 | 43 | ||
44 | static void __init early_code_mapping_set_exec(int executable) | 44 | static void __init early_code_mapping_set_exec(int executable) |
@@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable) | |||
61 | void __init efi_call_phys_prelog(void) | 61 | void __init efi_call_phys_prelog(void) |
62 | { | 62 | { |
63 | unsigned long vaddress; | 63 | unsigned long vaddress; |
64 | int pgd; | ||
65 | int n_pgds; | ||
64 | 66 | ||
65 | early_code_mapping_set_exec(1); | 67 | early_code_mapping_set_exec(1); |
66 | local_irq_save(efi_flags); | 68 | local_irq_save(efi_flags); |
67 | vaddress = (unsigned long)__va(0x0UL); | 69 | |
68 | save_pgd = *pgd_offset_k(0x0UL); | 70 | n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); |
69 | set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress)); | 71 | save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL); |
72 | |||
73 | for (pgd = 0; pgd < n_pgds; pgd++) { | ||
74 | save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE); | ||
75 | vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); | ||
76 | set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); | ||
77 | } | ||
70 | __flush_tlb_all(); | 78 | __flush_tlb_all(); |
71 | } | 79 | } |
72 | 80 | ||
@@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void) | |||
75 | /* | 83 | /* |
76 | * After the lock is released, the original page table is restored. | 84 | * After the lock is released, the original page table is restored. |
77 | */ | 85 | */ |
78 | set_pgd(pgd_offset_k(0x0UL), save_pgd); | 86 | int pgd; |
87 | int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); | ||
88 | for (pgd = 0; pgd < n_pgds; pgd++) | ||
89 | set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); | ||
90 | kfree(save_pgd); | ||
79 | __flush_tlb_all(); | 91 | __flush_tlb_all(); |
80 | local_irq_restore(efi_flags); | 92 | local_irq_restore(efi_flags); |
81 | early_code_mapping_set_exec(0); | 93 | early_code_mapping_set_exec(0); |
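Previously only pgd slot 0 was saved and aliased, so physical-mode EFI calls could only reach the first 512 GB of RAM; the rework mirrors the direct map over every pgd slot covering physical memory. The DIV_ROUND_UP sizing, checked in isolation:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PGDIR_SIZE (1UL << 39)	/* 512 GB per pgd slot on x86-64 */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long max_pfn = (64UL << 30) >> PAGE_SHIFT;	/* 64 GB box */

	printf("64 GB -> %lu pgd(s)\n",
	       DIV_ROUND_UP(max_pfn << PAGE_SHIFT, PGDIR_SIZE));

	max_pfn = (1UL << 40) >> PAGE_SHIFT;	/* 1 TB box */
	printf("1 TB  -> %lu pgds\n",
	       DIV_ROUND_UP(max_pfn << PAGE_SHIFT, PGDIR_SIZE));
	return 0;
}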
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index b8b3a37c80cd..dbbdca5f508c 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -1034,7 +1034,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp, | |||
1034 | * globally purge translation cache of a virtual address or all TLB's | 1034 | * globally purge translation cache of a virtual address or all TLB's |
1035 | * @cpumask: mask of all cpu's in which the address is to be removed | 1035 | * @cpumask: mask of all cpu's in which the address is to be removed |
1036 | * @mm: mm_struct containing virtual address range | 1036 | * @mm: mm_struct containing virtual address range |
1037 | * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu) | 1037 | * @start: start virtual address to be removed from TLB |
1038 | * @end: end virtual address to be remove from TLB | ||
1038 | * @cpu: the current cpu | 1039 | * @cpu: the current cpu |
1039 | * | 1040 | * |
1040 | * This is the entry point for initiating any UV global TLB shootdown. | 1041 | * This is the entry point for initiating any UV global TLB shootdown. |
@@ -1056,7 +1057,7 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp, | |||
1056 | */ | 1057 | */ |
1057 | const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | 1058 | const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, |
1058 | struct mm_struct *mm, unsigned long start, | 1059 | struct mm_struct *mm, unsigned long start, |
1059 | unsigned end, unsigned int cpu) | 1060 | unsigned long end, unsigned int cpu) |
1060 | { | 1061 | { |
1061 | int locals = 0; | 1062 | int locals = 0; |
1062 | int remotes = 0; | 1063 | int remotes = 0; |
@@ -1113,7 +1114,10 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
1113 | 1114 | ||
1114 | record_send_statistics(stat, locals, hubs, remotes, bau_desc); | 1115 | record_send_statistics(stat, locals, hubs, remotes, bau_desc); |
1115 | 1116 | ||
1116 | bau_desc->payload.address = start; | 1117 | if (!end || (end - start) <= PAGE_SIZE) |
1118 | bau_desc->payload.address = start; | ||
1119 | else | ||
1120 | bau_desc->payload.address = TLB_FLUSH_ALL; | ||
1117 | bau_desc->payload.sending_cpu = cpu; | 1121 | bau_desc->payload.sending_cpu = cpu; |
1118 | /* | 1122 | /* |
1119 | * uv_flush_send_and_wait returns 0 if all cpu's were messaged, | 1123 | * uv_flush_send_and_wait returns 0 if all cpu's were messaged, |
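A BAU descriptor carries exactly one payload address, so with ranged flushes anything wider than a single page has to be promoted to a full remote TLB flush. The selection logic, isolated (sentinel value illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TLB_FLUSH_ALL (~0UL)	/* illustrative sentinel */

static unsigned long bau_payload_addr(unsigned long start, unsigned long end)
{
	if (!end || (end - start) <= PAGE_SIZE)
		return start;	/* one page: targeted invalidate */
	return TLB_FLUSH_ALL;	/* ranged: flush everything remotely */
}

int main(void)
{
	printf("%#lx\n", bau_payload_addr(0x400000UL, 0x401000UL));
	printf("%#lx\n", bau_payload_addr(0x400000UL, 0x500000UL));
	return 0;
}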
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c index cc2f8c131286..872eb60e7806 100644 --- a/arch/x86/tools/insn_sanity.c +++ b/arch/x86/tools/insn_sanity.c | |||
@@ -55,7 +55,7 @@ static FILE *input_file; /* Input file name */ | |||
55 | static void usage(const char *err) | 55 | static void usage(const char *err) |
56 | { | 56 | { |
57 | if (err) | 57 | if (err) |
58 | fprintf(stderr, "Error: %s\n\n", err); | 58 | fprintf(stderr, "%s: Error: %s\n\n", prog, err); |
59 | fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog); | 59 | fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog); |
60 | fprintf(stderr, "\t-y 64bit mode\n"); | 60 | fprintf(stderr, "\t-y 64bit mode\n"); |
61 | fprintf(stderr, "\t-n 32bit mode\n"); | 61 | fprintf(stderr, "\t-n 32bit mode\n"); |
@@ -269,7 +269,13 @@ int main(int argc, char **argv) | |||
269 | insns++; | 269 | insns++; |
270 | } | 270 | } |
271 | 271 | ||
272 | fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed); | 272 | fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", |
273 | prog, | ||
274 | (errors) ? "Failure" : "Success", | ||
275 | insns, | ||
276 | (input_file) ? "given" : "random", | ||
277 | errors, | ||
278 | seed); | ||
273 | 279 | ||
274 | return errors ? 1 : 0; | 280 | return errors ? 1 : 0; |
275 | } | 281 | } |
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 205ad328aa52..c74436e687bf 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -60,7 +60,7 @@ notrace static cycle_t vread_tsc(void) | |||
60 | 60 | ||
61 | static notrace cycle_t vread_hpet(void) | 61 | static notrace cycle_t vread_hpet(void) |
62 | { | 62 | { |
63 | return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); | 63 | return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER); |
64 | } | 64 | } |
65 | 65 | ||
66 | #ifdef CONFIG_PARAVIRT_CLOCK | 66 | #ifdef CONFIG_PARAVIRT_CLOCK |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 138e5667409a..39928d16be3b 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1517,72 +1517,51 @@ asmlinkage void __init xen_start_kernel(void) | |||
1517 | #endif | 1517 | #endif |
1518 | } | 1518 | } |
1519 | 1519 | ||
1520 | #ifdef CONFIG_XEN_PVHVM | 1520 | void __ref xen_hvm_init_shared_info(void) |
1521 | #define HVM_SHARED_INFO_ADDR 0xFE700000UL | ||
1522 | static struct shared_info *xen_hvm_shared_info; | ||
1523 | static unsigned long xen_hvm_sip_phys; | ||
1524 | static int xen_major, xen_minor; | ||
1525 | |||
1526 | static void xen_hvm_connect_shared_info(unsigned long pfn) | ||
1527 | { | 1521 | { |
1522 | int cpu; | ||
1528 | struct xen_add_to_physmap xatp; | 1523 | struct xen_add_to_physmap xatp; |
1524 | static struct shared_info *shared_info_page = 0; | ||
1529 | 1525 | ||
1526 | if (!shared_info_page) | ||
1527 | shared_info_page = (struct shared_info *) | ||
1528 | extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
1530 | xatp.domid = DOMID_SELF; | 1529 | xatp.domid = DOMID_SELF; |
1531 | xatp.idx = 0; | 1530 | xatp.idx = 0; |
1532 | xatp.space = XENMAPSPACE_shared_info; | 1531 | xatp.space = XENMAPSPACE_shared_info; |
1533 | xatp.gpfn = pfn; | 1532 | xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; |
1534 | if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) | 1533 | if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) |
1535 | BUG(); | 1534 | BUG(); |
1536 | 1535 | ||
1537 | } | 1536 | HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; |
1538 | static void __init xen_hvm_set_shared_info(struct shared_info *sip) | ||
1539 | { | ||
1540 | int cpu; | ||
1541 | |||
1542 | HYPERVISOR_shared_info = sip; | ||
1543 | 1537 | ||
1544 | /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info | 1538 | /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info |
1545 | * page, we use it in the event channel upcall and in some pvclock | 1539 | * page, we use it in the event channel upcall and in some pvclock |
1546 | * related functions. We don't need the vcpu_info placement | 1540 | * related functions. We don't need the vcpu_info placement |
1547 | * optimizations because we don't use any pv_mmu or pv_irq op on | 1541 | * optimizations because we don't use any pv_mmu or pv_irq op on |
1548 | * HVM. */ | 1542 | * HVM. |
1549 | for_each_online_cpu(cpu) | 1543 | * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is |
1544 | * online but xen_hvm_init_shared_info is run at resume time too and | ||
1545 | * in that case multiple vcpus might be online. */ | ||
1546 | for_each_online_cpu(cpu) { | ||
1550 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; | 1547 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; |
1551 | } | ||
1552 | |||
1553 | /* Reconnect the shared_info pfn to a (new) mfn */ | ||
1554 | void xen_hvm_resume_shared_info(void) | ||
1555 | { | ||
1556 | xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT); | ||
1557 | } | ||
1558 | |||
1559 | /* Xen tools prior to Xen 4 do not provide a E820_Reserved area for guest usage. | ||
1560 | * On these old tools the shared info page will be placed in E820_Ram. | ||
1561 | * Xen 4 provides a E820_Reserved area at 0xFC000000, and this code expects | ||
1562 | * that nothing is mapped up to HVM_SHARED_INFO_ADDR. | ||
1563 | * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used | ||
1564 | * here for the shared info page. */ | ||
1565 | static void __init xen_hvm_init_shared_info(void) | ||
1566 | { | ||
1567 | if (xen_major < 4) { | ||
1568 | xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
1569 | xen_hvm_sip_phys = __pa(xen_hvm_shared_info); | ||
1570 | } else { | ||
1571 | xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR; | ||
1572 | set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys); | ||
1573 | xen_hvm_shared_info = | ||
1574 | (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); | ||
1575 | } | 1548 | } |
1576 | xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT); | ||
1577 | xen_hvm_set_shared_info(xen_hvm_shared_info); | ||
1578 | } | 1549 | } |
1579 | 1550 | ||
1551 | #ifdef CONFIG_XEN_PVHVM | ||
1580 | static void __init init_hvm_pv_info(void) | 1552 | static void __init init_hvm_pv_info(void) |
1581 | { | 1553 | { |
1582 | uint32_t ecx, edx, pages, msr, base; | 1554 | int major, minor; |
1555 | uint32_t eax, ebx, ecx, edx, pages, msr, base; | ||
1583 | u64 pfn; | 1556 | u64 pfn; |
1584 | 1557 | ||
1585 | base = xen_cpuid_base(); | 1558 | base = xen_cpuid_base(); |
1559 | cpuid(base + 1, &eax, &ebx, &ecx, &edx); | ||
1560 | |||
1561 | major = eax >> 16; | ||
1562 | minor = eax & 0xffff; | ||
1563 | printk(KERN_INFO "Xen version %d.%d.\n", major, minor); | ||
1564 | |||
1586 | cpuid(base + 2, &pages, &msr, &ecx, &edx); | 1565 | cpuid(base + 2, &pages, &msr, &ecx, &edx); |
1587 | 1566 | ||
1588 | pfn = __pa(hypercall_page); | 1567 | pfn = __pa(hypercall_page); |
@@ -1633,22 +1612,12 @@ static void __init xen_hvm_guest_init(void) | |||
1633 | 1612 | ||
1634 | static bool __init xen_hvm_platform(void) | 1613 | static bool __init xen_hvm_platform(void) |
1635 | { | 1614 | { |
1636 | uint32_t eax, ebx, ecx, edx, base; | ||
1637 | |||
1638 | if (xen_pv_domain()) | 1615 | if (xen_pv_domain()) |
1639 | return false; | 1616 | return false; |
1640 | 1617 | ||
1641 | base = xen_cpuid_base(); | 1618 | if (!xen_cpuid_base()) |
1642 | if (!base) | ||
1643 | return false; | 1619 | return false; |
1644 | 1620 | ||
1645 | cpuid(base + 1, &eax, &ebx, &ecx, &edx); | ||
1646 | |||
1647 | xen_major = eax >> 16; | ||
1648 | xen_minor = eax & 0xffff; | ||
1649 | |||
1650 | printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor); | ||
1651 | |||
1652 | return true; | 1621 | return true; |
1653 | } | 1622 | } |
1654 | 1623 | ||
@@ -1668,6 +1637,7 @@ const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = { | |||
1668 | .name = "Xen HVM", | 1637 | .name = "Xen HVM", |
1669 | .detect = xen_hvm_platform, | 1638 | .detect = xen_hvm_platform, |
1670 | .init_platform = xen_hvm_guest_init, | 1639 | .init_platform = xen_hvm_guest_init, |
1640 | .x2apic_available = xen_x2apic_para_available, | ||
1671 | }; | 1641 | }; |
1672 | EXPORT_SYMBOL(x86_hyper_xen_hvm); | 1642 | EXPORT_SYMBOL(x86_hyper_xen_hvm); |
1673 | #endif | 1643 | #endif |
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index ae8a00c39de4..45329c8c226e 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c | |||
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled) | |||
30 | { | 30 | { |
31 | #ifdef CONFIG_XEN_PVHVM | 31 | #ifdef CONFIG_XEN_PVHVM |
32 | int cpu; | 32 | int cpu; |
33 | xen_hvm_resume_shared_info(); | 33 | xen_hvm_init_shared_info(); |
34 | xen_callback_vector(); | 34 | xen_callback_vector(); |
35 | xen_unplug_emulated_devices(); | 35 | xen_unplug_emulated_devices(); |
36 | if (xen_feature(XENFEAT_hvm_safe_pvclock)) { | 36 | if (xen_feature(XENFEAT_hvm_safe_pvclock)) { |
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index f9643fc50de5..33ca6e42a4ca 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S | |||
@@ -89,11 +89,11 @@ ENTRY(xen_iret) | |||
89 | */ | 89 | */ |
90 | #ifdef CONFIG_SMP | 90 | #ifdef CONFIG_SMP |
91 | GET_THREAD_INFO(%eax) | 91 | GET_THREAD_INFO(%eax) |
92 | movl TI_cpu(%eax), %eax | 92 | movl %ss:TI_cpu(%eax), %eax |
93 | movl __per_cpu_offset(,%eax,4), %eax | 93 | movl %ss:__per_cpu_offset(,%eax,4), %eax |
94 | mov xen_vcpu(%eax), %eax | 94 | mov %ss:xen_vcpu(%eax), %eax |
95 | #else | 95 | #else |
96 | movl xen_vcpu, %eax | 96 | movl %ss:xen_vcpu, %eax |
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | /* check IF state we're restoring */ | 99 | /* check IF state we're restoring */ |
@@ -106,11 +106,11 @@ ENTRY(xen_iret) | |||
106 | * resuming the code, so we don't have to be worried about | 106 | * resuming the code, so we don't have to be worried about |
107 | * being preempted to another CPU. | 107 | * being preempted to another CPU. |
108 | */ | 108 | */ |
109 | setz XEN_vcpu_info_mask(%eax) | 109 | setz %ss:XEN_vcpu_info_mask(%eax) |
110 | xen_iret_start_crit: | 110 | xen_iret_start_crit: |
111 | 111 | ||
112 | /* check for unmasked and pending */ | 112 | /* check for unmasked and pending */ |
113 | cmpw $0x0001, XEN_vcpu_info_pending(%eax) | 113 | cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax) |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * If there's something pending, mask events again so we can | 116 | * If there's something pending, mask events again so we can |
@@ -118,7 +118,7 @@ xen_iret_start_crit: | |||
118 | * touch XEN_vcpu_info_mask. | 118 | * touch XEN_vcpu_info_mask. |
119 | */ | 119 | */ |
120 | jne 1f | 120 | jne 1f |
121 | movb $1, XEN_vcpu_info_mask(%eax) | 121 | movb $1, %ss:XEN_vcpu_info_mask(%eax) |
122 | 122 | ||
123 | 1: popl %eax | 123 | 1: popl %eax |
124 | 124 | ||
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index d2e73d19d366..a95b41744ad0 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -40,7 +40,7 @@ void xen_enable_syscall(void); | |||
40 | void xen_vcpu_restore(void); | 40 | void xen_vcpu_restore(void); |
41 | 41 | ||
42 | void xen_callback_vector(void); | 42 | void xen_callback_vector(void); |
43 | void xen_hvm_resume_shared_info(void); | 43 | void xen_hvm_init_shared_info(void); |
44 | void xen_unplug_emulated_devices(void); | 44 | void xen_unplug_emulated_devices(void); |
45 | 45 | ||
46 | void __init xen_build_dynamic_phys_to_machine(void); | 46 | void __init xen_build_dynamic_phys_to_machine(void); |
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 4acb5feba1fb..172a02a6ad14 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h | |||
@@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
170 | consistent_sync(vaddr, size, direction); | 170 | consistent_sync(vaddr, size, direction); |
171 | } | 171 | } |
172 | 172 | ||
173 | /* Not supported for now */ | ||
174 | static inline int dma_mmap_coherent(struct device *dev, | ||
175 | struct vm_area_struct *vma, void *cpu_addr, | ||
176 | dma_addr_t dma_addr, size_t size) | ||
177 | { | ||
178 | return -EINVAL; | ||
179 | } | ||
180 | |||
181 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
182 | void *cpu_addr, dma_addr_t dma_addr, | ||
183 | size_t size) | ||
184 | { | ||
185 | return -EINVAL; | ||
186 | } | ||
187 | |||
173 | #endif /* _XTENSA_DMA_MAPPING_H */ | 188 | #endif /* _XTENSA_DMA_MAPPING_H */ |