diff options
Diffstat (limited to 'arch')
58 files changed, 1314 insertions, 1240 deletions
diff --git a/arch/arm/mach-imx/cpufreq.c b/arch/arm/mach-imx/cpufreq.c index 467d899fbe75..e548ba74a4d2 100644 --- a/arch/arm/mach-imx/cpufreq.c +++ b/arch/arm/mach-imx/cpufreq.c | |||
@@ -269,7 +269,6 @@ static int __init imx_cpufreq_driver_init(struct cpufreq_policy *policy) | |||
269 | return -EINVAL; | 269 | return -EINVAL; |
270 | 270 | ||
271 | policy->cur = policy->min = policy->max = imx_get_speed(0); | 271 | policy->cur = policy->min = policy->max = imx_get_speed(0); |
272 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
273 | policy->cpuinfo.min_freq = 8000; | 272 | policy->cpuinfo.min_freq = 8000; |
274 | policy->cpuinfo.max_freq = 200000; | 273 | policy->cpuinfo.max_freq = 200000; |
275 | /* Manual states, that PLL stabilizes in two CLK32 periods */ | 274 | /* Manual states, that PLL stabilizes in two CLK32 periods */ |
diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/arch/arm/mach-sa1100/cpu-sa1110.c index 78f4c1346044..36b47ff5af11 100644 --- a/arch/arm/mach-sa1100/cpu-sa1110.c +++ b/arch/arm/mach-sa1100/cpu-sa1110.c | |||
@@ -331,7 +331,6 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy) | |||
331 | if (policy->cpu != 0) | 331 | if (policy->cpu != 0) |
332 | return -EINVAL; | 332 | return -EINVAL; |
333 | policy->cur = policy->min = policy->max = sa11x0_getspeed(0); | 333 | policy->cur = policy->min = policy->max = sa11x0_getspeed(0); |
334 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
335 | policy->cpuinfo.min_freq = 59000; | 334 | policy->cpuinfo.min_freq = 59000; |
336 | policy->cpuinfo.max_freq = 287000; | 335 | policy->cpuinfo.max_freq = 287000; |
337 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 336 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c index a0c71dca2373..c0d63b0c61c9 100644 --- a/arch/arm/plat-omap/cpu-omap.c +++ b/arch/arm/plat-omap/cpu-omap.c | |||
@@ -108,7 +108,6 @@ static int __init omap_cpu_init(struct cpufreq_policy *policy) | |||
108 | if (policy->cpu != 0) | 108 | if (policy->cpu != 0) |
109 | return -EINVAL; | 109 | return -EINVAL; |
110 | policy->cur = policy->min = policy->max = omap_getspeed(0); | 110 | policy->cur = policy->min = policy->max = omap_getspeed(0); |
111 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
112 | policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000; | 111 | policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000; |
113 | policy->cpuinfo.max_freq = clk_round_rate(mpu_clk, VERY_HI_RATE) / 1000; | 112 | policy->cpuinfo.max_freq = clk_round_rate(mpu_clk, VERY_HI_RATE) / 1000; |
114 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 113 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
diff --git a/arch/blackfin/mach-bf533/cpu.c b/arch/blackfin/mach-bf533/cpu.c index 6fd9cfd0a31b..b7a0e0fbd9af 100644 --- a/arch/blackfin/mach-bf533/cpu.c +++ b/arch/blackfin/mach-bf533/cpu.c | |||
@@ -118,8 +118,6 @@ static int __init __bf533_cpu_init(struct cpufreq_policy *policy) | |||
118 | if (policy->cpu != 0) | 118 | if (policy->cpu != 0) |
119 | return -EINVAL; | 119 | return -EINVAL; |
120 | 120 | ||
121 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
122 | |||
123 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 121 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
124 | /*Now ,only support one cpu */ | 122 | /*Now ,only support one cpu */ |
125 | policy->cur = bf533_getfreq(0); | 123 | policy->cur = bf533_getfreq(0); |
diff --git a/arch/blackfin/mach-bf537/boards/generic_board.c b/arch/blackfin/mach-bf537/boards/generic_board.c index 5e9d09eb8579..6668c8e4a3fc 100644 --- a/arch/blackfin/mach-bf537/boards/generic_board.c +++ b/arch/blackfin/mach-bf537/boards/generic_board.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <linux/pata_platform.h> | 40 | #include <linux/pata_platform.h> |
41 | #include <linux/irq.h> | 41 | #include <linux/irq.h> |
42 | #include <linux/interrupt.h> | 42 | #include <linux/interrupt.h> |
43 | #include <linux/usb_sl811.h> | 43 | #include <linux/usb/sl811.h> |
44 | #include <asm/dma.h> | 44 | #include <asm/dma.h> |
45 | #include <asm/bfin5xx_spi.h> | 45 | #include <asm/bfin5xx_spi.h> |
46 | #include <asm/reboot.h> | 46 | #include <asm/reboot.h> |
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c index 20507e92a3a4..f83a2544004d 100644 --- a/arch/blackfin/mach-bf537/boards/pnav10.c +++ b/arch/blackfin/mach-bf537/boards/pnav10.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <linux/irq.h> | 40 | #include <linux/irq.h> |
41 | #include <asm/dma.h> | 41 | #include <asm/dma.h> |
42 | #include <asm/bfin5xx_spi.h> | 42 | #include <asm/bfin5xx_spi.h> |
43 | #include <linux/usb_sl811.h> | 43 | #include <linux/usb/sl811.h> |
44 | 44 | ||
45 | #include <linux/spi/ad7877.h> | 45 | #include <linux/spi/ad7877.h> |
46 | 46 | ||
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 47d7d4a0e73d..f42ba3aa86d7 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <linux/pata_platform.h> | 40 | #include <linux/pata_platform.h> |
41 | #include <linux/irq.h> | 41 | #include <linux/irq.h> |
42 | #include <linux/interrupt.h> | 42 | #include <linux/interrupt.h> |
43 | #include <linux/usb_sl811.h> | 43 | #include <linux/usb/sl811.h> |
44 | #include <asm/dma.h> | 44 | #include <asm/dma.h> |
45 | #include <asm/bfin5xx_spi.h> | 45 | #include <asm/bfin5xx_spi.h> |
46 | #include <asm/reboot.h> | 46 | #include <asm/reboot.h> |
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 2d85e4b87307..6bbbc2755e44 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig | |||
@@ -1206,6 +1206,16 @@ config SCx200HR_TIMER | |||
1206 | processor goes idle (as is done by the scheduler). The | 1206 | processor goes idle (as is done by the scheduler). The |
1207 | other workaround is idle=poll boot option. | 1207 | other workaround is idle=poll boot option. |
1208 | 1208 | ||
1209 | config GEODE_MFGPT_TIMER | ||
1210 | bool "Geode Multi-Function General Purpose Timer (MFGPT) events" | ||
1211 | depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS | ||
1212 | default y | ||
1213 | help | ||
1214 | This driver provides a clock event source based on the MFGPT | ||
1215 | timer(s) in the CS5535 and CS5536 companion chip for the geode. | ||
1216 | MFGPTs have a better resolution and max interval than the | ||
1217 | generic PIT, and are suitable for use as high-res timers. | ||
1218 | |||
1209 | config K8_NB | 1219 | config K8_NB |
1210 | def_bool y | 1220 | def_bool y |
1211 | depends on AGP_AMD64 | 1221 | depends on AGP_AMD64 |
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index 8c6ec7070844..b8498ea62068 100644 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c | |||
@@ -321,8 +321,6 @@ acpi_cpufreq_cpu_init ( | |||
321 | data->acpi_data.states[i].transition_latency * 1000; | 321 | data->acpi_data.states[i].transition_latency * 1000; |
322 | } | 322 | } |
323 | } | 323 | } |
324 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
325 | |||
326 | policy->cur = processor_get_freq(data, policy->cpu); | 324 | policy->cur = processor_get_freq(data, policy->cpu); |
327 | 325 | ||
328 | /* table init */ | 326 | /* table init */ |
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c index 901236fa0f07..5123e9d4164b 100644 --- a/arch/powerpc/platforms/cell/cbe_cpufreq.c +++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c | |||
@@ -107,8 +107,6 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
107 | pr_debug("%d: %d\n", i, cbe_freqs[i].frequency); | 107 | pr_debug("%d: %d\n", i, cbe_freqs[i].frequency); |
108 | } | 108 | } |
109 | 109 | ||
110 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
111 | |||
112 | /* if DEBUG is enabled set_pmode() measures the latency | 110 | /* if DEBUG is enabled set_pmode() measures the latency |
113 | * of a transition */ | 111 | * of a transition */ |
114 | policy->cpuinfo.transition_latency = 25000; | 112 | policy->cpuinfo.transition_latency = 25000; |
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c index 3ae083851b01..1cfb8b0c8fec 100644 --- a/arch/powerpc/platforms/pasemi/cpufreq.c +++ b/arch/powerpc/platforms/pasemi/cpufreq.c | |||
@@ -195,8 +195,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
195 | pr_debug("%d: %d\n", i, pas_freqs[i].frequency); | 195 | pr_debug("%d: %d\n", i, pas_freqs[i].frequency); |
196 | } | 196 | } |
197 | 197 | ||
198 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
199 | |||
200 | policy->cpuinfo.transition_latency = get_gizmo_latency(); | 198 | policy->cpuinfo.transition_latency = get_gizmo_latency(); |
201 | 199 | ||
202 | cur_astate = get_cur_astate(policy->cpu); | 200 | cur_astate = get_cur_astate(policy->cpu); |
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 1fe35dab0e9e..c04abcc28a7a 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c | |||
@@ -410,7 +410,6 @@ static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
410 | if (policy->cpu != 0) | 410 | if (policy->cpu != 0) |
411 | return -ENODEV; | 411 | return -ENODEV; |
412 | 412 | ||
413 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
414 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 413 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
415 | policy->cur = cur_freq; | 414 | policy->cur = cur_freq; |
416 | 415 | ||
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c index 00f50298c342..4dfb4bc242b5 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_64.c +++ b/arch/powerpc/platforms/powermac/cpufreq_64.c | |||
@@ -357,7 +357,6 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu) | |||
357 | 357 | ||
358 | static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) | 358 | static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) |
359 | { | 359 | { |
360 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
361 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 360 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
362 | policy->cur = g5_cpu_freqs[g5_query_freq()].frequency; | 361 | policy->cur = g5_cpu_freqs[g5_query_freq()].frequency; |
363 | /* secondary CPUs are tied to the primary one by the | 362 | /* secondary CPUs are tied to the primary one by the |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 62391fb1f61f..ac61cf43a7d9 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c | |||
@@ -547,8 +547,7 @@ static void __cpuinit appldata_online_cpu(int cpu) | |||
547 | spin_unlock(&appldata_timer_lock); | 547 | spin_unlock(&appldata_timer_lock); |
548 | } | 548 | } |
549 | 549 | ||
550 | static void | 550 | static void __cpuinit appldata_offline_cpu(int cpu) |
551 | appldata_offline_cpu(int cpu) | ||
552 | { | 551 | { |
553 | del_virt_timer(&per_cpu(appldata_timer, cpu)); | 552 | del_virt_timer(&per_cpu(appldata_timer, cpu)); |
554 | if (atomic_dec_and_test(&appldata_expire_count)) { | 553 | if (atomic_dec_and_test(&appldata_expire_count)) { |
@@ -560,9 +559,9 @@ appldata_offline_cpu(int cpu) | |||
560 | spin_unlock(&appldata_timer_lock); | 559 | spin_unlock(&appldata_timer_lock); |
561 | } | 560 | } |
562 | 561 | ||
563 | static int __cpuinit | 562 | static int __cpuinit appldata_cpu_notify(struct notifier_block *self, |
564 | appldata_cpu_notify(struct notifier_block *self, | 563 | unsigned long action, |
565 | unsigned long action, void *hcpu) | 564 | void *hcpu) |
566 | { | 565 | { |
567 | switch (action) { | 566 | switch (action) { |
568 | case CPU_ONLINE: | 567 | case CPU_ONLINE: |
@@ -608,63 +607,15 @@ static int __init appldata_init(void) | |||
608 | register_hotcpu_notifier(&appldata_nb); | 607 | register_hotcpu_notifier(&appldata_nb); |
609 | 608 | ||
610 | appldata_sysctl_header = register_sysctl_table(appldata_dir_table); | 609 | appldata_sysctl_header = register_sysctl_table(appldata_dir_table); |
611 | #ifdef MODULE | ||
612 | appldata_dir_table[0].de->owner = THIS_MODULE; | ||
613 | appldata_table[0].de->owner = THIS_MODULE; | ||
614 | appldata_table[1].de->owner = THIS_MODULE; | ||
615 | #endif | ||
616 | 610 | ||
617 | P_DEBUG("Base interface initialized.\n"); | 611 | P_DEBUG("Base interface initialized.\n"); |
618 | return 0; | 612 | return 0; |
619 | } | 613 | } |
620 | 614 | ||
621 | /* | 615 | __initcall(appldata_init); |
622 | * appldata_exit() | ||
623 | * | ||
624 | * stop timer, unregister /proc entries | ||
625 | */ | ||
626 | static void __exit appldata_exit(void) | ||
627 | { | ||
628 | struct list_head *lh; | ||
629 | struct appldata_ops *ops; | ||
630 | int rc, i; | ||
631 | 616 | ||
632 | P_DEBUG("Unloading module ...\n"); | ||
633 | /* | ||
634 | * ops list should be empty, but just in case something went wrong... | ||
635 | */ | ||
636 | spin_lock(&appldata_ops_lock); | ||
637 | list_for_each(lh, &appldata_ops_list) { | ||
638 | ops = list_entry(lh, struct appldata_ops, list); | ||
639 | rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, | ||
640 | (unsigned long) ops->data, ops->size, | ||
641 | ops->mod_lvl); | ||
642 | if (rc != 0) { | ||
643 | P_ERROR("STOP DIAG 0xDC for %s failed, " | ||
644 | "return code: %d\n", ops->name, rc); | ||
645 | } | ||
646 | } | ||
647 | spin_unlock(&appldata_ops_lock); | ||
648 | |||
649 | for_each_online_cpu(i) | ||
650 | appldata_offline_cpu(i); | ||
651 | |||
652 | appldata_timer_active = 0; | ||
653 | |||
654 | unregister_sysctl_table(appldata_sysctl_header); | ||
655 | |||
656 | destroy_workqueue(appldata_wq); | ||
657 | P_DEBUG("... module unloaded!\n"); | ||
658 | } | ||
659 | /**************************** init / exit <END> ******************************/ | 617 | /**************************** init / exit <END> ******************************/ |
660 | 618 | ||
661 | |||
662 | module_init(appldata_init); | ||
663 | module_exit(appldata_exit); | ||
664 | MODULE_LICENSE("GPL"); | ||
665 | MODULE_AUTHOR("Gerald Schaefer"); | ||
666 | MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure"); | ||
667 | |||
668 | EXPORT_SYMBOL_GPL(appldata_register_ops); | 619 | EXPORT_SYMBOL_GPL(appldata_register_ops); |
669 | EXPORT_SYMBOL_GPL(appldata_unregister_ops); | 620 | EXPORT_SYMBOL_GPL(appldata_unregister_ops); |
670 | EXPORT_SYMBOL_GPL(appldata_diag); | 621 | EXPORT_SYMBOL_GPL(appldata_diag); |
diff --git a/arch/s390/kernel/audit.c b/arch/s390/kernel/audit.c index d1c76fe10f29..f4932c22ebe4 100644 --- a/arch/s390/kernel/audit.c +++ b/arch/s390/kernel/audit.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/types.h> | 2 | #include <linux/types.h> |
3 | #include <linux/audit.h> | 3 | #include <linux/audit.h> |
4 | #include <asm/unistd.h> | 4 | #include <asm/unistd.h> |
5 | #include "audit.h" | ||
5 | 6 | ||
6 | static unsigned dir_class[] = { | 7 | static unsigned dir_class[] = { |
7 | #include <asm-generic/audit_dir_write.h> | 8 | #include <asm-generic/audit_dir_write.h> |
@@ -40,7 +41,6 @@ int audit_classify_arch(int arch) | |||
40 | int audit_classify_syscall(int abi, unsigned syscall) | 41 | int audit_classify_syscall(int abi, unsigned syscall) |
41 | { | 42 | { |
42 | #ifdef CONFIG_COMPAT | 43 | #ifdef CONFIG_COMPAT |
43 | extern int s390_classify_syscall(unsigned); | ||
44 | if (abi == AUDIT_ARCH_S390) | 44 | if (abi == AUDIT_ARCH_S390) |
45 | return s390_classify_syscall(syscall); | 45 | return s390_classify_syscall(syscall); |
46 | #endif | 46 | #endif |
@@ -61,11 +61,6 @@ int audit_classify_syscall(int abi, unsigned syscall) | |||
61 | static int __init audit_classes_init(void) | 61 | static int __init audit_classes_init(void) |
62 | { | 62 | { |
63 | #ifdef CONFIG_COMPAT | 63 | #ifdef CONFIG_COMPAT |
64 | extern __u32 s390_dir_class[]; | ||
65 | extern __u32 s390_write_class[]; | ||
66 | extern __u32 s390_read_class[]; | ||
67 | extern __u32 s390_chattr_class[]; | ||
68 | extern __u32 s390_signal_class[]; | ||
69 | audit_register_class(AUDIT_CLASS_WRITE_32, s390_write_class); | 64 | audit_register_class(AUDIT_CLASS_WRITE_32, s390_write_class); |
70 | audit_register_class(AUDIT_CLASS_READ_32, s390_read_class); | 65 | audit_register_class(AUDIT_CLASS_READ_32, s390_read_class); |
71 | audit_register_class(AUDIT_CLASS_DIR_WRITE_32, s390_dir_class); | 66 | audit_register_class(AUDIT_CLASS_DIR_WRITE_32, s390_dir_class); |
diff --git a/arch/s390/kernel/audit.h b/arch/s390/kernel/audit.h new file mode 100644 index 000000000000..12b56f4b5a73 --- /dev/null +++ b/arch/s390/kernel/audit.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef __ARCH_S390_KERNEL_AUDIT_H | ||
2 | #define __ARCH_S390_KERNEL_AUDIT_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | #ifdef CONFIG_COMPAT | ||
7 | extern int s390_classify_syscall(unsigned); | ||
8 | extern __u32 s390_dir_class[]; | ||
9 | extern __u32 s390_write_class[]; | ||
10 | extern __u32 s390_read_class[]; | ||
11 | extern __u32 s390_chattr_class[]; | ||
12 | extern __u32 s390_signal_class[]; | ||
13 | #endif /* CONFIG_COMPAT */ | ||
14 | |||
15 | #endif /* __ARCH_S390_KERNEL_AUDIT_H */ | ||
diff --git a/arch/s390/kernel/compat_audit.c b/arch/s390/kernel/compat_audit.c index 0569f5126e49..d6487bf879e5 100644 --- a/arch/s390/kernel/compat_audit.c +++ b/arch/s390/kernel/compat_audit.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #undef __s390x__ | 1 | #undef __s390x__ |
2 | #include <asm/unistd.h> | 2 | #include <asm/unistd.h> |
3 | #include "audit.h" | ||
3 | 4 | ||
4 | unsigned s390_dir_class[] = { | 5 | unsigned s390_dir_class[] = { |
5 | #include <asm-generic/audit_dir_write.h> | 6 | #include <asm-generic/audit_dir_write.h> |
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index 6c89f30c8e31..d8c1131e0815 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * arch/s390/kernel/cpcmd.c | 2 | * arch/s390/kernel/cpcmd.c |
3 | * | 3 | * |
4 | * S390 version | 4 | * S390 version |
5 | * Copyright (C) 1999,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Copyright IBM Corp. 1999,2007 |
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), |
7 | * Christian Borntraeger (cborntra@de.ibm.com), | 7 | * Christian Borntraeger (cborntra@de.ibm.com), |
8 | */ | 8 | */ |
@@ -21,6 +21,49 @@ | |||
21 | static DEFINE_SPINLOCK(cpcmd_lock); | 21 | static DEFINE_SPINLOCK(cpcmd_lock); |
22 | static char cpcmd_buf[241]; | 22 | static char cpcmd_buf[241]; |
23 | 23 | ||
24 | static int diag8_noresponse(int cmdlen) | ||
25 | { | ||
26 | register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; | ||
27 | register unsigned long reg3 asm ("3") = cmdlen; | ||
28 | |||
29 | asm volatile( | ||
30 | #ifndef CONFIG_64BIT | ||
31 | " diag %1,%0,0x8\n" | ||
32 | #else /* CONFIG_64BIT */ | ||
33 | " sam31\n" | ||
34 | " diag %1,%0,0x8\n" | ||
35 | " sam64\n" | ||
36 | #endif /* CONFIG_64BIT */ | ||
37 | : "+d" (reg3) : "d" (reg2) : "cc"); | ||
38 | return reg3; | ||
39 | } | ||
40 | |||
41 | static int diag8_response(int cmdlen, char *response, int *rlen) | ||
42 | { | ||
43 | register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; | ||
44 | register unsigned long reg3 asm ("3") = (addr_t) response; | ||
45 | register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L; | ||
46 | register unsigned long reg5 asm ("5") = *rlen; | ||
47 | |||
48 | asm volatile( | ||
49 | #ifndef CONFIG_64BIT | ||
50 | " diag %2,%0,0x8\n" | ||
51 | " brc 8,1f\n" | ||
52 | " ar %1,%4\n" | ||
53 | #else /* CONFIG_64BIT */ | ||
54 | " sam31\n" | ||
55 | " diag %2,%0,0x8\n" | ||
56 | " sam64\n" | ||
57 | " brc 8,1f\n" | ||
58 | " agr %1,%4\n" | ||
59 | #endif /* CONFIG_64BIT */ | ||
60 | "1:\n" | ||
61 | : "+d" (reg4), "+d" (reg5) | ||
62 | : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc"); | ||
63 | *rlen = reg5; | ||
64 | return reg4; | ||
65 | } | ||
66 | |||
24 | /* | 67 | /* |
25 | * __cpcmd has some restrictions over cpcmd | 68 | * __cpcmd has some restrictions over cpcmd |
26 | * - the response buffer must reside below 2GB (if any) | 69 | * - the response buffer must reside below 2GB (if any) |
@@ -28,59 +71,27 @@ static char cpcmd_buf[241]; | |||
28 | */ | 71 | */ |
29 | int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) | 72 | int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) |
30 | { | 73 | { |
31 | unsigned cmdlen; | 74 | int cmdlen; |
32 | int return_code, return_len; | 75 | int rc; |
76 | int response_len; | ||
33 | 77 | ||
34 | cmdlen = strlen(cmd); | 78 | cmdlen = strlen(cmd); |
35 | BUG_ON(cmdlen > 240); | 79 | BUG_ON(cmdlen > 240); |
36 | memcpy(cpcmd_buf, cmd, cmdlen); | 80 | memcpy(cpcmd_buf, cmd, cmdlen); |
37 | ASCEBC(cpcmd_buf, cmdlen); | 81 | ASCEBC(cpcmd_buf, cmdlen); |
38 | 82 | ||
39 | if (response != NULL && rlen > 0) { | 83 | if (response) { |
40 | register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; | ||
41 | register unsigned long reg3 asm ("3") = (addr_t) response; | ||
42 | register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L; | ||
43 | register unsigned long reg5 asm ("5") = rlen; | ||
44 | |||
45 | memset(response, 0, rlen); | 84 | memset(response, 0, rlen); |
46 | asm volatile( | 85 | response_len = rlen; |
47 | #ifndef CONFIG_64BIT | 86 | rc = diag8_response(cmdlen, response, &rlen); |
48 | " diag %2,%0,0x8\n" | 87 | EBCASC(response, response_len); |
49 | " brc 8,1f\n" | ||
50 | " ar %1,%4\n" | ||
51 | #else /* CONFIG_64BIT */ | ||
52 | " sam31\n" | ||
53 | " diag %2,%0,0x8\n" | ||
54 | " sam64\n" | ||
55 | " brc 8,1f\n" | ||
56 | " agr %1,%4\n" | ||
57 | #endif /* CONFIG_64BIT */ | ||
58 | "1:\n" | ||
59 | : "+d" (reg4), "+d" (reg5) | ||
60 | : "d" (reg2), "d" (reg3), "d" (rlen) : "cc"); | ||
61 | return_code = (int) reg4; | ||
62 | return_len = (int) reg5; | ||
63 | EBCASC(response, rlen); | ||
64 | } else { | 88 | } else { |
65 | register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; | 89 | rc = diag8_noresponse(cmdlen); |
66 | register unsigned long reg3 asm ("3") = cmdlen; | ||
67 | return_len = 0; | ||
68 | asm volatile( | ||
69 | #ifndef CONFIG_64BIT | ||
70 | " diag %1,%0,0x8\n" | ||
71 | #else /* CONFIG_64BIT */ | ||
72 | " sam31\n" | ||
73 | " diag %1,%0,0x8\n" | ||
74 | " sam64\n" | ||
75 | #endif /* CONFIG_64BIT */ | ||
76 | : "+d" (reg3) : "d" (reg2) : "cc"); | ||
77 | return_code = (int) reg3; | ||
78 | } | 90 | } |
79 | if (response_code != NULL) | 91 | if (response_code) |
80 | *response_code = return_code; | 92 | *response_code = rc; |
81 | return return_len; | 93 | return rlen; |
82 | } | 94 | } |
83 | |||
84 | EXPORT_SYMBOL(__cpcmd); | 95 | EXPORT_SYMBOL(__cpcmd); |
85 | 96 | ||
86 | int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | 97 | int cpcmd(const char *cmd, char *response, int rlen, int *response_code) |
@@ -109,5 +120,4 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
109 | } | 120 | } |
110 | return len; | 121 | return len; |
111 | } | 122 | } |
112 | |||
113 | EXPORT_SYMBOL(cpcmd); | 123 | EXPORT_SYMBOL(cpcmd); |
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 50d2235df732..c14a336f6300 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c | |||
@@ -1162,6 +1162,7 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) | |||
1162 | unsigned int value; | 1162 | unsigned int value; |
1163 | char separator; | 1163 | char separator; |
1164 | char *ptr; | 1164 | char *ptr; |
1165 | int i; | ||
1165 | 1166 | ||
1166 | ptr = buffer; | 1167 | ptr = buffer; |
1167 | insn = find_insn(code); | 1168 | insn = find_insn(code); |
@@ -1169,7 +1170,8 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) | |||
1169 | ptr += sprintf(ptr, "%.5s\t", insn->name); | 1170 | ptr += sprintf(ptr, "%.5s\t", insn->name); |
1170 | /* Extract the operands. */ | 1171 | /* Extract the operands. */ |
1171 | separator = 0; | 1172 | separator = 0; |
1172 | for (ops = formats[insn->format] + 1; *ops != 0; ops++) { | 1173 | for (ops = formats[insn->format] + 1, i = 0; |
1174 | *ops != 0 && i < 6; ops++, i++) { | ||
1173 | operand = operands + *ops; | 1175 | operand = operands + *ops; |
1174 | value = extract_operand(code, operand); | 1176 | value = extract_operand(code, operand); |
1175 | if ((operand->flags & OPERAND_INDEX) && value == 0) | 1177 | if ((operand->flags & OPERAND_INDEX) && value == 0) |
@@ -1241,7 +1243,6 @@ void show_code(struct pt_regs *regs) | |||
1241 | } | 1243 | } |
1242 | /* Find a starting point for the disassembly. */ | 1244 | /* Find a starting point for the disassembly. */ |
1243 | while (start < 32) { | 1245 | while (start < 32) { |
1244 | hops = 0; | ||
1245 | for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) { | 1246 | for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) { |
1246 | if (!find_insn(code + start + i)) | 1247 | if (!find_insn(code + start + i)) |
1247 | break; | 1248 | break; |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 8b8f136d9cc7..66b51901c87d 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -735,10 +735,10 @@ void do_reipl(void) | |||
735 | case REIPL_METHOD_CCW_VM: | 735 | case REIPL_METHOD_CCW_VM: |
736 | reipl_get_ascii_loadparm(loadparm); | 736 | reipl_get_ascii_loadparm(loadparm); |
737 | if (strlen(loadparm) == 0) | 737 | if (strlen(loadparm) == 0) |
738 | sprintf(buf, "IPL %X", | 738 | sprintf(buf, "IPL %X CLEAR", |
739 | reipl_block_ccw->ipl_info.ccw.devno); | 739 | reipl_block_ccw->ipl_info.ccw.devno); |
740 | else | 740 | else |
741 | sprintf(buf, "IPL %X LOADPARM '%s'", | 741 | sprintf(buf, "IPL %X CLEAR LOADPARM '%s'", |
742 | reipl_block_ccw->ipl_info.ccw.devno, loadparm); | 742 | reipl_block_ccw->ipl_info.ccw.devno, loadparm); |
743 | __cpcmd(buf, NULL, 0, NULL); | 743 | __cpcmd(buf, NULL, 0, NULL); |
744 | break; | 744 | break; |
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index b4622a3889b0..849120e3e28a 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -2,6 +2,7 @@ | |||
2 | * Written by Martin Schwidefsky (schwidefsky@de.ibm.com) | 2 | * Written by Martin Schwidefsky (schwidefsky@de.ibm.com) |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <asm/page.h> | ||
5 | #include <asm-generic/vmlinux.lds.h> | 6 | #include <asm-generic/vmlinux.lds.h> |
6 | 7 | ||
7 | #ifndef CONFIG_64BIT | 8 | #ifndef CONFIG_64BIT |
@@ -18,121 +19,142 @@ jiffies = jiffies_64; | |||
18 | 19 | ||
19 | SECTIONS | 20 | SECTIONS |
20 | { | 21 | { |
21 | . = 0x00000000; | 22 | . = 0x00000000; |
22 | _text = .; /* Text and read-only data */ | 23 | .text : { |
23 | .text : { | 24 | _text = .; /* Text and read-only data */ |
24 | *(.text.head) | 25 | *(.text.head) |
25 | TEXT_TEXT | 26 | TEXT_TEXT |
26 | SCHED_TEXT | 27 | SCHED_TEXT |
27 | LOCK_TEXT | 28 | LOCK_TEXT |
28 | KPROBES_TEXT | 29 | KPROBES_TEXT |
29 | *(.fixup) | 30 | *(.fixup) |
30 | *(.gnu.warning) | 31 | *(.gnu.warning) |
31 | } = 0x0700 | 32 | } = 0x0700 |
32 | 33 | ||
33 | _etext = .; /* End of text section */ | 34 | _etext = .; /* End of text section */ |
34 | 35 | ||
35 | RODATA | 36 | RODATA |
36 | 37 | ||
37 | #ifdef CONFIG_SHARED_KERNEL | 38 | #ifdef CONFIG_SHARED_KERNEL |
38 | . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ | 39 | . = ALIGN(0x100000); /* VM shared segments are 1MB aligned */ |
39 | #endif | 40 | #endif |
40 | 41 | ||
41 | . = ALIGN(4096); | 42 | . = ALIGN(PAGE_SIZE); |
42 | _eshared = .; /* End of shareable data */ | 43 | _eshared = .; /* End of shareable data */ |
43 | 44 | ||
44 | . = ALIGN(16); /* Exception table */ | 45 | . = ALIGN(16); /* Exception table */ |
45 | __start___ex_table = .; | 46 | __ex_table : { |
46 | __ex_table : { *(__ex_table) } | 47 | __start___ex_table = .; |
47 | __stop___ex_table = .; | 48 | *(__ex_table) |
48 | 49 | __stop___ex_table = .; | |
49 | NOTES | 50 | } |
50 | 51 | ||
51 | BUG_TABLE | 52 | NOTES |
52 | 53 | BUG_TABLE | |
53 | .data : { /* Data */ | 54 | |
54 | DATA_DATA | 55 | .data : { /* Data */ |
55 | CONSTRUCTORS | 56 | DATA_DATA |
56 | } | 57 | CONSTRUCTORS |
57 | 58 | } | |
58 | . = ALIGN(4096); | 59 | |
59 | __nosave_begin = .; | 60 | . = ALIGN(PAGE_SIZE); |
60 | .data_nosave : { *(.data.nosave) } | 61 | .data_nosave : { |
61 | . = ALIGN(4096); | 62 | __nosave_begin = .; |
62 | __nosave_end = .; | 63 | *(.data.nosave) |
63 | 64 | } | |
64 | . = ALIGN(4096); | 65 | . = ALIGN(PAGE_SIZE); |
65 | .data.page_aligned : { *(.data.idt) } | 66 | __nosave_end = .; |
66 | 67 | ||
67 | . = ALIGN(256); | 68 | . = ALIGN(PAGE_SIZE); |
68 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | 69 | .data.page_aligned : { |
69 | 70 | *(.data.idt) | |
70 | . = ALIGN(256); | 71 | } |
71 | .data.read_mostly : { *(.data.read_mostly) } | 72 | |
72 | _edata = .; /* End of data section */ | 73 | . = ALIGN(0x100); |
73 | 74 | .data.cacheline_aligned : { | |
74 | . = ALIGN(8192); /* init_task */ | 75 | *(.data.cacheline_aligned) |
75 | .data.init_task : { *(.data.init_task) } | 76 | } |
76 | 77 | ||
77 | /* will be freed after init */ | 78 | . = ALIGN(0x100); |
78 | . = ALIGN(4096); /* Init code and data */ | 79 | .data.read_mostly : { |
79 | __init_begin = .; | 80 | *(.data.read_mostly) |
80 | .init.text : { | 81 | } |
81 | _sinittext = .; | 82 | _edata = .; /* End of data section */ |
82 | *(.init.text) | 83 | |
83 | _einittext = .; | 84 | . = ALIGN(2 * PAGE_SIZE); /* init_task */ |
84 | } | 85 | .data.init_task : { |
85 | /* | 86 | *(.data.init_task) |
86 | * .exit.text is discarded at runtime, not link time, | 87 | } |
87 | * to deal with references from __bug_table | 88 | |
88 | */ | 89 | /* will be freed after init */ |
89 | .exit.text : { *(.exit.text) } | 90 | . = ALIGN(PAGE_SIZE); /* Init code and data */ |
90 | 91 | __init_begin = .; | |
91 | .init.data : { *(.init.data) } | 92 | .init.text : { |
92 | . = ALIGN(256); | 93 | _sinittext = .; |
93 | __setup_start = .; | 94 | *(.init.text) |
94 | .init.setup : { *(.init.setup) } | 95 | _einittext = .; |
95 | __setup_end = .; | 96 | } |
96 | __initcall_start = .; | 97 | /* |
97 | .initcall.init : { | 98 | * .exit.text is discarded at runtime, not link time, |
98 | INITCALLS | 99 | * to deal with references from __bug_table |
99 | } | 100 | */ |
100 | __initcall_end = .; | 101 | .exit.text : { |
101 | __con_initcall_start = .; | 102 | *(.exit.text) |
102 | .con_initcall.init : { *(.con_initcall.init) } | 103 | } |
103 | __con_initcall_end = .; | 104 | |
104 | SECURITY_INIT | 105 | .init.data : { |
106 | *(.init.data) | ||
107 | } | ||
108 | . = ALIGN(0x100); | ||
109 | .init.setup : { | ||
110 | __setup_start = .; | ||
111 | *(.init.setup) | ||
112 | __setup_end = .; | ||
113 | } | ||
114 | .initcall.init : { | ||
115 | __initcall_start = .; | ||
116 | INITCALLS | ||
117 | __initcall_end = .; | ||
118 | } | ||
119 | |||
120 | .con_initcall.init : { | ||
121 | __con_initcall_start = .; | ||
122 | *(.con_initcall.init) | ||
123 | __con_initcall_end = .; | ||
124 | } | ||
125 | SECURITY_INIT | ||
105 | 126 | ||
106 | #ifdef CONFIG_BLK_DEV_INITRD | 127 | #ifdef CONFIG_BLK_DEV_INITRD |
107 | . = ALIGN(256); | 128 | . = ALIGN(0x100); |
108 | __initramfs_start = .; | 129 | .init.ramfs : { |
109 | .init.ramfs : { *(.init.initramfs) } | 130 | __initramfs_start = .; |
110 | . = ALIGN(2); | 131 | *(.init.ramfs) |
111 | __initramfs_end = .; | 132 | . = ALIGN(2); |
133 | __initramfs_end = .; | ||
134 | } | ||
112 | #endif | 135 | #endif |
113 | PERCPU(4096) | 136 | |
114 | . = ALIGN(4096); | 137 | PERCPU(PAGE_SIZE) |
115 | __init_end = .; | 138 | . = ALIGN(PAGE_SIZE); |
116 | /* freed after init ends here */ | 139 | __init_end = .; /* freed after init ends here */ |
117 | 140 | ||
118 | __bss_start = .; /* BSS */ | 141 | /* BSS */ |
119 | .bss : { *(.bss) } | 142 | .bss : { |
120 | . = ALIGN(2); | 143 | __bss_start = .; |
121 | __bss_stop = .; | 144 | *(.bss) |
122 | 145 | . = ALIGN(2); | |
123 | _end = . ; | 146 | __bss_stop = .; |
124 | 147 | } | |
125 | /* Sections to be discarded */ | 148 | |
126 | /DISCARD/ : { | 149 | _end = . ; |
127 | *(.exit.data) *(.exitcall.exit) | 150 | |
128 | } | 151 | /* Sections to be discarded */ |
129 | 152 | /DISCARD/ : { | |
130 | /* Stabs debugging sections. */ | 153 | *(.exit.data) |
131 | .stab 0 : { *(.stab) } | 154 | *(.exitcall.exit) |
132 | .stabstr 0 : { *(.stabstr) } | 155 | } |
133 | .stab.excl 0 : { *(.stab.excl) } | 156 | |
134 | .stab.exclstr 0 : { *(.stab.exclstr) } | 157 | /* Debugging sections. */ |
135 | .stab.index 0 : { *(.stab.index) } | 158 | STABS_DEBUG |
136 | .stab.indexstr 0 : { *(.stab.indexstr) } | 159 | DWARF_DEBUG |
137 | .comment 0 : { *(.comment) } | ||
138 | } | 160 | } |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 54055194e9af..4c1ac341ec80 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -468,7 +468,7 @@ typedef struct { | |||
468 | __u64 refselmk; | 468 | __u64 refselmk; |
469 | __u64 refcmpmk; | 469 | __u64 refcmpmk; |
470 | __u64 reserved; | 470 | __u64 reserved; |
471 | } __attribute__ ((packed)) pfault_refbk_t; | 471 | } __attribute__ ((packed, aligned(8))) pfault_refbk_t; |
472 | 472 | ||
473 | int pfault_init(void) | 473 | int pfault_init(void) |
474 | { | 474 | { |
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c index e61890217c50..71d1c427b907 100644 --- a/arch/sh/kernel/cpufreq.c +++ b/arch/sh/kernel/cpufreq.c | |||
@@ -93,7 +93,6 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
93 | policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; | 93 | policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; |
94 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 94 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
95 | 95 | ||
96 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
97 | policy->cur = sh_cpufreq_get(policy->cpu); | 96 | policy->cur = sh_cpufreq_get(policy->cpu); |
98 | policy->min = policy->cpuinfo.min_freq; | 97 | policy->min = policy->cpuinfo.min_freq; |
99 | policy->max = policy->cpuinfo.max_freq; | 98 | policy->max = policy->cpuinfo.max_freq; |
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c index 1f83fe6a82d6..791c15138f3a 100644 --- a/arch/sparc64/kernel/us2e_cpufreq.c +++ b/arch/sparc64/kernel/us2e_cpufreq.c | |||
@@ -326,7 +326,6 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) | |||
326 | table[2].index = 5; | 326 | table[2].index = 5; |
327 | table[3].frequency = CPUFREQ_TABLE_END; | 327 | table[3].frequency = CPUFREQ_TABLE_END; |
328 | 328 | ||
329 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
330 | policy->cpuinfo.transition_latency = 0; | 329 | policy->cpuinfo.transition_latency = 0; |
331 | policy->cur = clock_tick; | 330 | policy->cur = clock_tick; |
332 | 331 | ||
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32 index c624193740fd..7ff02063b858 100644 --- a/arch/x86/kernel/Makefile_32 +++ b/arch/x86/kernel/Makefile_32 | |||
@@ -7,7 +7,7 @@ extra-y := head_32.o init_task_32.o vmlinux.lds | |||
7 | obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \ | 7 | obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \ |
8 | ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \ | 8 | ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \ |
9 | pci-dma_32.o i386_ksyms_32.o i387_32.o bootflag.o e820_32.o\ | 9 | pci-dma_32.o i386_ksyms_32.o i387_32.o bootflag.o e820_32.o\ |
10 | quirks.o i8237.o topology.o alternative.o i8253_32.o tsc_32.o | 10 | quirks.o i8237.o topology.o alternative.o i8253.o tsc_32.o |
11 | 11 | ||
12 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 12 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
13 | obj-y += cpu/ | 13 | obj-y += cpu/ |
@@ -37,9 +37,9 @@ obj-$(CONFIG_EFI) += efi_32.o efi_stub_32.o | |||
37 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o | 37 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o |
38 | obj-$(CONFIG_VM86) += vm86_32.o | 38 | obj-$(CONFIG_VM86) += vm86_32.o |
39 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 39 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
40 | obj-$(CONFIG_HPET_TIMER) += hpet_32.o | 40 | obj-$(CONFIG_HPET_TIMER) += hpet.o |
41 | obj-$(CONFIG_K8_NB) += k8.o | 41 | obj-$(CONFIG_K8_NB) += k8.o |
42 | obj-$(CONFIG_MGEODE_LX) += geode_32.o | 42 | obj-$(CONFIG_MGEODE_LX) += geode_32.o mfgpt_32.o |
43 | 43 | ||
44 | obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o | 44 | obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o |
45 | obj-$(CONFIG_PARAVIRT) += paravirt_32.o | 45 | obj-$(CONFIG_PARAVIRT) += paravirt_32.o |
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64 index 3ab017a0a3b9..43da66213a47 100644 --- a/arch/x86/kernel/Makefile_64 +++ b/arch/x86/kernel/Makefile_64 | |||
@@ -8,8 +8,8 @@ obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \ | |||
8 | ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \ | 8 | ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \ |
9 | x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \ | 9 | x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \ |
10 | setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \ | 10 | setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \ |
11 | pci-dma_64.o pci-nommu_64.o alternative.o hpet_64.o tsc_64.o bugs_64.o \ | 11 | pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \ |
12 | perfctr-watchdog.o | 12 | perfctr-watchdog.o i8253.o |
13 | 13 | ||
14 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 14 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
15 | obj-$(CONFIG_X86_MCE) += mce_64.o therm_throt.o | 15 | obj-$(CONFIG_X86_MCE) += mce_64.o therm_throt.o |
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 925758dbca0c..395928de28ea 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/sysdev.h> | 25 | #include <linux/sysdev.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/ioport.h> | 27 | #include <linux/ioport.h> |
28 | #include <linux/clockchips.h> | ||
28 | 29 | ||
29 | #include <asm/atomic.h> | 30 | #include <asm/atomic.h> |
30 | #include <asm/smp.h> | 31 | #include <asm/smp.h> |
@@ -39,12 +40,9 @@ | |||
39 | #include <asm/hpet.h> | 40 | #include <asm/hpet.h> |
40 | #include <asm/apic.h> | 41 | #include <asm/apic.h> |
41 | 42 | ||
42 | int apic_mapped; | ||
43 | int apic_verbosity; | 43 | int apic_verbosity; |
44 | int apic_runs_main_timer; | 44 | int disable_apic_timer __cpuinitdata; |
45 | int apic_calibrate_pmtmr __initdata; | 45 | static int apic_calibrate_pmtmr __initdata; |
46 | |||
47 | int disable_apic_timer __initdata; | ||
48 | 46 | ||
49 | /* Local APIC timer works in C2? */ | 47 | /* Local APIC timer works in C2? */ |
50 | int local_apic_timer_c2_ok; | 48 | int local_apic_timer_c2_ok; |
@@ -56,14 +54,78 @@ static struct resource lapic_resource = { | |||
56 | .flags = IORESOURCE_MEM | IORESOURCE_BUSY, | 54 | .flags = IORESOURCE_MEM | IORESOURCE_BUSY, |
57 | }; | 55 | }; |
58 | 56 | ||
57 | static unsigned int calibration_result; | ||
58 | |||
59 | static int lapic_next_event(unsigned long delta, | ||
60 | struct clock_event_device *evt); | ||
61 | static void lapic_timer_setup(enum clock_event_mode mode, | ||
62 | struct clock_event_device *evt); | ||
63 | |||
64 | static void lapic_timer_broadcast(cpumask_t mask); | ||
65 | |||
66 | static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen); | ||
67 | |||
68 | static struct clock_event_device lapic_clockevent = { | ||
69 | .name = "lapic", | ||
70 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | ||
71 | | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, | ||
72 | .shift = 32, | ||
73 | .set_mode = lapic_timer_setup, | ||
74 | .set_next_event = lapic_next_event, | ||
75 | .broadcast = lapic_timer_broadcast, | ||
76 | .rating = 100, | ||
77 | .irq = -1, | ||
78 | }; | ||
79 | static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | ||
80 | |||
81 | static int lapic_next_event(unsigned long delta, | ||
82 | struct clock_event_device *evt) | ||
83 | { | ||
84 | apic_write(APIC_TMICT, delta); | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | static void lapic_timer_setup(enum clock_event_mode mode, | ||
89 | struct clock_event_device *evt) | ||
90 | { | ||
91 | unsigned long flags; | ||
92 | unsigned int v; | ||
93 | |||
94 | /* Lapic used as dummy for broadcast ? */ | ||
95 | if (evt->features & CLOCK_EVT_FEAT_DUMMY) | ||
96 | return; | ||
97 | |||
98 | local_irq_save(flags); | ||
99 | |||
100 | switch (mode) { | ||
101 | case CLOCK_EVT_MODE_PERIODIC: | ||
102 | case CLOCK_EVT_MODE_ONESHOT: | ||
103 | __setup_APIC_LVTT(calibration_result, | ||
104 | mode != CLOCK_EVT_MODE_PERIODIC, 1); | ||
105 | break; | ||
106 | case CLOCK_EVT_MODE_UNUSED: | ||
107 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
108 | v = apic_read(APIC_LVTT); | ||
109 | v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); | ||
110 | apic_write(APIC_LVTT, v); | ||
111 | break; | ||
112 | case CLOCK_EVT_MODE_RESUME: | ||
113 | /* Nothing to do here */ | ||
114 | break; | ||
115 | } | ||
116 | |||
117 | local_irq_restore(flags); | ||
118 | } | ||
119 | |||
59 | /* | 120 | /* |
60 | * cpu_mask that denotes the CPUs that needs timer interrupt coming in as | 121 | * Local APIC timer broadcast function |
61 | * IPIs in place of local APIC timers | ||
62 | */ | 122 | */ |
63 | static cpumask_t timer_interrupt_broadcast_ipi_mask; | 123 | static void lapic_timer_broadcast(cpumask_t mask) |
64 | 124 | { | |
65 | /* Using APIC to generate smp_local_timer_interrupt? */ | 125 | #ifdef CONFIG_SMP |
66 | int using_apic_timer __read_mostly = 0; | 126 | send_IPI_mask(mask, LOCAL_TIMER_VECTOR); |
127 | #endif | ||
128 | } | ||
67 | 129 | ||
68 | static void apic_pm_activate(void); | 130 | static void apic_pm_activate(void); |
69 | 131 | ||
@@ -184,7 +246,10 @@ void disconnect_bsp_APIC(int virt_wire_setup) | |||
184 | apic_write(APIC_SPIV, value); | 246 | apic_write(APIC_SPIV, value); |
185 | 247 | ||
186 | if (!virt_wire_setup) { | 248 | if (!virt_wire_setup) { |
187 | /* For LVT0 make it edge triggered, active high, external and enabled */ | 249 | /* |
250 | * For LVT0 make it edge triggered, active high, | ||
251 | * external and enabled | ||
252 | */ | ||
188 | value = apic_read(APIC_LVT0); | 253 | value = apic_read(APIC_LVT0); |
189 | value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | | 254 | value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | |
190 | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | | 255 | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | |
@@ -420,10 +485,12 @@ void __cpuinit setup_local_APIC (void) | |||
420 | value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; | 485 | value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; |
421 | if (!smp_processor_id() && !value) { | 486 | if (!smp_processor_id() && !value) { |
422 | value = APIC_DM_EXTINT; | 487 | value = APIC_DM_EXTINT; |
423 | apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", smp_processor_id()); | 488 | apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", |
489 | smp_processor_id()); | ||
424 | } else { | 490 | } else { |
425 | value = APIC_DM_EXTINT | APIC_LVT_MASKED; | 491 | value = APIC_DM_EXTINT | APIC_LVT_MASKED; |
426 | apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", smp_processor_id()); | 492 | apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", |
493 | smp_processor_id()); | ||
427 | } | 494 | } |
428 | apic_write(APIC_LVT0, value); | 495 | apic_write(APIC_LVT0, value); |
429 | 496 | ||
@@ -706,8 +773,8 @@ void __init init_apic_mappings(void) | |||
706 | apic_phys = mp_lapic_addr; | 773 | apic_phys = mp_lapic_addr; |
707 | 774 | ||
708 | set_fixmap_nocache(FIX_APIC_BASE, apic_phys); | 775 | set_fixmap_nocache(FIX_APIC_BASE, apic_phys); |
709 | apic_mapped = 1; | 776 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", |
710 | apic_printk(APIC_VERBOSE,"mapped APIC to %16lx (%16lx)\n", APIC_BASE, apic_phys); | 777 | APIC_BASE, apic_phys); |
711 | 778 | ||
712 | /* Put local APIC into the resource map. */ | 779 | /* Put local APIC into the resource map. */ |
713 | lapic_resource.start = apic_phys; | 780 | lapic_resource.start = apic_phys; |
@@ -730,12 +797,14 @@ void __init init_apic_mappings(void) | |||
730 | if (smp_found_config) { | 797 | if (smp_found_config) { |
731 | ioapic_phys = mp_ioapics[i].mpc_apicaddr; | 798 | ioapic_phys = mp_ioapics[i].mpc_apicaddr; |
732 | } else { | 799 | } else { |
733 | ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); | 800 | ioapic_phys = (unsigned long) |
801 | alloc_bootmem_pages(PAGE_SIZE); | ||
734 | ioapic_phys = __pa(ioapic_phys); | 802 | ioapic_phys = __pa(ioapic_phys); |
735 | } | 803 | } |
736 | set_fixmap_nocache(idx, ioapic_phys); | 804 | set_fixmap_nocache(idx, ioapic_phys); |
737 | apic_printk(APIC_VERBOSE,"mapped IOAPIC to %016lx (%016lx)\n", | 805 | apic_printk(APIC_VERBOSE, |
738 | __fix_to_virt(idx), ioapic_phys); | 806 | "mapped IOAPIC to %016lx (%016lx)\n", |
807 | __fix_to_virt(idx), ioapic_phys); | ||
739 | idx++; | 808 | idx++; |
740 | 809 | ||
741 | if (ioapic_res != NULL) { | 810 | if (ioapic_res != NULL) { |
@@ -758,16 +827,14 @@ void __init init_apic_mappings(void) | |||
758 | * P5 APIC double write bug. | 827 | * P5 APIC double write bug. |
759 | */ | 828 | */ |
760 | 829 | ||
761 | #define APIC_DIVISOR 16 | 830 | static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) |
762 | |||
763 | static void __setup_APIC_LVTT(unsigned int clocks) | ||
764 | { | 831 | { |
765 | unsigned int lvtt_value, tmp_value; | 832 | unsigned int lvtt_value, tmp_value; |
766 | int cpu = smp_processor_id(); | ||
767 | 833 | ||
768 | lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR; | 834 | lvtt_value = LOCAL_TIMER_VECTOR; |
769 | 835 | if (!oneshot) | |
770 | if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) | 836 | lvtt_value |= APIC_LVT_TIMER_PERIODIC; |
837 | if (!irqen) | ||
771 | lvtt_value |= APIC_LVT_MASKED; | 838 | lvtt_value |= APIC_LVT_MASKED; |
772 | 839 | ||
773 | apic_write(APIC_LVTT, lvtt_value); | 840 | apic_write(APIC_LVTT, lvtt_value); |
@@ -780,44 +847,18 @@ static void __setup_APIC_LVTT(unsigned int clocks) | |||
780 | & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | 847 | & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
781 | | APIC_TDR_DIV_16); | 848 | | APIC_TDR_DIV_16); |
782 | 849 | ||
783 | apic_write(APIC_TMICT, clocks/APIC_DIVISOR); | 850 | if (!oneshot) |
851 | apic_write(APIC_TMICT, clocks); | ||
784 | } | 852 | } |
785 | 853 | ||
786 | static void setup_APIC_timer(unsigned int clocks) | 854 | static void setup_APIC_timer(void) |
787 | { | 855 | { |
788 | unsigned long flags; | 856 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); |
789 | 857 | ||
790 | local_irq_save(flags); | 858 | memcpy(levt, &lapic_clockevent, sizeof(*levt)); |
859 | levt->cpumask = cpumask_of_cpu(smp_processor_id()); | ||
791 | 860 | ||
792 | /* wait for irq slice */ | 861 | clockevents_register_device(levt); |
793 | if (hpet_address && hpet_use_timer) { | ||
794 | u32 trigger = hpet_readl(HPET_T0_CMP); | ||
795 | while (hpet_readl(HPET_T0_CMP) == trigger) | ||
796 | /* do nothing */ ; | ||
797 | } else { | ||
798 | int c1, c2; | ||
799 | outb_p(0x00, 0x43); | ||
800 | c2 = inb_p(0x40); | ||
801 | c2 |= inb_p(0x40) << 8; | ||
802 | do { | ||
803 | c1 = c2; | ||
804 | outb_p(0x00, 0x43); | ||
805 | c2 = inb_p(0x40); | ||
806 | c2 |= inb_p(0x40) << 8; | ||
807 | } while (c2 - c1 < 300); | ||
808 | } | ||
809 | __setup_APIC_LVTT(clocks); | ||
810 | /* Turn off PIT interrupt if we use APIC timer as main timer. | ||
811 | Only works with the PM timer right now | ||
812 | TBD fix it for HPET too. */ | ||
813 | if ((pmtmr_ioport != 0) && | ||
814 | smp_processor_id() == boot_cpu_id && | ||
815 | apic_runs_main_timer == 1 && | ||
816 | !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) { | ||
817 | stop_timer_interrupt(); | ||
818 | apic_runs_main_timer++; | ||
819 | } | ||
820 | local_irq_restore(flags); | ||
821 | } | 862 | } |
822 | 863 | ||
823 | /* | 864 | /* |
@@ -835,17 +876,22 @@ static void setup_APIC_timer(unsigned int clocks) | |||
835 | 876 | ||
836 | #define TICK_COUNT 100000000 | 877 | #define TICK_COUNT 100000000 |
837 | 878 | ||
838 | static int __init calibrate_APIC_clock(void) | 879 | static void __init calibrate_APIC_clock(void) |
839 | { | 880 | { |
840 | unsigned apic, apic_start; | 881 | unsigned apic, apic_start; |
841 | unsigned long tsc, tsc_start; | 882 | unsigned long tsc, tsc_start; |
842 | int result; | 883 | int result; |
884 | |||
885 | local_irq_disable(); | ||
886 | |||
843 | /* | 887 | /* |
844 | * Put whatever arbitrary (but long enough) timeout | 888 | * Put whatever arbitrary (but long enough) timeout |
845 | * value into the APIC clock, we just want to get the | 889 | * value into the APIC clock, we just want to get the |
846 | * counter running for calibration. | 890 | * counter running for calibration. |
891 | * | ||
892 | * No interrupt enable ! | ||
847 | */ | 893 | */ |
848 | __setup_APIC_LVTT(4000000000); | 894 | __setup_APIC_LVTT(250000000, 0, 0); |
849 | 895 | ||
850 | apic_start = apic_read(APIC_TMCCT); | 896 | apic_start = apic_read(APIC_TMCCT); |
851 | #ifdef CONFIG_X86_PM_TIMER | 897 | #ifdef CONFIG_X86_PM_TIMER |
@@ -867,123 +913,62 @@ static int __init calibrate_APIC_clock(void) | |||
867 | result = (apic_start - apic) * 1000L * tsc_khz / | 913 | result = (apic_start - apic) * 1000L * tsc_khz / |
868 | (tsc - tsc_start); | 914 | (tsc - tsc_start); |
869 | } | 915 | } |
870 | printk("result %d\n", result); | ||
871 | 916 | ||
917 | local_irq_enable(); | ||
918 | |||
919 | printk(KERN_DEBUG "APIC timer calibration result %d\n", result); | ||
872 | 920 | ||
873 | printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n", | 921 | printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n", |
874 | result / 1000 / 1000, result / 1000 % 1000); | 922 | result / 1000 / 1000, result / 1000 % 1000); |
875 | 923 | ||
876 | return result * APIC_DIVISOR / HZ; | 924 | /* Calculate the scaled math multiplication factor */ |
877 | } | 925 | lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32); |
926 | lapic_clockevent.max_delta_ns = | ||
927 | clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); | ||
928 | lapic_clockevent.min_delta_ns = | ||
929 | clockevent_delta2ns(0xF, &lapic_clockevent); | ||
878 | 930 | ||
879 | static unsigned int calibration_result; | 931 | calibration_result = result / HZ; |
932 | } | ||
880 | 933 | ||
881 | void __init setup_boot_APIC_clock (void) | 934 | void __init setup_boot_APIC_clock (void) |
882 | { | 935 | { |
936 | /* | ||
937 | * The local apic timer can be disabled via the kernel commandline. | ||
938 | * Register the lapic timer as a dummy clock event source on SMP | ||
939 | * systems, so the broadcast mechanism is used. On UP systems simply | ||
940 | * ignore it. | ||
941 | */ | ||
883 | if (disable_apic_timer) { | 942 | if (disable_apic_timer) { |
884 | printk(KERN_INFO "Disabling APIC timer\n"); | 943 | printk(KERN_INFO "Disabling APIC timer\n"); |
944 | /* No broadcast on UP ! */ | ||
945 | if (num_possible_cpus() > 1) | ||
946 | setup_APIC_timer(); | ||
885 | return; | 947 | return; |
886 | } | 948 | } |
887 | 949 | ||
888 | printk(KERN_INFO "Using local APIC timer interrupts.\n"); | 950 | printk(KERN_INFO "Using local APIC timer interrupts.\n"); |
889 | using_apic_timer = 1; | 951 | calibrate_APIC_clock(); |
890 | |||
891 | local_irq_disable(); | ||
892 | 952 | ||
893 | calibration_result = calibrate_APIC_clock(); | ||
894 | /* | 953 | /* |
895 | * Now set up the timer for real. | 954 | * If nmi_watchdog is set to IO_APIC, we need the |
955 | * PIT/HPET going. Otherwise register lapic as a dummy | ||
956 | * device. | ||
896 | */ | 957 | */ |
897 | setup_APIC_timer(calibration_result); | 958 | if (nmi_watchdog != NMI_IO_APIC) |
959 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; | ||
960 | else | ||
961 | printk(KERN_WARNING "APIC timer registered as dummy," | ||
962 | " due to nmi_watchdog=1!\n"); | ||
898 | 963 | ||
899 | local_irq_enable(); | 964 | setup_APIC_timer(); |
900 | } | 965 | } |
901 | 966 | ||
902 | void __cpuinit setup_secondary_APIC_clock(void) | 967 | void __cpuinit setup_secondary_APIC_clock(void) |
903 | { | 968 | { |
904 | local_irq_disable(); /* FIXME: Do we need this? --RR */ | 969 | setup_APIC_timer(); |
905 | setup_APIC_timer(calibration_result); | ||
906 | local_irq_enable(); | ||
907 | } | 970 | } |
908 | 971 | ||
909 | void disable_APIC_timer(void) | ||
910 | { | ||
911 | if (using_apic_timer) { | ||
912 | unsigned long v; | ||
913 | |||
914 | v = apic_read(APIC_LVTT); | ||
915 | /* | ||
916 | * When an illegal vector value (0-15) is written to an LVT | ||
917 | * entry and delivery mode is Fixed, the APIC may signal an | ||
918 | * illegal vector error, with out regard to whether the mask | ||
919 | * bit is set or whether an interrupt is actually seen on input. | ||
920 | * | ||
921 | * Boot sequence might call this function when the LVTT has | ||
922 | * '0' vector value. So make sure vector field is set to | ||
923 | * valid value. | ||
924 | */ | ||
925 | v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); | ||
926 | apic_write(APIC_LVTT, v); | ||
927 | } | ||
928 | } | ||
929 | |||
930 | void enable_APIC_timer(void) | ||
931 | { | ||
932 | int cpu = smp_processor_id(); | ||
933 | |||
934 | if (using_apic_timer && | ||
935 | !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) { | ||
936 | unsigned long v; | ||
937 | |||
938 | v = apic_read(APIC_LVTT); | ||
939 | apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED); | ||
940 | } | ||
941 | } | ||
942 | |||
943 | void switch_APIC_timer_to_ipi(void *cpumask) | ||
944 | { | ||
945 | cpumask_t mask = *(cpumask_t *)cpumask; | ||
946 | int cpu = smp_processor_id(); | ||
947 | |||
948 | if (cpu_isset(cpu, mask) && | ||
949 | !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) { | ||
950 | disable_APIC_timer(); | ||
951 | cpu_set(cpu, timer_interrupt_broadcast_ipi_mask); | ||
952 | } | ||
953 | } | ||
954 | EXPORT_SYMBOL(switch_APIC_timer_to_ipi); | ||
955 | |||
956 | void smp_send_timer_broadcast_ipi(void) | ||
957 | { | ||
958 | int cpu = smp_processor_id(); | ||
959 | cpumask_t mask; | ||
960 | |||
961 | cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask); | ||
962 | |||
963 | if (cpu_isset(cpu, mask)) { | ||
964 | cpu_clear(cpu, mask); | ||
965 | add_pda(apic_timer_irqs, 1); | ||
966 | smp_local_timer_interrupt(); | ||
967 | } | ||
968 | |||
969 | if (!cpus_empty(mask)) { | ||
970 | send_IPI_mask(mask, LOCAL_TIMER_VECTOR); | ||
971 | } | ||
972 | } | ||
973 | |||
974 | void switch_ipi_to_APIC_timer(void *cpumask) | ||
975 | { | ||
976 | cpumask_t mask = *(cpumask_t *)cpumask; | ||
977 | int cpu = smp_processor_id(); | ||
978 | |||
979 | if (cpu_isset(cpu, mask) && | ||
980 | cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) { | ||
981 | cpu_clear(cpu, timer_interrupt_broadcast_ipi_mask); | ||
982 | enable_APIC_timer(); | ||
983 | } | ||
984 | } | ||
985 | EXPORT_SYMBOL(switch_ipi_to_APIC_timer); | ||
986 | |||
987 | int setup_profiling_timer(unsigned int multiplier) | 972 | int setup_profiling_timer(unsigned int multiplier) |
988 | { | 973 | { |
989 | return -EINVAL; | 974 | return -EINVAL; |
@@ -997,8 +982,6 @@ void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector, | |||
997 | apic_write(reg, v); | 982 | apic_write(reg, v); |
998 | } | 983 | } |
999 | 984 | ||
1000 | #undef APIC_DIVISOR | ||
1001 | |||
1002 | /* | 985 | /* |
1003 | * Local timer interrupt handler. It does both profiling and | 986 | * Local timer interrupt handler. It does both profiling and |
1004 | * process statistics/rescheduling. | 987 | * process statistics/rescheduling. |
@@ -1011,22 +994,34 @@ void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector, | |||
1011 | 994 | ||
1012 | void smp_local_timer_interrupt(void) | 995 | void smp_local_timer_interrupt(void) |
1013 | { | 996 | { |
1014 | profile_tick(CPU_PROFILING); | 997 | int cpu = smp_processor_id(); |
1015 | #ifdef CONFIG_SMP | 998 | struct clock_event_device *evt = &per_cpu(lapic_events, cpu); |
1016 | update_process_times(user_mode(get_irq_regs())); | 999 | |
1017 | #endif | ||
1018 | if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id) | ||
1019 | main_timer_handler(); | ||
1020 | /* | 1000 | /* |
1021 | * We take the 'long' return path, and there every subsystem | 1001 | * Normally we should not be here till LAPIC has been initialized but |
1022 | * grabs the appropriate locks (kernel lock/ irq lock). | 1002 | * in some cases like kdump, its possible that there is a pending LAPIC |
1003 | * timer interrupt from previous kernel's context and is delivered in | ||
1004 | * new kernel the moment interrupts are enabled. | ||
1023 | * | 1005 | * |
1024 | * We might want to decouple profiling from the 'long path', | 1006 | * Interrupts are enabled early and LAPIC is setup much later, hence |
1025 | * and do the profiling totally in assembly. | 1007 | * its possible that when we get here evt->event_handler is NULL. |
1026 | * | 1008 | * Check for event_handler being NULL and discard the interrupt as |
1027 | * Currently this isn't too much of an issue (performance wise), | 1009 | * spurious. |
1028 | * we can take more than 100K local irqs per second on a 100 MHz P5. | 1010 | */ |
1011 | if (!evt->event_handler) { | ||
1012 | printk(KERN_WARNING | ||
1013 | "Spurious LAPIC timer interrupt on cpu %d\n", cpu); | ||
1014 | /* Switch it off */ | ||
1015 | lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt); | ||
1016 | return; | ||
1017 | } | ||
1018 | |||
1019 | /* | ||
1020 | * the NMI deadlock-detector uses this. | ||
1029 | */ | 1021 | */ |
1022 | add_pda(apic_timer_irqs, 1); | ||
1023 | |||
1024 | evt->event_handler(evt); | ||
1030 | } | 1025 | } |
1031 | 1026 | ||
1032 | /* | 1027 | /* |
@@ -1042,11 +1037,6 @@ void smp_apic_timer_interrupt(struct pt_regs *regs) | |||
1042 | struct pt_regs *old_regs = set_irq_regs(regs); | 1037 | struct pt_regs *old_regs = set_irq_regs(regs); |
1043 | 1038 | ||
1044 | /* | 1039 | /* |
1045 | * the NMI deadlock-detector uses this. | ||
1046 | */ | ||
1047 | add_pda(apic_timer_irqs, 1); | ||
1048 | |||
1049 | /* | ||
1050 | * NOTE! We'd better ACK the irq immediately, | 1040 | * NOTE! We'd better ACK the irq immediately, |
1051 | * because timer handling can be slow. | 1041 | * because timer handling can be slow. |
1052 | */ | 1042 | */ |
@@ -1225,29 +1215,13 @@ static __init int setup_noapictimer(char *str) | |||
1225 | disable_apic_timer = 1; | 1215 | disable_apic_timer = 1; |
1226 | return 1; | 1216 | return 1; |
1227 | } | 1217 | } |
1228 | 1218 | __setup("noapictimer", setup_noapictimer); | |
1229 | static __init int setup_apicmaintimer(char *str) | ||
1230 | { | ||
1231 | apic_runs_main_timer = 1; | ||
1232 | nohpet = 1; | ||
1233 | return 1; | ||
1234 | } | ||
1235 | __setup("apicmaintimer", setup_apicmaintimer); | ||
1236 | |||
1237 | static __init int setup_noapicmaintimer(char *str) | ||
1238 | { | ||
1239 | apic_runs_main_timer = -1; | ||
1240 | return 1; | ||
1241 | } | ||
1242 | __setup("noapicmaintimer", setup_noapicmaintimer); | ||
1243 | 1219 | ||
1244 | static __init int setup_apicpmtimer(char *s) | 1220 | static __init int setup_apicpmtimer(char *s) |
1245 | { | 1221 | { |
1246 | apic_calibrate_pmtmr = 1; | 1222 | apic_calibrate_pmtmr = 1; |
1247 | notsc_setup(NULL); | 1223 | notsc_setup(NULL); |
1248 | return setup_apicmaintimer(NULL); | 1224 | return 0; |
1249 | } | 1225 | } |
1250 | __setup("apicpmtimer", setup_apicpmtimer); | 1226 | __setup("apicpmtimer", setup_apicpmtimer); |
1251 | 1227 | ||
1252 | __setup("noapictimer", setup_noapictimer); | ||
1253 | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index b6434a7ef8b2..ffd01e5dcb52 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
@@ -646,7 +646,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
646 | policy->cpuinfo.transition_latency = | 646 | policy->cpuinfo.transition_latency = |
647 | perf->states[i].transition_latency * 1000; | 647 | perf->states[i].transition_latency * 1000; |
648 | } | 648 | } |
649 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
650 | 649 | ||
651 | data->max_freq = perf->states[0].core_frequency * 1000; | 650 | data->max_freq = perf->states[0].core_frequency * 1000; |
652 | /* table init */ | 651 | /* table init */ |
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c index 66acd5039918..32f0bda3fc95 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c | |||
@@ -363,7 +363,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) | |||
363 | policy->cur = nforce2_get(policy->cpu); | 363 | policy->cur = nforce2_get(policy->cpu); |
364 | policy->min = policy->cpuinfo.min_freq; | 364 | policy->min = policy->cpuinfo.min_freq; |
365 | policy->max = policy->cpuinfo.max_freq; | 365 | policy->max = policy->cpuinfo.max_freq; |
366 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
367 | 366 | ||
368 | return 0; | 367 | return 0; |
369 | } | 368 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c index f43d98e11cc7..c11baaf9f2b4 100644 --- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c +++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c | |||
@@ -253,7 +253,6 @@ static int eps_cpu_init(struct cpufreq_policy *policy) | |||
253 | f_table[k].frequency = CPUFREQ_TABLE_END; | 253 | f_table[k].frequency = CPUFREQ_TABLE_END; |
254 | } | 254 | } |
255 | 255 | ||
256 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
257 | policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ | 256 | policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ |
258 | policy->cur = fsb * current_multiplier; | 257 | policy->cur = fsb * current_multiplier; |
259 | 258 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c index f317276afa7a..1e7ae7dafcf6 100644 --- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c +++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c | |||
@@ -219,7 +219,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) | |||
219 | } | 219 | } |
220 | 220 | ||
221 | /* cpuinfo and default policy values */ | 221 | /* cpuinfo and default policy values */ |
222 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
223 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 222 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
224 | policy->cur = elanfreq_get_cpu_frequency(0); | 223 | policy->cur = elanfreq_get_cpu_frequency(0); |
225 | 224 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c index 461dabc4e495..ed2bda127c44 100644 --- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c +++ b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c | |||
@@ -420,7 +420,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | |||
420 | policy->min = maxfreq / POLICY_MIN_DIV; | 420 | policy->min = maxfreq / POLICY_MIN_DIV; |
421 | policy->max = maxfreq; | 421 | policy->max = maxfreq; |
422 | policy->cur = curfreq; | 422 | policy->cur = curfreq; |
423 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
424 | policy->cpuinfo.min_freq = maxfreq / max_duration; | 423 | policy->cpuinfo.min_freq = maxfreq / max_duration; |
425 | policy->cpuinfo.max_freq = maxfreq; | 424 | policy->cpuinfo.max_freq = maxfreq; |
426 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 425 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index f0cce3c2dc3a..5045f5d583c8 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c | |||
@@ -710,6 +710,10 @@ static int enable_arbiter_disable(void) | |||
710 | reg = 0x78; | 710 | reg = 0x78; |
711 | dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, | 711 | dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, |
712 | NULL); | 712 | NULL); |
713 | /* Find PM133/VT8605 host bridge */ | ||
714 | if (dev == NULL) | ||
715 | dev = pci_get_device(PCI_VENDOR_ID_VIA, | ||
716 | PCI_DEVICE_ID_VIA_8605_0, NULL); | ||
713 | /* Find CLE266 host bridge */ | 717 | /* Find CLE266 host bridge */ |
714 | if (dev == NULL) { | 718 | if (dev == NULL) { |
715 | reg = 0x76; | 719 | reg = 0x76; |
@@ -918,7 +922,6 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | |||
918 | if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0)) | 922 | if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0)) |
919 | longhaul_setup_voltagescaling(); | 923 | longhaul_setup_voltagescaling(); |
920 | 924 | ||
921 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
922 | policy->cpuinfo.transition_latency = 200000; /* nsec */ | 925 | policy->cpuinfo.transition_latency = 200000; /* nsec */ |
923 | policy->cur = calc_speed(longhaul_get_cpu_mult()); | 926 | policy->cur = calc_speed(longhaul_get_cpu_mult()); |
924 | 927 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 4c76b511e194..8eb414b906d2 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | |||
@@ -229,7 +229,6 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) | |||
229 | cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); | 229 | cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); |
230 | 230 | ||
231 | /* cpuinfo and default policy values */ | 231 | /* cpuinfo and default policy values */ |
232 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
233 | policy->cpuinfo.transition_latency = 1000000; /* assumed */ | 232 | policy->cpuinfo.transition_latency = 1000000; /* assumed */ |
234 | policy->cur = stock_freq; | 233 | policy->cur = stock_freq; |
235 | 234 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c index f89524051e4a..6d0285339317 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c | |||
@@ -160,7 +160,6 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy) | |||
160 | } | 160 | } |
161 | 161 | ||
162 | /* cpuinfo and default policy values */ | 162 | /* cpuinfo and default policy values */ |
163 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
164 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 163 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
165 | policy->cur = busfreq * max_multiplier; | 164 | policy->cur = busfreq * max_multiplier; |
166 | 165 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index ca3e1d341889..7decd6a50ffa 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c | |||
@@ -637,8 +637,6 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy) | |||
637 | printk (KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n", | 637 | printk (KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n", |
638 | minimum_speed/1000, maximum_speed/1000); | 638 | minimum_speed/1000, maximum_speed/1000); |
639 | 639 | ||
640 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
641 | |||
642 | policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency); | 640 | policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency); |
643 | 641 | ||
644 | policy->cur = powernow_get(0); | 642 | policy->cur = powernow_get(0); |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 34ed53a06730..b273b69cfddf 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -76,7 +76,10 @@ static u32 find_khz_freq_from_fid(u32 fid) | |||
76 | /* Return a frequency in MHz, given an input fid and did */ | 76 | /* Return a frequency in MHz, given an input fid and did */ |
77 | static u32 find_freq_from_fiddid(u32 fid, u32 did) | 77 | static u32 find_freq_from_fiddid(u32 fid, u32 did) |
78 | { | 78 | { |
79 | return 100 * (fid + 0x10) >> did; | 79 | if (current_cpu_data.x86 == 0x10) |
80 | return 100 * (fid + 0x10) >> did; | ||
81 | else | ||
82 | return 100 * (fid + 0x8) >> did; | ||
80 | } | 83 | } |
81 | 84 | ||
82 | static u32 find_khz_freq_from_fiddid(u32 fid, u32 did) | 85 | static u32 find_khz_freq_from_fiddid(u32 fid, u32 did) |
@@ -1208,7 +1211,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1208 | /* run on any CPU again */ | 1211 | /* run on any CPU again */ |
1209 | set_cpus_allowed(current, oldmask); | 1212 | set_cpus_allowed(current, oldmask); |
1210 | 1213 | ||
1211 | pol->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
1212 | if (cpu_family == CPU_HW_PSTATE) | 1214 | if (cpu_family == CPU_HW_PSTATE) |
1213 | pol->cpus = cpumask_of_cpu(pol->cpu); | 1215 | pol->cpus = cpumask_of_cpu(pol->cpu); |
1214 | else | 1216 | else |
@@ -1325,21 +1327,16 @@ static struct cpufreq_driver cpufreq_amd64_driver = { | |||
1325 | static int __cpuinit powernowk8_init(void) | 1327 | static int __cpuinit powernowk8_init(void) |
1326 | { | 1328 | { |
1327 | unsigned int i, supported_cpus = 0; | 1329 | unsigned int i, supported_cpus = 0; |
1328 | unsigned int booted_cores = 1; | ||
1329 | 1330 | ||
1330 | for_each_online_cpu(i) { | 1331 | for_each_online_cpu(i) { |
1331 | if (check_supported_cpu(i)) | 1332 | if (check_supported_cpu(i)) |
1332 | supported_cpus++; | 1333 | supported_cpus++; |
1333 | } | 1334 | } |
1334 | 1335 | ||
1335 | #ifdef CONFIG_SMP | ||
1336 | booted_cores = cpu_data[0].booted_cores; | ||
1337 | #endif | ||
1338 | |||
1339 | if (supported_cpus == num_online_cpus()) { | 1336 | if (supported_cpus == num_online_cpus()) { |
1340 | printk(KERN_INFO PFX "Found %d %s " | 1337 | printk(KERN_INFO PFX "Found %d %s " |
1341 | "processors (%d cpu cores) (" VERSION ")\n", | 1338 | "processors (%d cpu cores) (" VERSION ")\n", |
1342 | supported_cpus/booted_cores, | 1339 | num_online_nodes(), |
1343 | boot_cpu_data.x86_model_id, supported_cpus); | 1340 | boot_cpu_data.x86_model_id, supported_cpus); |
1344 | return cpufreq_register_driver(&cpufreq_amd64_driver); | 1341 | return cpufreq_register_driver(&cpufreq_amd64_driver); |
1345 | } | 1342 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c index b8fb4b521c62..d9f3e90a7ae0 100644 --- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c +++ b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c | |||
@@ -111,7 +111,6 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy) | |||
111 | return -ENODEV; | 111 | return -ENODEV; |
112 | 112 | ||
113 | /* cpuinfo and default policy values */ | 113 | /* cpuinfo and default policy values */ |
114 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
115 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ | 114 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ |
116 | policy->cur = sc520_freq_get_cpu_frequency(0); | 115 | policy->cur = sc520_freq_get_cpu_frequency(0); |
117 | 116 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 6c5dc2c85aeb..811d47438546 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | |||
@@ -393,7 +393,6 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
393 | 393 | ||
394 | freq = get_cur_freq(policy->cpu); | 394 | freq = get_cur_freq(policy->cpu); |
395 | 395 | ||
396 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
397 | policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */ | 396 | policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */ |
398 | policy->cur = freq; | 397 | policy->cur = freq; |
399 | 398 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index a5b2346faf1f..36685e8f7be1 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | |||
@@ -348,7 +348,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
348 | (speed / 1000)); | 348 | (speed / 1000)); |
349 | 349 | ||
350 | /* cpuinfo and default policy values */ | 350 | /* cpuinfo and default policy values */ |
351 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
352 | policy->cur = speed; | 351 | policy->cur = speed; |
353 | 352 | ||
354 | result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); | 353 | result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c index e1c509aa3054..f2b5a621d27b 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c | |||
@@ -290,7 +290,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
290 | (speed / 1000)); | 290 | (speed / 1000)); |
291 | 291 | ||
292 | /* cpuinfo and default policy values */ | 292 | /* cpuinfo and default policy values */ |
293 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
294 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 293 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
295 | policy->cur = speed; | 294 | policy->cur = speed; |
296 | 295 | ||
diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c index 41e8aec4c61d..f12d8c5d9809 100644 --- a/arch/x86/kernel/geode_32.c +++ b/arch/x86/kernel/geode_32.c | |||
@@ -145,10 +145,14 @@ EXPORT_SYMBOL_GPL(geode_gpio_setup_event); | |||
145 | 145 | ||
146 | static int __init geode_southbridge_init(void) | 146 | static int __init geode_southbridge_init(void) |
147 | { | 147 | { |
148 | int timers; | ||
149 | |||
148 | if (!is_geode()) | 150 | if (!is_geode()) |
149 | return -ENODEV; | 151 | return -ENODEV; |
150 | 152 | ||
151 | init_lbars(); | 153 | init_lbars(); |
154 | timers = geode_mfgpt_detect(); | ||
155 | printk(KERN_INFO "geode: %d MFGPT timers available.\n", timers); | ||
152 | return 0; | 156 | return 0; |
153 | } | 157 | } |
154 | 158 | ||
diff --git a/arch/x86/kernel/hpet_32.c b/arch/x86/kernel/hpet.c index 533d4932bc79..f8367074da0d 100644 --- a/arch/x86/kernel/hpet_32.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <linux/clocksource.h> | 1 | #include <linux/clocksource.h> |
2 | #include <linux/clockchips.h> | 2 | #include <linux/clockchips.h> |
3 | #include <linux/delay.h> | ||
3 | #include <linux/errno.h> | 4 | #include <linux/errno.h> |
4 | #include <linux/hpet.h> | 5 | #include <linux/hpet.h> |
5 | #include <linux/init.h> | 6 | #include <linux/init.h> |
@@ -7,11 +8,11 @@ | |||
7 | #include <linux/pm.h> | 8 | #include <linux/pm.h> |
8 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
9 | 10 | ||
11 | #include <asm/fixmap.h> | ||
10 | #include <asm/hpet.h> | 12 | #include <asm/hpet.h> |
13 | #include <asm/i8253.h> | ||
11 | #include <asm/io.h> | 14 | #include <asm/io.h> |
12 | 15 | ||
13 | extern struct clock_event_device *global_clock_event; | ||
14 | |||
15 | #define HPET_MASK CLOCKSOURCE_MASK(32) | 16 | #define HPET_MASK CLOCKSOURCE_MASK(32) |
16 | #define HPET_SHIFT 22 | 17 | #define HPET_SHIFT 22 |
17 | 18 | ||
@@ -22,9 +23,9 @@ extern struct clock_event_device *global_clock_event; | |||
22 | * HPET address is set in acpi/boot.c, when an ACPI entry exists | 23 | * HPET address is set in acpi/boot.c, when an ACPI entry exists |
23 | */ | 24 | */ |
24 | unsigned long hpet_address; | 25 | unsigned long hpet_address; |
25 | static void __iomem * hpet_virt_address; | 26 | static void __iomem *hpet_virt_address; |
26 | 27 | ||
27 | static inline unsigned long hpet_readl(unsigned long a) | 28 | unsigned long hpet_readl(unsigned long a) |
28 | { | 29 | { |
29 | return readl(hpet_virt_address + a); | 30 | return readl(hpet_virt_address + a); |
30 | } | 31 | } |
@@ -34,6 +35,36 @@ static inline void hpet_writel(unsigned long d, unsigned long a) | |||
34 | writel(d, hpet_virt_address + a); | 35 | writel(d, hpet_virt_address + a); |
35 | } | 36 | } |
36 | 37 | ||
38 | #ifdef CONFIG_X86_64 | ||
39 | |||
40 | #include <asm/pgtable.h> | ||
41 | |||
42 | static inline void hpet_set_mapping(void) | ||
43 | { | ||
44 | set_fixmap_nocache(FIX_HPET_BASE, hpet_address); | ||
45 | __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE); | ||
46 | hpet_virt_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE); | ||
47 | } | ||
48 | |||
49 | static inline void hpet_clear_mapping(void) | ||
50 | { | ||
51 | hpet_virt_address = NULL; | ||
52 | } | ||
53 | |||
54 | #else | ||
55 | |||
56 | static inline void hpet_set_mapping(void) | ||
57 | { | ||
58 | hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE); | ||
59 | } | ||
60 | |||
61 | static inline void hpet_clear_mapping(void) | ||
62 | { | ||
63 | iounmap(hpet_virt_address); | ||
64 | hpet_virt_address = NULL; | ||
65 | } | ||
66 | #endif | ||
67 | |||
37 | /* | 68 | /* |
38 | * HPET command line enable / disable | 69 | * HPET command line enable / disable |
39 | */ | 70 | */ |
@@ -49,6 +80,13 @@ static int __init hpet_setup(char* str) | |||
49 | } | 80 | } |
50 | __setup("hpet=", hpet_setup); | 81 | __setup("hpet=", hpet_setup); |
51 | 82 | ||
83 | static int __init disable_hpet(char *str) | ||
84 | { | ||
85 | boot_hpet_disable = 1; | ||
86 | return 1; | ||
87 | } | ||
88 | __setup("nohpet", disable_hpet); | ||
89 | |||
52 | static inline int is_hpet_capable(void) | 90 | static inline int is_hpet_capable(void) |
53 | { | 91 | { |
54 | return (!boot_hpet_disable && hpet_address); | 92 | return (!boot_hpet_disable && hpet_address); |
@@ -83,7 +121,7 @@ static void hpet_reserve_platform_timers(unsigned long id) | |||
83 | 121 | ||
84 | memset(&hd, 0, sizeof (hd)); | 122 | memset(&hd, 0, sizeof (hd)); |
85 | hd.hd_phys_address = hpet_address; | 123 | hd.hd_phys_address = hpet_address; |
86 | hd.hd_address = hpet_virt_address; | 124 | hd.hd_address = hpet; |
87 | hd.hd_nirqs = nrtimers; | 125 | hd.hd_nirqs = nrtimers; |
88 | hd.hd_flags = HPET_DATA_PLATFORM; | 126 | hd.hd_flags = HPET_DATA_PLATFORM; |
89 | hpet_reserve_timer(&hd, 0); | 127 | hpet_reserve_timer(&hd, 0); |
@@ -111,9 +149,9 @@ static void hpet_reserve_platform_timers(unsigned long id) { } | |||
111 | */ | 149 | */ |
112 | static unsigned long hpet_period; | 150 | static unsigned long hpet_period; |
113 | 151 | ||
114 | static void hpet_set_mode(enum clock_event_mode mode, | 152 | static void hpet_legacy_set_mode(enum clock_event_mode mode, |
115 | struct clock_event_device *evt); | 153 | struct clock_event_device *evt); |
116 | static int hpet_next_event(unsigned long delta, | 154 | static int hpet_legacy_next_event(unsigned long delta, |
117 | struct clock_event_device *evt); | 155 | struct clock_event_device *evt); |
118 | 156 | ||
119 | /* | 157 | /* |
@@ -122,10 +160,11 @@ static int hpet_next_event(unsigned long delta, | |||
122 | static struct clock_event_device hpet_clockevent = { | 160 | static struct clock_event_device hpet_clockevent = { |
123 | .name = "hpet", | 161 | .name = "hpet", |
124 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | 162 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
125 | .set_mode = hpet_set_mode, | 163 | .set_mode = hpet_legacy_set_mode, |
126 | .set_next_event = hpet_next_event, | 164 | .set_next_event = hpet_legacy_next_event, |
127 | .shift = 32, | 165 | .shift = 32, |
128 | .irq = 0, | 166 | .irq = 0, |
167 | .rating = 50, | ||
129 | }; | 168 | }; |
130 | 169 | ||
131 | static void hpet_start_counter(void) | 170 | static void hpet_start_counter(void) |
@@ -140,7 +179,18 @@ static void hpet_start_counter(void) | |||
140 | hpet_writel(cfg, HPET_CFG); | 179 | hpet_writel(cfg, HPET_CFG); |
141 | } | 180 | } |
142 | 181 | ||
143 | static void hpet_enable_int(void) | 182 | static void hpet_resume_device(void) |
183 | { | ||
184 | force_hpet_resume(); | ||
185 | } | ||
186 | |||
187 | static void hpet_restart_counter(void) | ||
188 | { | ||
189 | hpet_resume_device(); | ||
190 | hpet_start_counter(); | ||
191 | } | ||
192 | |||
193 | static void hpet_enable_legacy_int(void) | ||
144 | { | 194 | { |
145 | unsigned long cfg = hpet_readl(HPET_CFG); | 195 | unsigned long cfg = hpet_readl(HPET_CFG); |
146 | 196 | ||
@@ -149,7 +199,39 @@ static void hpet_enable_int(void) | |||
149 | hpet_legacy_int_enabled = 1; | 199 | hpet_legacy_int_enabled = 1; |
150 | } | 200 | } |
151 | 201 | ||
152 | static void hpet_set_mode(enum clock_event_mode mode, | 202 | static void hpet_legacy_clockevent_register(void) |
203 | { | ||
204 | uint64_t hpet_freq; | ||
205 | |||
206 | /* Start HPET legacy interrupts */ | ||
207 | hpet_enable_legacy_int(); | ||
208 | |||
209 | /* | ||
210 | * The period is a femto seconds value. We need to calculate the | ||
211 | * scaled math multiplication factor for nanosecond to hpet tick | ||
212 | * conversion. | ||
213 | */ | ||
214 | hpet_freq = 1000000000000000ULL; | ||
215 | do_div(hpet_freq, hpet_period); | ||
216 | hpet_clockevent.mult = div_sc((unsigned long) hpet_freq, | ||
217 | NSEC_PER_SEC, 32); | ||
218 | /* Calculate the min / max delta */ | ||
219 | hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, | ||
220 | &hpet_clockevent); | ||
221 | hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30, | ||
222 | &hpet_clockevent); | ||
223 | |||
224 | /* | ||
225 | * Start hpet with the boot cpu mask and make it | ||
226 | * global after the IO_APIC has been initialized. | ||
227 | */ | ||
228 | hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); | ||
229 | clockevents_register_device(&hpet_clockevent); | ||
230 | global_clock_event = &hpet_clockevent; | ||
231 | printk(KERN_DEBUG "hpet clockevent registered\n"); | ||
232 | } | ||
233 | |||
234 | static void hpet_legacy_set_mode(enum clock_event_mode mode, | ||
153 | struct clock_event_device *evt) | 235 | struct clock_event_device *evt) |
154 | { | 236 | { |
155 | unsigned long cfg, cmp, now; | 237 | unsigned long cfg, cmp, now; |
@@ -190,12 +272,12 @@ static void hpet_set_mode(enum clock_event_mode mode, | |||
190 | break; | 272 | break; |
191 | 273 | ||
192 | case CLOCK_EVT_MODE_RESUME: | 274 | case CLOCK_EVT_MODE_RESUME: |
193 | hpet_enable_int(); | 275 | hpet_enable_legacy_int(); |
194 | break; | 276 | break; |
195 | } | 277 | } |
196 | } | 278 | } |
197 | 279 | ||
198 | static int hpet_next_event(unsigned long delta, | 280 | static int hpet_legacy_next_event(unsigned long delta, |
199 | struct clock_event_device *evt) | 281 | struct clock_event_device *evt) |
200 | { | 282 | { |
201 | unsigned long cnt; | 283 | unsigned long cnt; |
@@ -215,6 +297,13 @@ static cycle_t read_hpet(void) | |||
215 | return (cycle_t)hpet_readl(HPET_COUNTER); | 297 | return (cycle_t)hpet_readl(HPET_COUNTER); |
216 | } | 298 | } |
217 | 299 | ||
300 | #ifdef CONFIG_X86_64 | ||
301 | static cycle_t __vsyscall_fn vread_hpet(void) | ||
302 | { | ||
303 | return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); | ||
304 | } | ||
305 | #endif | ||
306 | |||
218 | static struct clocksource clocksource_hpet = { | 307 | static struct clocksource clocksource_hpet = { |
219 | .name = "hpet", | 308 | .name = "hpet", |
220 | .rating = 250, | 309 | .rating = 250, |
@@ -222,61 +311,17 @@ static struct clocksource clocksource_hpet = { | |||
222 | .mask = HPET_MASK, | 311 | .mask = HPET_MASK, |
223 | .shift = HPET_SHIFT, | 312 | .shift = HPET_SHIFT, |
224 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 313 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
225 | .resume = hpet_start_counter, | 314 | .resume = hpet_restart_counter, |
315 | #ifdef CONFIG_X86_64 | ||
316 | .vread = vread_hpet, | ||
317 | #endif | ||
226 | }; | 318 | }; |
227 | 319 | ||
228 | /* | 320 | static int hpet_clocksource_register(void) |
229 | * Try to setup the HPET timer | ||
230 | */ | ||
231 | int __init hpet_enable(void) | ||
232 | { | 321 | { |
233 | unsigned long id; | ||
234 | uint64_t hpet_freq; | ||
235 | u64 tmp, start, now; | 322 | u64 tmp, start, now; |
236 | cycle_t t1; | 323 | cycle_t t1; |
237 | 324 | ||
238 | if (!is_hpet_capable()) | ||
239 | return 0; | ||
240 | |||
241 | hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE); | ||
242 | |||
243 | /* | ||
244 | * Read the period and check for a sane value: | ||
245 | */ | ||
246 | hpet_period = hpet_readl(HPET_PERIOD); | ||
247 | if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) | ||
248 | goto out_nohpet; | ||
249 | |||
250 | /* | ||
251 | * The period is a femto seconds value. We need to calculate the | ||
252 | * scaled math multiplication factor for nanosecond to hpet tick | ||
253 | * conversion. | ||
254 | */ | ||
255 | hpet_freq = 1000000000000000ULL; | ||
256 | do_div(hpet_freq, hpet_period); | ||
257 | hpet_clockevent.mult = div_sc((unsigned long) hpet_freq, | ||
258 | NSEC_PER_SEC, 32); | ||
259 | /* Calculate the min / max delta */ | ||
260 | hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, | ||
261 | &hpet_clockevent); | ||
262 | hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30, | ||
263 | &hpet_clockevent); | ||
264 | |||
265 | /* | ||
266 | * Read the HPET ID register to retrieve the IRQ routing | ||
267 | * information and the number of channels | ||
268 | */ | ||
269 | id = hpet_readl(HPET_ID); | ||
270 | |||
271 | #ifdef CONFIG_HPET_EMULATE_RTC | ||
272 | /* | ||
273 | * The legacy routing mode needs at least two channels, tick timer | ||
274 | * and the rtc emulation channel. | ||
275 | */ | ||
276 | if (!(id & HPET_ID_NUMBER)) | ||
277 | goto out_nohpet; | ||
278 | #endif | ||
279 | |||
280 | /* Start the counter */ | 325 | /* Start the counter */ |
281 | hpet_start_counter(); | 326 | hpet_start_counter(); |
282 | 327 | ||
@@ -298,7 +343,7 @@ int __init hpet_enable(void) | |||
298 | if (t1 == read_hpet()) { | 343 | if (t1 == read_hpet()) { |
299 | printk(KERN_WARNING | 344 | printk(KERN_WARNING |
300 | "HPET counter not counting. HPET disabled\n"); | 345 | "HPET counter not counting. HPET disabled\n"); |
301 | goto out_nohpet; | 346 | return -ENODEV; |
302 | } | 347 | } |
303 | 348 | ||
304 | /* Initialize and register HPET clocksource | 349 | /* Initialize and register HPET clocksource |
@@ -319,27 +364,84 @@ int __init hpet_enable(void) | |||
319 | 364 | ||
320 | clocksource_register(&clocksource_hpet); | 365 | clocksource_register(&clocksource_hpet); |
321 | 366 | ||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | /* | ||
371 | * Try to setup the HPET timer | ||
372 | */ | ||
373 | int __init hpet_enable(void) | ||
374 | { | ||
375 | unsigned long id; | ||
376 | |||
377 | if (!is_hpet_capable()) | ||
378 | return 0; | ||
379 | |||
380 | hpet_set_mapping(); | ||
381 | |||
382 | /* | ||
383 | * Read the period and check for a sane value: | ||
384 | */ | ||
385 | hpet_period = hpet_readl(HPET_PERIOD); | ||
386 | if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) | ||
387 | goto out_nohpet; | ||
388 | |||
389 | /* | ||
390 | * Read the HPET ID register to retrieve the IRQ routing | ||
391 | * information and the number of channels | ||
392 | */ | ||
393 | id = hpet_readl(HPET_ID); | ||
394 | |||
395 | #ifdef CONFIG_HPET_EMULATE_RTC | ||
396 | /* | ||
397 | * The legacy routing mode needs at least two channels, tick timer | ||
398 | * and the rtc emulation channel. | ||
399 | */ | ||
400 | if (!(id & HPET_ID_NUMBER)) | ||
401 | goto out_nohpet; | ||
402 | #endif | ||
403 | |||
404 | if (hpet_clocksource_register()) | ||
405 | goto out_nohpet; | ||
406 | |||
322 | if (id & HPET_ID_LEGSUP) { | 407 | if (id & HPET_ID_LEGSUP) { |
323 | hpet_enable_int(); | 408 | hpet_legacy_clockevent_register(); |
324 | hpet_reserve_platform_timers(id); | ||
325 | /* | ||
326 | * Start hpet with the boot cpu mask and make it | ||
327 | * global after the IO_APIC has been initialized. | ||
328 | */ | ||
329 | hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); | ||
330 | clockevents_register_device(&hpet_clockevent); | ||
331 | global_clock_event = &hpet_clockevent; | ||
332 | return 1; | 409 | return 1; |
333 | } | 410 | } |
334 | return 0; | 411 | return 0; |
335 | 412 | ||
336 | out_nohpet: | 413 | out_nohpet: |
337 | iounmap(hpet_virt_address); | 414 | hpet_clear_mapping(); |
338 | hpet_virt_address = NULL; | ||
339 | boot_hpet_disable = 1; | 415 | boot_hpet_disable = 1; |
340 | return 0; | 416 | return 0; |
341 | } | 417 | } |
342 | 418 | ||
419 | /* | ||
420 | * Needs to be late, as the reserve_timer code calls kalloc ! | ||
421 | * | ||
422 | * Not a problem on i386 as hpet_enable is called from late_time_init, | ||
423 | * but on x86_64 it is necessary ! | ||
424 | */ | ||
425 | static __init int hpet_late_init(void) | ||
426 | { | ||
427 | if (boot_hpet_disable) | ||
428 | return -ENODEV; | ||
429 | |||
430 | if (!hpet_address) { | ||
431 | if (!force_hpet_address) | ||
432 | return -ENODEV; | ||
433 | |||
434 | hpet_address = force_hpet_address; | ||
435 | hpet_enable(); | ||
436 | if (!hpet_virt_address) | ||
437 | return -ENODEV; | ||
438 | } | ||
439 | |||
440 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); | ||
441 | |||
442 | return 0; | ||
443 | } | ||
444 | fs_initcall(hpet_late_init); | ||
343 | 445 | ||
344 | #ifdef CONFIG_HPET_EMULATE_RTC | 446 | #ifdef CONFIG_HPET_EMULATE_RTC |
345 | 447 | ||
diff --git a/arch/x86/kernel/hpet_64.c b/arch/x86/kernel/hpet_64.c deleted file mode 100644 index e2d1b912e154..000000000000 --- a/arch/x86/kernel/hpet_64.c +++ /dev/null | |||
@@ -1,493 +0,0 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/sched.h> | ||
3 | #include <linux/init.h> | ||
4 | #include <linux/mc146818rtc.h> | ||
5 | #include <linux/time.h> | ||
6 | #include <linux/clocksource.h> | ||
7 | #include <linux/ioport.h> | ||
8 | #include <linux/acpi.h> | ||
9 | #include <linux/hpet.h> | ||
10 | #include <asm/pgtable.h> | ||
11 | #include <asm/vsyscall.h> | ||
12 | #include <asm/timex.h> | ||
13 | #include <asm/hpet.h> | ||
14 | |||
15 | #define HPET_MASK 0xFFFFFFFF | ||
16 | #define HPET_SHIFT 22 | ||
17 | |||
18 | /* FSEC = 10^-15 NSEC = 10^-9 */ | ||
19 | #define FSEC_PER_NSEC 1000000 | ||
20 | |||
21 | int nohpet __initdata; | ||
22 | |||
23 | unsigned long hpet_address; | ||
24 | unsigned long hpet_period; /* fsecs / HPET clock */ | ||
25 | unsigned long hpet_tick; /* HPET clocks / interrupt */ | ||
26 | |||
27 | int hpet_use_timer; /* Use counter of hpet for time keeping, | ||
28 | * otherwise PIT | ||
29 | */ | ||
30 | |||
31 | #ifdef CONFIG_HPET | ||
32 | static __init int late_hpet_init(void) | ||
33 | { | ||
34 | struct hpet_data hd; | ||
35 | unsigned int ntimer; | ||
36 | |||
37 | if (!hpet_address) | ||
38 | return 0; | ||
39 | |||
40 | memset(&hd, 0, sizeof(hd)); | ||
41 | |||
42 | ntimer = hpet_readl(HPET_ID); | ||
43 | ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT; | ||
44 | ntimer++; | ||
45 | |||
46 | /* | ||
47 | * Register with driver. | ||
48 | * Timer0 and Timer1 is used by platform. | ||
49 | */ | ||
50 | hd.hd_phys_address = hpet_address; | ||
51 | hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE); | ||
52 | hd.hd_nirqs = ntimer; | ||
53 | hd.hd_flags = HPET_DATA_PLATFORM; | ||
54 | hpet_reserve_timer(&hd, 0); | ||
55 | #ifdef CONFIG_HPET_EMULATE_RTC | ||
56 | hpet_reserve_timer(&hd, 1); | ||
57 | #endif | ||
58 | hd.hd_irq[0] = HPET_LEGACY_8254; | ||
59 | hd.hd_irq[1] = HPET_LEGACY_RTC; | ||
60 | if (ntimer > 2) { | ||
61 | struct hpet *hpet; | ||
62 | struct hpet_timer *timer; | ||
63 | int i; | ||
64 | |||
65 | hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE); | ||
66 | timer = &hpet->hpet_timers[2]; | ||
67 | for (i = 2; i < ntimer; timer++, i++) | ||
68 | hd.hd_irq[i] = (timer->hpet_config & | ||
69 | Tn_INT_ROUTE_CNF_MASK) >> | ||
70 | Tn_INT_ROUTE_CNF_SHIFT; | ||
71 | |||
72 | } | ||
73 | |||
74 | hpet_alloc(&hd); | ||
75 | return 0; | ||
76 | } | ||
77 | fs_initcall(late_hpet_init); | ||
78 | #endif | ||
79 | |||
80 | int hpet_timer_stop_set_go(unsigned long tick) | ||
81 | { | ||
82 | unsigned int cfg; | ||
83 | |||
84 | /* | ||
85 | * Stop the timers and reset the main counter. | ||
86 | */ | ||
87 | |||
88 | cfg = hpet_readl(HPET_CFG); | ||
89 | cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY); | ||
90 | hpet_writel(cfg, HPET_CFG); | ||
91 | hpet_writel(0, HPET_COUNTER); | ||
92 | hpet_writel(0, HPET_COUNTER + 4); | ||
93 | |||
94 | /* | ||
95 | * Set up timer 0, as periodic with first interrupt to happen at hpet_tick, | ||
96 | * and period also hpet_tick. | ||
97 | */ | ||
98 | if (hpet_use_timer) { | ||
99 | hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL | | ||
100 | HPET_TN_32BIT, HPET_T0_CFG); | ||
101 | hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */ | ||
102 | hpet_writel(hpet_tick, HPET_T0_CMP); /* period */ | ||
103 | cfg |= HPET_CFG_LEGACY; | ||
104 | } | ||
105 | /* | ||
106 | * Go! | ||
107 | */ | ||
108 | |||
109 | cfg |= HPET_CFG_ENABLE; | ||
110 | hpet_writel(cfg, HPET_CFG); | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static cycle_t read_hpet(void) | ||
116 | { | ||
117 | return (cycle_t)hpet_readl(HPET_COUNTER); | ||
118 | } | ||
119 | |||
120 | static cycle_t __vsyscall_fn vread_hpet(void) | ||
121 | { | ||
122 | return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); | ||
123 | } | ||
124 | |||
125 | struct clocksource clocksource_hpet = { | ||
126 | .name = "hpet", | ||
127 | .rating = 250, | ||
128 | .read = read_hpet, | ||
129 | .mask = (cycle_t)HPET_MASK, | ||
130 | .mult = 0, /* set below */ | ||
131 | .shift = HPET_SHIFT, | ||
132 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
133 | .vread = vread_hpet, | ||
134 | }; | ||
135 | |||
136 | int __init hpet_arch_init(void) | ||
137 | { | ||
138 | unsigned int id; | ||
139 | u64 tmp; | ||
140 | |||
141 | if (!hpet_address) | ||
142 | return -1; | ||
143 | set_fixmap_nocache(FIX_HPET_BASE, hpet_address); | ||
144 | __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE); | ||
145 | |||
146 | /* | ||
147 | * Read the period, compute tick and quotient. | ||
148 | */ | ||
149 | |||
150 | id = hpet_readl(HPET_ID); | ||
151 | |||
152 | if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER)) | ||
153 | return -1; | ||
154 | |||
155 | hpet_period = hpet_readl(HPET_PERIOD); | ||
156 | if (hpet_period < 100000 || hpet_period > 100000000) | ||
157 | return -1; | ||
158 | |||
159 | hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period; | ||
160 | |||
161 | hpet_use_timer = (id & HPET_ID_LEGSUP); | ||
162 | |||
163 | /* | ||
164 | * hpet period is in femto seconds per cycle | ||
165 | * so we need to convert this to ns/cyc units | ||
166 | * aproximated by mult/2^shift | ||
167 | * | ||
168 | * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift | ||
169 | * fsec/cyc * 1ns/1000000fsec * 2^shift = mult | ||
170 | * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult | ||
171 | * (fsec/cyc << shift)/1000000 = mult | ||
172 | * (hpet_period << shift)/FSEC_PER_NSEC = mult | ||
173 | */ | ||
174 | tmp = (u64)hpet_period << HPET_SHIFT; | ||
175 | do_div(tmp, FSEC_PER_NSEC); | ||
176 | clocksource_hpet.mult = (u32)tmp; | ||
177 | clocksource_register(&clocksource_hpet); | ||
178 | |||
179 | return hpet_timer_stop_set_go(hpet_tick); | ||
180 | } | ||
181 | |||
182 | int hpet_reenable(void) | ||
183 | { | ||
184 | return hpet_timer_stop_set_go(hpet_tick); | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing | ||
189 | * it to the HPET timer of known frequency. | ||
190 | */ | ||
191 | |||
192 | #define TICK_COUNT 100000000 | ||
193 | #define SMI_THRESHOLD 50000 | ||
194 | #define MAX_TRIES 5 | ||
195 | |||
196 | /* | ||
197 | * Some platforms take periodic SMI interrupts with 5ms duration. Make sure none | ||
198 | * occurs between the reads of the hpet & TSC. | ||
199 | */ | ||
200 | static void __init read_hpet_tsc(int *hpet, int *tsc) | ||
201 | { | ||
202 | int tsc1, tsc2, hpet1, i; | ||
203 | |||
204 | for (i = 0; i < MAX_TRIES; i++) { | ||
205 | tsc1 = get_cycles_sync(); | ||
206 | hpet1 = hpet_readl(HPET_COUNTER); | ||
207 | tsc2 = get_cycles_sync(); | ||
208 | if ((tsc2 - tsc1) < SMI_THRESHOLD) | ||
209 | break; | ||
210 | } | ||
211 | *hpet = hpet1; | ||
212 | *tsc = tsc2; | ||
213 | } | ||
214 | |||
215 | unsigned int __init hpet_calibrate_tsc(void) | ||
216 | { | ||
217 | int tsc_start, hpet_start; | ||
218 | int tsc_now, hpet_now; | ||
219 | unsigned long flags; | ||
220 | |||
221 | local_irq_save(flags); | ||
222 | |||
223 | read_hpet_tsc(&hpet_start, &tsc_start); | ||
224 | |||
225 | do { | ||
226 | local_irq_disable(); | ||
227 | read_hpet_tsc(&hpet_now, &tsc_now); | ||
228 | local_irq_restore(flags); | ||
229 | } while ((tsc_now - tsc_start) < TICK_COUNT && | ||
230 | (hpet_now - hpet_start) < TICK_COUNT); | ||
231 | |||
232 | return (tsc_now - tsc_start) * 1000000000L | ||
233 | / ((hpet_now - hpet_start) * hpet_period / 1000); | ||
234 | } | ||
235 | |||
236 | #ifdef CONFIG_HPET_EMULATE_RTC | ||
236 | /* HPET in LegacyReplacement Mode eats up RTC interrupt line. When HPET | ||
238 | * is enabled, we support RTC interrupt functionality in software. | ||
239 | * RTC has 3 kinds of interrupts: | ||
240 | * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock | ||
241 | * is updated | ||
242 | * 2) Alarm Interrupt - generate an interrupt at a specific time of day | ||
243 | * 3) Periodic Interrupt - generate periodic interrupt, with frequencies | ||
244 | * 2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2) | ||
245 | * (1) and (2) above are implemented using polling at a frequency of | ||
246 | * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt | ||
247 | * overhead. (DEFAULT_RTC_INT_FREQ) | ||
248 | * For (3), we use interrupts at 64Hz or user specified periodic | ||
249 | * frequency, whichever is higher. | ||
250 | */ | ||
251 | #include <linux/rtc.h> | ||
252 | |||
253 | #define DEFAULT_RTC_INT_FREQ 64 | ||
254 | #define RTC_NUM_INTS 1 | ||
255 | |||
256 | static unsigned long UIE_on; | ||
257 | static unsigned long prev_update_sec; | ||
258 | |||
259 | static unsigned long AIE_on; | ||
260 | static struct rtc_time alarm_time; | ||
261 | |||
262 | static unsigned long PIE_on; | ||
263 | static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ; | ||
264 | static unsigned long PIE_count; | ||
265 | |||
266 | static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */ | ||
267 | static unsigned int hpet_t1_cmp; /* cached comparator register */ | ||
268 | |||
269 | int is_hpet_enabled(void) | ||
270 | { | ||
271 | return hpet_address != 0; | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * Timer 1 for RTC, we do not use periodic interrupt feature, | ||
276 | * even if HPET supports periodic interrupts on Timer 1. | ||
277 | * The reason being, to set up a periodic interrupt in HPET, we need to | ||
278 | * stop the main counter. And if we do that every time someone disables/enables | ||
279 | * RTC, we will have adverse effect on main kernel timer running on Timer 0. | ||
280 | * So, for the time being, simulate the periodic interrupt in software. | ||
281 | * | ||
282 | * hpet_rtc_timer_init() is called for the first time and during subsequent | ||
283 | * interrupts reinit happens through hpet_rtc_timer_reinit(). | ||
284 | */ | ||
285 | int hpet_rtc_timer_init(void) | ||
286 | { | ||
287 | unsigned int cfg, cnt; | ||
288 | unsigned long flags; | ||
289 | |||
290 | if (!is_hpet_enabled()) | ||
291 | return 0; | ||
292 | /* | ||
293 | * Set the counter 1 and enable the interrupts. | ||
294 | */ | ||
295 | if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) | ||
296 | hpet_rtc_int_freq = PIE_freq; | ||
297 | else | ||
298 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; | ||
299 | |||
300 | local_irq_save(flags); | ||
301 | |||
302 | cnt = hpet_readl(HPET_COUNTER); | ||
303 | cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); | ||
304 | hpet_writel(cnt, HPET_T1_CMP); | ||
305 | hpet_t1_cmp = cnt; | ||
306 | |||
307 | cfg = hpet_readl(HPET_T1_CFG); | ||
308 | cfg &= ~HPET_TN_PERIODIC; | ||
309 | cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; | ||
310 | hpet_writel(cfg, HPET_T1_CFG); | ||
311 | |||
312 | local_irq_restore(flags); | ||
313 | |||
314 | return 1; | ||
315 | } | ||
316 | |||
317 | static void hpet_rtc_timer_reinit(void) | ||
318 | { | ||
319 | unsigned int cfg, cnt, ticks_per_int, lost_ints; | ||
320 | |||
321 | if (unlikely(!(PIE_on | AIE_on | UIE_on))) { | ||
322 | cfg = hpet_readl(HPET_T1_CFG); | ||
323 | cfg &= ~HPET_TN_ENABLE; | ||
324 | hpet_writel(cfg, HPET_T1_CFG); | ||
325 | return; | ||
326 | } | ||
327 | |||
328 | if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) | ||
329 | hpet_rtc_int_freq = PIE_freq; | ||
330 | else | ||
331 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; | ||
332 | |||
333 | /* It is more accurate to use the comparator value than current count.*/ | ||
334 | ticks_per_int = hpet_tick * HZ / hpet_rtc_int_freq; | ||
335 | hpet_t1_cmp += ticks_per_int; | ||
336 | hpet_writel(hpet_t1_cmp, HPET_T1_CMP); | ||
337 | |||
338 | /* | ||
339 | * If the interrupt handler was delayed too long, the write above tries | ||
340 | * to schedule the next interrupt in the past and the hardware would | ||
341 | * not interrupt until the counter had wrapped around. | ||
342 | * So we have to check that the comparator wasn't set to a past time. | ||
343 | */ | ||
344 | cnt = hpet_readl(HPET_COUNTER); | ||
345 | if (unlikely((int)(cnt - hpet_t1_cmp) > 0)) { | ||
346 | lost_ints = (cnt - hpet_t1_cmp) / ticks_per_int + 1; | ||
347 | /* Make sure that, even with the time needed to execute | ||
348 | * this code, the next scheduled interrupt has been moved | ||
349 | * back to the future: */ | ||
350 | lost_ints++; | ||
351 | |||
352 | hpet_t1_cmp += lost_ints * ticks_per_int; | ||
353 | hpet_writel(hpet_t1_cmp, HPET_T1_CMP); | ||
354 | |||
355 | if (PIE_on) | ||
356 | PIE_count += lost_ints; | ||
357 | |||
358 | if (printk_ratelimit()) | ||
359 | printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", | ||
360 | hpet_rtc_int_freq); | ||
361 | } | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * The functions below are called from rtc driver. | ||
366 | * Return 0 if HPET is not being used. | ||
367 | * Otherwise do the necessary changes and return 1. | ||
368 | */ | ||
369 | int hpet_mask_rtc_irq_bit(unsigned long bit_mask) | ||
370 | { | ||
371 | if (!is_hpet_enabled()) | ||
372 | return 0; | ||
373 | |||
374 | if (bit_mask & RTC_UIE) | ||
375 | UIE_on = 0; | ||
376 | if (bit_mask & RTC_PIE) | ||
377 | PIE_on = 0; | ||
378 | if (bit_mask & RTC_AIE) | ||
379 | AIE_on = 0; | ||
380 | |||
381 | return 1; | ||
382 | } | ||
383 | |||
384 | int hpet_set_rtc_irq_bit(unsigned long bit_mask) | ||
385 | { | ||
386 | int timer_init_reqd = 0; | ||
387 | |||
388 | if (!is_hpet_enabled()) | ||
389 | return 0; | ||
390 | |||
391 | if (!(PIE_on | AIE_on | UIE_on)) | ||
392 | timer_init_reqd = 1; | ||
393 | |||
394 | if (bit_mask & RTC_UIE) { | ||
395 | UIE_on = 1; | ||
396 | } | ||
397 | if (bit_mask & RTC_PIE) { | ||
398 | PIE_on = 1; | ||
399 | PIE_count = 0; | ||
400 | } | ||
401 | if (bit_mask & RTC_AIE) { | ||
402 | AIE_on = 1; | ||
403 | } | ||
404 | |||
405 | if (timer_init_reqd) | ||
406 | hpet_rtc_timer_init(); | ||
407 | |||
408 | return 1; | ||
409 | } | ||
410 | |||
411 | int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec) | ||
412 | { | ||
413 | if (!is_hpet_enabled()) | ||
414 | return 0; | ||
415 | |||
416 | alarm_time.tm_hour = hrs; | ||
417 | alarm_time.tm_min = min; | ||
418 | alarm_time.tm_sec = sec; | ||
419 | |||
420 | return 1; | ||
421 | } | ||
422 | |||
423 | int hpet_set_periodic_freq(unsigned long freq) | ||
424 | { | ||
425 | if (!is_hpet_enabled()) | ||
426 | return 0; | ||
427 | |||
428 | PIE_freq = freq; | ||
429 | PIE_count = 0; | ||
430 | |||
431 | return 1; | ||
432 | } | ||
433 | |||
434 | int hpet_rtc_dropped_irq(void) | ||
435 | { | ||
436 | if (!is_hpet_enabled()) | ||
437 | return 0; | ||
438 | |||
439 | return 1; | ||
440 | } | ||
441 | |||
442 | irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) | ||
443 | { | ||
444 | struct rtc_time curr_time; | ||
445 | unsigned long rtc_int_flag = 0; | ||
446 | int call_rtc_interrupt = 0; | ||
447 | |||
448 | hpet_rtc_timer_reinit(); | ||
449 | |||
450 | if (UIE_on | AIE_on) { | ||
451 | rtc_get_rtc_time(&curr_time); | ||
452 | } | ||
453 | if (UIE_on) { | ||
454 | if (curr_time.tm_sec != prev_update_sec) { | ||
455 | /* Set update int info, call real rtc int routine */ | ||
456 | call_rtc_interrupt = 1; | ||
457 | rtc_int_flag = RTC_UF; | ||
458 | prev_update_sec = curr_time.tm_sec; | ||
459 | } | ||
460 | } | ||
461 | if (PIE_on) { | ||
462 | PIE_count++; | ||
463 | if (PIE_count >= hpet_rtc_int_freq/PIE_freq) { | ||
464 | /* Set periodic int info, call real rtc int routine */ | ||
465 | call_rtc_interrupt = 1; | ||
466 | rtc_int_flag |= RTC_PF; | ||
467 | PIE_count = 0; | ||
468 | } | ||
469 | } | ||
470 | if (AIE_on) { | ||
471 | if ((curr_time.tm_sec == alarm_time.tm_sec) && | ||
472 | (curr_time.tm_min == alarm_time.tm_min) && | ||
473 | (curr_time.tm_hour == alarm_time.tm_hour)) { | ||
474 | /* Set alarm int info, call real rtc int routine */ | ||
475 | call_rtc_interrupt = 1; | ||
476 | rtc_int_flag |= RTC_AF; | ||
477 | } | ||
478 | } | ||
479 | if (call_rtc_interrupt) { | ||
480 | rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8)); | ||
481 | rtc_interrupt(rtc_int_flag, dev_id); | ||
482 | } | ||
483 | return IRQ_HANDLED; | ||
484 | } | ||
485 | #endif | ||
486 | |||
487 | static int __init nohpet_setup(char *s) | ||
488 | { | ||
489 | nohpet = 1; | ||
490 | return 1; | ||
491 | } | ||
492 | |||
493 | __setup("nohpet", nohpet_setup); | ||
diff --git a/arch/x86/kernel/i8253_32.c b/arch/x86/kernel/i8253.c index 6d839f2f1b1a..ac15e4cbd9c1 100644 --- a/arch/x86/kernel/i8253_32.c +++ b/arch/x86/kernel/i8253.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <asm/delay.h> | 13 | #include <asm/delay.h> |
14 | #include <asm/i8253.h> | 14 | #include <asm/i8253.h> |
15 | #include <asm/io.h> | 15 | #include <asm/io.h> |
16 | #include <asm/timer.h> | ||
17 | 16 | ||
18 | DEFINE_SPINLOCK(i8253_lock); | 17 | DEFINE_SPINLOCK(i8253_lock); |
19 | EXPORT_SYMBOL(i8253_lock); | 18 | EXPORT_SYMBOL(i8253_lock); |
@@ -120,6 +119,7 @@ void __init setup_pit_timer(void) | |||
120 | global_clock_event = &pit_clockevent; | 119 | global_clock_event = &pit_clockevent; |
121 | } | 120 | } |
122 | 121 | ||
122 | #ifndef CONFIG_X86_64 | ||
123 | /* | 123 | /* |
124 | * Since the PIT overflows every tick, its not very useful | 124 | * Since the PIT overflows every tick, its not very useful |
125 | * to just read by itself. So use jiffies to emulate a free | 125 | * to just read by itself. So use jiffies to emulate a free |
@@ -204,3 +204,5 @@ static int __init init_pit_clocksource(void) | |||
204 | return clocksource_register(&clocksource_pit); | 204 | return clocksource_register(&clocksource_pit); |
205 | } | 205 | } |
206 | arch_initcall(init_pit_clocksource); | 206 | arch_initcall(init_pit_clocksource); |
207 | |||
208 | #endif | ||
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c index 0499cbe9871a..679bb33acbf1 100644 --- a/arch/x86/kernel/i8259_32.c +++ b/arch/x86/kernel/i8259_32.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/sysdev.h> | 10 | #include <linux/sysdev.h> |
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
12 | 12 | ||
13 | #include <asm/8253pit.h> | ||
14 | #include <asm/atomic.h> | 13 | #include <asm/atomic.h> |
15 | #include <asm/system.h> | 14 | #include <asm/system.h> |
16 | #include <asm/io.h> | 15 | #include <asm/io.h> |
diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c index 948cae646099..eb72976cc13c 100644 --- a/arch/x86/kernel/i8259_64.c +++ b/arch/x86/kernel/i8259_64.c | |||
@@ -444,46 +444,6 @@ void __init init_ISA_irqs (void) | |||
444 | } | 444 | } |
445 | } | 445 | } |
446 | 446 | ||
447 | static void setup_timer_hardware(void) | ||
448 | { | ||
449 | outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */ | ||
450 | udelay(10); | ||
451 | outb_p(LATCH & 0xff , 0x40); /* LSB */ | ||
452 | udelay(10); | ||
453 | outb(LATCH >> 8 , 0x40); /* MSB */ | ||
454 | } | ||
455 | |||
456 | static int timer_resume(struct sys_device *dev) | ||
457 | { | ||
458 | setup_timer_hardware(); | ||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | void i8254_timer_resume(void) | ||
463 | { | ||
464 | setup_timer_hardware(); | ||
465 | } | ||
466 | |||
467 | static struct sysdev_class timer_sysclass = { | ||
468 | set_kset_name("timer_pit"), | ||
469 | .resume = timer_resume, | ||
470 | }; | ||
471 | |||
472 | static struct sys_device device_timer = { | ||
473 | .id = 0, | ||
474 | .cls = &timer_sysclass, | ||
475 | }; | ||
476 | |||
477 | static int __init init_timer_sysfs(void) | ||
478 | { | ||
479 | int error = sysdev_class_register(&timer_sysclass); | ||
480 | if (!error) | ||
481 | error = sysdev_register(&device_timer); | ||
482 | return error; | ||
483 | } | ||
484 | |||
485 | device_initcall(init_timer_sysfs); | ||
486 | |||
487 | void __init init_IRQ(void) | 447 | void __init init_IRQ(void) |
488 | { | 448 | { |
489 | int i; | 449 | int i; |
@@ -533,12 +493,6 @@ void __init init_IRQ(void) | |||
533 | set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | 493 | set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); |
534 | set_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | 494 | set_intr_gate(ERROR_APIC_VECTOR, error_interrupt); |
535 | 495 | ||
536 | /* | ||
537 | * Set the clock to HZ Hz, we already have a valid | ||
538 | * vector now: | ||
539 | */ | ||
540 | setup_timer_hardware(); | ||
541 | |||
542 | if (!acpi_ioapic) | 496 | if (!acpi_ioapic) |
543 | setup_irq(2, &irq2); | 497 | setup_irq(2, &irq2); |
544 | } | 498 | } |
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c new file mode 100644 index 000000000000..0ab680f2d9db --- /dev/null +++ b/arch/x86/kernel/mfgpt_32.c | |||
@@ -0,0 +1,362 @@ | |||
1 | /* | ||
2 | * Driver/API for AMD Geode Multi-Function General Purpose Timers (MFGPT) | ||
3 | * | ||
4 | * Copyright (C) 2006, Advanced Micro Devices, Inc. | ||
5 | * Copyright (C) 2007, Andres Salomon <dilinger@debian.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of version 2 of the GNU General Public License | ||
9 | * as published by the Free Software Foundation. | ||
10 | * | ||
11 | * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book. | ||
12 | */ | ||
13 | |||
14 | /* | ||
15 | * We are using the 32kHz input clock - it's the only one that has the | ||
16 | * ranges we find desirable. The following table lists the suitable | ||
17 | * divisors and the associated hz, minimum interval | ||
18 | * and the maximum interval: | ||
19 | * | ||
20 | * Divisor Hz Min Delta (S) Max Delta (S) | ||
21 | * 1 32000 .0005 2.048 | ||
22 | * 2 16000 .001 4.096 | ||
23 | * 4 8000 .002 8.192 | ||
24 | * 8 4000 .004 16.384 | ||
25 | * 16 2000 .008 32.768 | ||
26 | * 32 1000 .016 65.536 | ||
27 | * 64 500 .032 131.072 | ||
28 | * 128 250 .064 262.144 | ||
29 | * 256 125 .128 524.288 | ||
30 | */ | ||
31 | |||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/module.h> | ||
35 | #include <asm/geode.h> | ||
36 | |||
37 | #define F_AVAIL 0x01 | ||
38 | |||
39 | static struct mfgpt_timer_t { | ||
40 | int flags; | ||
41 | struct module *owner; | ||
42 | } mfgpt_timers[MFGPT_MAX_TIMERS]; | ||
43 | |||
44 | /* Selected from the table above */ | ||
45 | |||
46 | #define MFGPT_DIVISOR 16 | ||
47 | #define MFGPT_SCALE 4 /* divisor = 2^(scale) */ | ||
48 | #define MFGPT_HZ (32000 / MFGPT_DIVISOR) | ||
49 | #define MFGPT_PERIODIC (MFGPT_HZ / HZ) | ||
50 | |||
51 | #ifdef CONFIG_GEODE_MFGPT_TIMER | ||
52 | static int __init mfgpt_timer_setup(void); | ||
53 | #else | ||
54 | #define mfgpt_timer_setup() (0) | ||
55 | #endif | ||
56 | |||
57 | /* Allow for disabling of MFGPTs */ | ||
58 | static int disable; | ||
59 | static int __init mfgpt_disable(char *s) | ||
60 | { | ||
61 | disable = 1; | ||
62 | return 1; | ||
63 | } | ||
64 | __setup("nomfgpt", mfgpt_disable); | ||
65 | |||
66 | /* | ||
67 | * Check whether any MFGPTs are available for the kernel to use. In most | ||
68 | * cases, firmware that uses AMD's VSA code will claim all timers during | ||
69 | * bootup; we certainly don't want to take them if they're already in use. | ||
70 | * In other cases (such as with VSAless OpenFirmware), the system firmware | ||
71 | * leaves timers available for us to use. | ||
72 | */ | ||
73 | int __init geode_mfgpt_detect(void) | ||
74 | { | ||
75 | int count = 0, i; | ||
76 | u16 val; | ||
77 | |||
78 | if (disable) { | ||
79 | printk(KERN_INFO "geode-mfgpt: Skipping MFGPT setup\n"); | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | for (i = 0; i < MFGPT_MAX_TIMERS; i++) { | ||
84 | val = geode_mfgpt_read(i, MFGPT_REG_SETUP); | ||
85 | if (!(val & MFGPT_SETUP_SETUP)) { | ||
86 | mfgpt_timers[i].flags = F_AVAIL; | ||
87 | count++; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | /* set up clock event device, if desired */ | ||
92 | i = mfgpt_timer_setup(); | ||
93 | |||
94 | return count; | ||
95 | } | ||
96 | |||
97 | int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable) | ||
98 | { | ||
99 | u32 msr, mask, value, dummy; | ||
100 | int shift = (cmp == MFGPT_CMP1) ? 0 : 8; | ||
101 | |||
102 | if (timer < 0 || timer >= MFGPT_MAX_TIMERS) | ||
103 | return -EIO; | ||
104 | |||
105 | /* | ||
106 | * The register maps for these are described in sections 6.17.1.x of | ||
107 | * the AMD Geode CS5536 Companion Device Data Book. | ||
108 | */ | ||
109 | switch (event) { | ||
110 | case MFGPT_EVENT_RESET: | ||
111 | /* | ||
112 | * XXX: According to the docs, we cannot reset timers above | ||
113 | * 6; that is, resets for 7 and 8 will be ignored. Is this | ||
114 | * a problem? -dilinger | ||
115 | */ | ||
116 | msr = MFGPT_NR_MSR; | ||
117 | mask = 1 << (timer + 24); | ||
118 | break; | ||
119 | |||
120 | case MFGPT_EVENT_NMI: | ||
121 | msr = MFGPT_NR_MSR; | ||
122 | mask = 1 << (timer + shift); | ||
123 | break; | ||
124 | |||
125 | case MFGPT_EVENT_IRQ: | ||
126 | msr = MFGPT_IRQ_MSR; | ||
127 | mask = 1 << (timer + shift); | ||
128 | break; | ||
129 | |||
130 | default: | ||
131 | return -EIO; | ||
132 | } | ||
133 | |||
134 | rdmsr(msr, value, dummy); | ||
135 | |||
136 | if (enable) | ||
137 | value |= mask; | ||
138 | else | ||
139 | value &= ~mask; | ||
140 | |||
141 | wrmsr(msr, value, dummy); | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable) | ||
146 | { | ||
147 | u32 val, dummy; | ||
148 | int offset; | ||
149 | |||
150 | if (timer < 0 || timer >= MFGPT_MAX_TIMERS) | ||
151 | return -EIO; | ||
152 | |||
153 | if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) | ||
154 | return -EIO; | ||
155 | |||
156 | rdmsr(MSR_PIC_ZSEL_LOW, val, dummy); | ||
157 | |||
158 | offset = (timer % 4) * 4; | ||
159 | |||
160 | val &= ~((0xF << offset) | (0xF << (offset + 16))); | ||
161 | |||
162 | if (enable) { | ||
163 | val |= (irq & 0x0F) << (offset); | ||
164 | val |= (irq & 0x0F) << (offset + 16); | ||
165 | } | ||
166 | |||
167 | wrmsr(MSR_PIC_ZSEL_LOW, val, dummy); | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static int mfgpt_get(int timer, struct module *owner) | ||
172 | { | ||
173 | mfgpt_timers[timer].flags &= ~F_AVAIL; | ||
174 | mfgpt_timers[timer].owner = owner; | ||
175 | printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer); | ||
176 | return timer; | ||
177 | } | ||
178 | |||
179 | int geode_mfgpt_alloc_timer(int timer, int domain, struct module *owner) | ||
180 | { | ||
181 | int i; | ||
182 | |||
183 | if (!geode_get_dev_base(GEODE_DEV_MFGPT)) | ||
184 | return -ENODEV; | ||
185 | if (timer >= MFGPT_MAX_TIMERS) | ||
186 | return -EIO; | ||
187 | |||
188 | if (timer < 0) { | ||
189 | /* Try to find an available timer */ | ||
190 | for (i = 0; i < MFGPT_MAX_TIMERS; i++) { | ||
191 | if (mfgpt_timers[i].flags & F_AVAIL) | ||
192 | return mfgpt_get(i, owner); | ||
193 | |||
194 | if (i == 5 && domain == MFGPT_DOMAIN_WORKING) | ||
195 | break; | ||
196 | } | ||
197 | } else { | ||
198 | /* If they requested a specific timer, try to honor that */ | ||
199 | if (mfgpt_timers[timer].flags & F_AVAIL) | ||
200 | return mfgpt_get(timer, owner); | ||
201 | } | ||
202 | |||
203 | /* No timers available - too bad */ | ||
204 | return -1; | ||
205 | } | ||
206 | |||
207 | |||
208 | #ifdef CONFIG_GEODE_MFGPT_TIMER | ||
209 | |||
210 | /* | ||
211 | * The MFGPT timers on the CS5536 provide us with suitable timers to use | ||
212 | * as clock event sources - not as good as a HPET or APIC, but certainly | ||
213 | * better than the PIT. This isn't a general purpose MFGPT driver, but | ||
214 | * a simplified one designed specifically to act as a clock event source. | ||
215 | * For full details about the MFGPT, please consult the CS5536 data sheet. | ||
216 | */ | ||
217 | |||
218 | #include <linux/clocksource.h> | ||
219 | #include <linux/clockchips.h> | ||
220 | |||
221 | static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN; | ||
222 | static u16 mfgpt_event_clock; | ||
223 | |||
224 | static int irq = 7; | ||
225 | static int __init mfgpt_setup(char *str) | ||
226 | { | ||
227 | get_option(&str, &irq); | ||
228 | return 1; | ||
229 | } | ||
230 | __setup("mfgpt_irq=", mfgpt_setup); | ||
231 | |||
232 | static inline void mfgpt_disable_timer(u16 clock) | ||
233 | { | ||
234 | u16 val = geode_mfgpt_read(clock, MFGPT_REG_SETUP); | ||
235 | geode_mfgpt_write(clock, MFGPT_REG_SETUP, val & ~MFGPT_SETUP_CNTEN); | ||
236 | } | ||
237 | |||
238 | static int mfgpt_next_event(unsigned long, struct clock_event_device *); | ||
239 | static void mfgpt_set_mode(enum clock_event_mode, struct clock_event_device *); | ||
240 | |||
241 | static struct clock_event_device mfgpt_clockevent = { | ||
242 | .name = "mfgpt-timer", | ||
243 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | ||
244 | .set_mode = mfgpt_set_mode, | ||
245 | .set_next_event = mfgpt_next_event, | ||
246 | .rating = 250, | ||
247 | .cpumask = CPU_MASK_ALL, | ||
248 | .shift = 32 | ||
249 | }; | ||
250 | |||
251 | static inline void mfgpt_start_timer(u16 clock, u16 delta) | ||
252 | { | ||
253 | geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta); | ||
254 | geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0); | ||
255 | |||
256 | geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, | ||
257 | MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); | ||
258 | } | ||
259 | |||
260 | static void mfgpt_set_mode(enum clock_event_mode mode, | ||
261 | struct clock_event_device *evt) | ||
262 | { | ||
263 | mfgpt_disable_timer(mfgpt_event_clock); | ||
264 | |||
265 | if (mode == CLOCK_EVT_MODE_PERIODIC) | ||
266 | mfgpt_start_timer(mfgpt_event_clock, MFGPT_PERIODIC); | ||
267 | |||
268 | mfgpt_tick_mode = mode; | ||
269 | } | ||
270 | |||
271 | static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt) | ||
272 | { | ||
273 | mfgpt_start_timer(mfgpt_event_clock, delta); | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | /* Assume (foolishly?), that this interrupt was due to our tick */ | ||
278 | |||
279 | static irqreturn_t mfgpt_tick(int irq, void *dev_id) | ||
280 | { | ||
281 | if (mfgpt_tick_mode == CLOCK_EVT_MODE_SHUTDOWN) | ||
282 | return IRQ_HANDLED; | ||
283 | |||
284 | /* Turn off the clock */ | ||
285 | mfgpt_disable_timer(mfgpt_event_clock); | ||
286 | |||
287 | /* Clear the counter */ | ||
288 | geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0); | ||
289 | |||
290 | /* Restart the clock in periodic mode */ | ||
291 | |||
292 | if (mfgpt_tick_mode == CLOCK_EVT_MODE_PERIODIC) { | ||
293 | geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, | ||
294 | MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); | ||
295 | } | ||
296 | |||
297 | mfgpt_clockevent.event_handler(&mfgpt_clockevent); | ||
298 | return IRQ_HANDLED; | ||
299 | } | ||
300 | |||
301 | static struct irqaction mfgptirq = { | ||
302 | .handler = mfgpt_tick, | ||
303 | .flags = IRQF_DISABLED | IRQF_NOBALANCING, | ||
304 | .mask = CPU_MASK_NONE, | ||
305 | .name = "mfgpt-timer" | ||
306 | }; | ||
307 | |||
308 | static int __init mfgpt_timer_setup(void) | ||
309 | { | ||
310 | int timer, ret; | ||
311 | u16 val; | ||
312 | |||
313 | timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING, | ||
314 | THIS_MODULE); | ||
315 | if (timer < 0) { | ||
316 | printk(KERN_ERR | ||
317 | "mfgpt-timer: Could not allocate a MFPGT timer\n"); | ||
318 | return -ENODEV; | ||
319 | } | ||
320 | |||
321 | mfgpt_event_clock = timer; | ||
322 | /* Set the clock scale and enable the event mode for CMP2 */ | ||
323 | val = MFGPT_SCALE | (3 << 8); | ||
324 | |||
325 | geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val); | ||
326 | |||
327 | /* Set up the IRQ on the MFGPT side */ | ||
328 | if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, irq)) { | ||
329 | printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq); | ||
330 | return -EIO; | ||
331 | } | ||
332 | |||
333 | /* And register it with the kernel */ | ||
334 | ret = setup_irq(irq, &mfgptirq); | ||
335 | |||
336 | if (ret) { | ||
337 | printk(KERN_ERR | ||
338 | "mfgpt-timer: Unable to set up the interrupt.\n"); | ||
339 | goto err; | ||
340 | } | ||
341 | |||
342 | /* Set up the clock event */ | ||
343 | mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, 32); | ||
344 | mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF, | ||
345 | &mfgpt_clockevent); | ||
346 | mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE, | ||
347 | &mfgpt_clockevent); | ||
348 | |||
349 | printk(KERN_INFO | ||
350 | "mfgpt-timer: registering the MFGT timer as a clock event.\n"); | ||
351 | clockevents_register_device(&mfgpt_clockevent); | ||
352 | |||
353 | return 0; | ||
354 | |||
355 | err: | ||
356 | geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, irq); | ||
357 | printk(KERN_ERR | ||
358 | "mfgpt-timer: Unable to set up the MFGPT clock source\n"); | ||
359 | return -EIO; | ||
360 | } | ||
361 | |||
362 | #endif | ||
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c index c7227e2180f8..95d3fc203cf7 100644 --- a/arch/x86/kernel/nmi_32.c +++ b/arch/x86/kernel/nmi_32.c | |||
@@ -353,7 +353,8 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) | |||
353 | * Take the local apic timer and PIT/HPET into account. We don't | 353 | * Take the local apic timer and PIT/HPET into account. We don't |
354 | * know which one is active, when we have highres/dyntick on | 354 | * know which one is active, when we have highres/dyntick on |
355 | */ | 355 | */ |
356 | sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_cpu(cpu).irqs[0]; | 356 | sum = per_cpu(irq_stat, cpu).apic_timer_irqs + |
357 | per_cpu(irq_stat, cpu).irq0_irqs; | ||
357 | 358 | ||
358 | /* if the none of the timers isn't firing, this cpu isn't doing much */ | 359 | /* if the none of the timers isn't firing, this cpu isn't doing much */ |
359 | if (!touched && last_irq_sums[cpu] == sum) { | 360 | if (!touched && last_irq_sums[cpu] == sum) { |
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c index 0ec6d2ddb931..e60ac0da5283 100644 --- a/arch/x86/kernel/nmi_64.c +++ b/arch/x86/kernel/nmi_64.c | |||
@@ -329,7 +329,7 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) | |||
329 | touched = 1; | 329 | touched = 1; |
330 | } | 330 | } |
331 | 331 | ||
332 | sum = read_pda(apic_timer_irqs); | 332 | sum = read_pda(apic_timer_irqs) + read_pda(irq0_irqs); |
333 | if (__get_cpu_var(nmi_touch)) { | 333 | if (__get_cpu_var(nmi_touch)) { |
334 | __get_cpu_var(nmi_touch) = 0; | 334 | __get_cpu_var(nmi_touch) = 0; |
335 | touched = 1; | 335 | touched = 1; |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 98956555450b..6f9dbbe65eef 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/notifier.h> | 38 | #include <linux/notifier.h> |
39 | #include <linux/kprobes.h> | 39 | #include <linux/kprobes.h> |
40 | #include <linux/kdebug.h> | 40 | #include <linux/kdebug.h> |
41 | #include <linux/tick.h> | ||
41 | 42 | ||
42 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
43 | #include <asm/pgtable.h> | 44 | #include <asm/pgtable.h> |
@@ -208,6 +209,8 @@ void cpu_idle (void) | |||
208 | if (__get_cpu_var(cpu_idle_state)) | 209 | if (__get_cpu_var(cpu_idle_state)) |
209 | __get_cpu_var(cpu_idle_state) = 0; | 210 | __get_cpu_var(cpu_idle_state) = 0; |
210 | 211 | ||
212 | tick_nohz_stop_sched_tick(); | ||
213 | |||
211 | rmb(); | 214 | rmb(); |
212 | idle = pm_idle; | 215 | idle = pm_idle; |
213 | if (!idle) | 216 | if (!idle) |
@@ -228,6 +231,7 @@ void cpu_idle (void) | |||
228 | __exit_idle(); | 231 | __exit_idle(); |
229 | } | 232 | } |
230 | 233 | ||
234 | tick_nohz_restart_sched_tick(); | ||
231 | preempt_enable_no_resched(); | 235 | preempt_enable_no_resched(); |
232 | schedule(); | 236 | schedule(); |
233 | preempt_disable(); | 237 | preempt_disable(); |
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 6722469c2633..d769e204f942 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -4,6 +4,8 @@ | |||
4 | #include <linux/pci.h> | 4 | #include <linux/pci.h> |
5 | #include <linux/irq.h> | 5 | #include <linux/irq.h> |
6 | 6 | ||
7 | #include <asm/hpet.h> | ||
8 | |||
7 | #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI) | 9 | #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI) |
8 | 10 | ||
9 | static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) | 11 | static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) |
@@ -47,3 +49,206 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quir | |||
47 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); | 49 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); |
48 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); | 50 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); |
49 | #endif | 51 | #endif |
52 | |||
53 | #if defined(CONFIG_HPET_TIMER) | ||
54 | unsigned long force_hpet_address; | ||
55 | |||
56 | static enum { | ||
57 | NONE_FORCE_HPET_RESUME, | ||
58 | OLD_ICH_FORCE_HPET_RESUME, | ||
59 | ICH_FORCE_HPET_RESUME | ||
60 | } force_hpet_resume_type; | ||
61 | |||
62 | static void __iomem *rcba_base; | ||
63 | |||
64 | static void ich_force_hpet_resume(void) | ||
65 | { | ||
66 | u32 val; | ||
67 | |||
68 | if (!force_hpet_address) | ||
69 | return; | ||
70 | |||
71 | if (rcba_base == NULL) | ||
72 | BUG(); | ||
73 | |||
74 | /* read the Function Disable register, dword mode only */ | ||
75 | val = readl(rcba_base + 0x3404); | ||
76 | if (!(val & 0x80)) { | ||
77 | /* HPET disabled in HPTC. Trying to enable */ | ||
78 | writel(val | 0x80, rcba_base + 0x3404); | ||
79 | } | ||
80 | |||
81 | val = readl(rcba_base + 0x3404); | ||
82 | if (!(val & 0x80)) | ||
83 | BUG(); | ||
84 | else | ||
85 | printk(KERN_DEBUG "Force enabled HPET at resume\n"); | ||
86 | |||
87 | return; | ||
88 | } | ||
89 | |||
90 | static void ich_force_enable_hpet(struct pci_dev *dev) | ||
91 | { | ||
92 | u32 val; | ||
93 | u32 uninitialized_var(rcba); | ||
94 | int err = 0; | ||
95 | |||
96 | if (hpet_address || force_hpet_address) | ||
97 | return; | ||
98 | |||
99 | pci_read_config_dword(dev, 0xF0, &rcba); | ||
100 | rcba &= 0xFFFFC000; | ||
101 | if (rcba == 0) { | ||
102 | printk(KERN_DEBUG "RCBA disabled. Cannot force enable HPET\n"); | ||
103 | return; | ||
104 | } | ||
105 | |||
106 | /* use bits 31:14, 16 kB aligned */ | ||
107 | rcba_base = ioremap_nocache(rcba, 0x4000); | ||
108 | if (rcba_base == NULL) { | ||
109 | printk(KERN_DEBUG "ioremap failed. Cannot force enable HPET\n"); | ||
110 | return; | ||
111 | } | ||
112 | |||
113 | /* read the Function Disable register, dword mode only */ | ||
114 | val = readl(rcba_base + 0x3404); | ||
115 | |||
116 | if (val & 0x80) { | ||
117 | /* HPET is enabled in HPTC. Just not reported by BIOS */ | ||
118 | val = val & 0x3; | ||
119 | force_hpet_address = 0xFED00000 | (val << 12); | ||
120 | printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", | ||
121 | force_hpet_address); | ||
122 | iounmap(rcba_base); | ||
123 | return; | ||
124 | } | ||
125 | |||
126 | /* HPET disabled in HPTC. Trying to enable */ | ||
127 | writel(val | 0x80, rcba_base + 0x3404); | ||
128 | |||
129 | val = readl(rcba_base + 0x3404); | ||
130 | if (!(val & 0x80)) { | ||
131 | err = 1; | ||
132 | } else { | ||
133 | val = val & 0x3; | ||
134 | force_hpet_address = 0xFED00000 | (val << 12); | ||
135 | } | ||
136 | |||
137 | if (err) { | ||
138 | force_hpet_address = 0; | ||
139 | iounmap(rcba_base); | ||
140 | printk(KERN_DEBUG "Failed to force enable HPET\n"); | ||
141 | } else { | ||
142 | force_hpet_resume_type = ICH_FORCE_HPET_RESUME; | ||
143 | printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", | ||
144 | force_hpet_address); | ||
145 | } | ||
146 | } | ||
147 | |||
148 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, | ||
149 | ich_force_enable_hpet); | ||
150 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, | ||
151 | ich_force_enable_hpet); | ||
152 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, | ||
153 | ich_force_enable_hpet); | ||
154 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, | ||
155 | ich_force_enable_hpet); | ||
156 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, | ||
157 | ich_force_enable_hpet); | ||
158 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, | ||
159 | ich_force_enable_hpet); | ||
160 | |||
161 | |||
162 | static struct pci_dev *cached_dev; | ||
163 | |||
164 | static void old_ich_force_hpet_resume(void) | ||
165 | { | ||
166 | u32 val; | ||
167 | u32 uninitialized_var(gen_cntl); | ||
168 | |||
169 | if (!force_hpet_address || !cached_dev) | ||
170 | return; | ||
171 | |||
172 | pci_read_config_dword(cached_dev, 0xD0, &gen_cntl); | ||
173 | gen_cntl &= (~(0x7 << 15)); | ||
174 | gen_cntl |= (0x4 << 15); | ||
175 | |||
176 | pci_write_config_dword(cached_dev, 0xD0, gen_cntl); | ||
177 | pci_read_config_dword(cached_dev, 0xD0, &gen_cntl); | ||
178 | val = gen_cntl >> 15; | ||
179 | val &= 0x7; | ||
180 | if (val == 0x4) | ||
181 | printk(KERN_DEBUG "Force enabled HPET at resume\n"); | ||
182 | else | ||
183 | BUG(); | ||
184 | } | ||
185 | |||
186 | static void old_ich_force_enable_hpet(struct pci_dev *dev) | ||
187 | { | ||
188 | u32 val; | ||
189 | u32 uninitialized_var(gen_cntl); | ||
190 | |||
191 | if (hpet_address || force_hpet_address) | ||
192 | return; | ||
193 | |||
194 | pci_read_config_dword(dev, 0xD0, &gen_cntl); | ||
195 | /* | ||
196 | * Bit 17 is HPET enable bit. | ||
197 | * Bit 16:15 control the HPET base address. | ||
198 | */ | ||
199 | val = gen_cntl >> 15; | ||
200 | val &= 0x7; | ||
201 | if (val & 0x4) { | ||
202 | val &= 0x3; | ||
203 | force_hpet_address = 0xFED00000 | (val << 12); | ||
204 | printk(KERN_DEBUG "HPET at base address 0x%lx\n", | ||
205 | force_hpet_address); | ||
206 | return; | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * HPET is disabled. Trying enabling at FED00000 and check | ||
211 | * whether it sticks | ||
212 | */ | ||
213 | gen_cntl &= (~(0x7 << 15)); | ||
214 | gen_cntl |= (0x4 << 15); | ||
215 | pci_write_config_dword(dev, 0xD0, gen_cntl); | ||
216 | |||
217 | pci_read_config_dword(dev, 0xD0, &gen_cntl); | ||
218 | |||
219 | val = gen_cntl >> 15; | ||
220 | val &= 0x7; | ||
221 | if (val & 0x4) { | ||
222 | /* HPET is enabled in HPTC. Just not reported by BIOS */ | ||
223 | val &= 0x3; | ||
224 | force_hpet_address = 0xFED00000 | (val << 12); | ||
225 | printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", | ||
226 | force_hpet_address); | ||
227 | cached_dev = dev; | ||
228 | force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME; | ||
229 | return; | ||
230 | } | ||
231 | |||
232 | printk(KERN_DEBUG "Failed to force enable HPET\n"); | ||
233 | } | ||
234 | |||
235 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, | ||
236 | old_ich_force_enable_hpet); | ||
237 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12, | ||
238 | old_ich_force_enable_hpet); | ||
239 | |||
240 | void force_hpet_resume(void) | ||
241 | { | ||
242 | switch (force_hpet_resume_type) { | ||
243 | case ICH_FORCE_HPET_RESUME: | ||
244 | return ich_force_hpet_resume(); | ||
245 | |||
246 | case OLD_ICH_FORCE_HPET_RESUME: | ||
247 | return old_ich_force_hpet_resume(); | ||
248 | |||
249 | default: | ||
250 | break; | ||
251 | } | ||
252 | } | ||
253 | |||
254 | #endif | ||
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index af838f6b0b7f..32054bf5ba40 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c | |||
@@ -546,6 +546,37 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) | |||
546 | #endif | 546 | #endif |
547 | } | 547 | } |
548 | 548 | ||
549 | #define ENABLE_C1E_MASK 0x18000000 | ||
550 | #define CPUID_PROCESSOR_SIGNATURE 1 | ||
551 | #define CPUID_XFAM 0x0ff00000 | ||
552 | #define CPUID_XFAM_K8 0x00000000 | ||
553 | #define CPUID_XFAM_10H 0x00100000 | ||
554 | #define CPUID_XFAM_11H 0x00200000 | ||
555 | #define CPUID_XMOD 0x000f0000 | ||
556 | #define CPUID_XMOD_REV_F 0x00040000 | ||
557 | |||
558 | /* AMD systems with C1E don't have a working lAPIC timer. Check for that. */ | ||
559 | static __cpuinit int amd_apic_timer_broken(void) | ||
560 | { | ||
561 | u32 lo, hi; | ||
562 | u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); | ||
563 | switch (eax & CPUID_XFAM) { | ||
564 | case CPUID_XFAM_K8: | ||
565 | if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F) | ||
566 | break; | ||
567 | case CPUID_XFAM_10H: | ||
568 | case CPUID_XFAM_11H: | ||
569 | rdmsr(MSR_K8_ENABLE_C1E, lo, hi); | ||
570 | if (lo & ENABLE_C1E_MASK) | ||
571 | return 1; | ||
572 | break; | ||
573 | default: | ||
574 | /* err on the side of caution */ | ||
575 | return 1; | ||
576 | } | ||
577 | return 0; | ||
578 | } | ||
579 | |||
549 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | 580 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) |
550 | { | 581 | { |
551 | unsigned level; | 582 | unsigned level; |
@@ -617,6 +648,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
617 | /* Family 10 doesn't support C states in MWAIT so don't use it */ | 648 | /* Family 10 doesn't support C states in MWAIT so don't use it */ |
618 | if (c->x86 == 0x10 && !force_mwait) | 649 | if (c->x86 == 0x10 && !force_mwait) |
619 | clear_bit(X86_FEATURE_MWAIT, &c->x86_capability); | 650 | clear_bit(X86_FEATURE_MWAIT, &c->x86_capability); |
651 | |||
652 | if (amd_apic_timer_broken()) | ||
653 | disable_apic_timer = 1; | ||
620 | } | 654 | } |
621 | 655 | ||
622 | static void __cpuinit detect_ht(struct cpuinfo_x86 *c) | 656 | static void __cpuinit detect_ht(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 32f50783edc8..57ccf7cb6b91 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c | |||
@@ -223,8 +223,6 @@ void __cpuinit smp_callin(void) | |||
223 | local_irq_disable(); | 223 | local_irq_disable(); |
224 | Dprintk("Stack at about %p\n",&cpuid); | 224 | Dprintk("Stack at about %p\n",&cpuid); |
225 | 225 | ||
226 | disable_APIC_timer(); | ||
227 | |||
228 | /* | 226 | /* |
229 | * Save our processor parameters | 227 | * Save our processor parameters |
230 | */ | 228 | */ |
@@ -348,8 +346,6 @@ void __cpuinit start_secondary(void) | |||
348 | enable_8259A_irq(0); | 346 | enable_8259A_irq(0); |
349 | } | 347 | } |
350 | 348 | ||
351 | enable_APIC_timer(); | ||
352 | |||
353 | /* | 349 | /* |
354 | * The sibling maps must be set before turing the online map on for | 350 | * The sibling maps must be set before turing the online map on for |
355 | * this cpu | 351 | * this cpu |
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c index 19a6c678d02e..56dadfc2f41c 100644 --- a/arch/x86/kernel/time_32.c +++ b/arch/x86/kernel/time_32.c | |||
@@ -157,6 +157,9 @@ EXPORT_SYMBOL(profile_pc); | |||
157 | */ | 157 | */ |
158 | irqreturn_t timer_interrupt(int irq, void *dev_id) | 158 | irqreturn_t timer_interrupt(int irq, void *dev_id) |
159 | { | 159 | { |
160 | /* Keep nmi watchdog up to date */ | ||
161 | per_cpu(irq_stat, smp_processor_id()).irq0_irqs++; | ||
162 | |||
160 | #ifdef CONFIG_X86_IO_APIC | 163 | #ifdef CONFIG_X86_IO_APIC |
161 | if (timer_ack) { | 164 | if (timer_ack) { |
162 | /* | 165 | /* |
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c index 6d48a4e826d9..e0134d6c88da 100644 --- a/arch/x86/kernel/time_64.c +++ b/arch/x86/kernel/time_64.c | |||
@@ -28,11 +28,12 @@ | |||
28 | #include <linux/cpu.h> | 28 | #include <linux/cpu.h> |
29 | #include <linux/kallsyms.h> | 29 | #include <linux/kallsyms.h> |
30 | #include <linux/acpi.h> | 30 | #include <linux/acpi.h> |
31 | #include <linux/clockchips.h> | ||
32 | |||
31 | #ifdef CONFIG_ACPI | 33 | #ifdef CONFIG_ACPI |
32 | #include <acpi/achware.h> /* for PM timer frequency */ | 34 | #include <acpi/achware.h> /* for PM timer frequency */ |
33 | #include <acpi/acpi_bus.h> | 35 | #include <acpi/acpi_bus.h> |
34 | #endif | 36 | #endif |
35 | #include <asm/8253pit.h> | ||
36 | #include <asm/i8253.h> | 37 | #include <asm/i8253.h> |
37 | #include <asm/pgtable.h> | 38 | #include <asm/pgtable.h> |
38 | #include <asm/vsyscall.h> | 39 | #include <asm/vsyscall.h> |
@@ -47,12 +48,8 @@ | |||
47 | #include <asm/nmi.h> | 48 | #include <asm/nmi.h> |
48 | #include <asm/vgtod.h> | 49 | #include <asm/vgtod.h> |
49 | 50 | ||
50 | static char *timename = NULL; | ||
51 | |||
52 | DEFINE_SPINLOCK(rtc_lock); | 51 | DEFINE_SPINLOCK(rtc_lock); |
53 | EXPORT_SYMBOL(rtc_lock); | 52 | EXPORT_SYMBOL(rtc_lock); |
54 | DEFINE_SPINLOCK(i8253_lock); | ||
55 | EXPORT_SYMBOL(i8253_lock); | ||
56 | 53 | ||
57 | volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; | 54 | volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; |
58 | 55 | ||
@@ -153,45 +150,12 @@ int update_persistent_clock(struct timespec now) | |||
153 | return set_rtc_mmss(now.tv_sec); | 150 | return set_rtc_mmss(now.tv_sec); |
154 | } | 151 | } |
155 | 152 | ||
156 | void main_timer_handler(void) | 153 | static irqreturn_t timer_event_interrupt(int irq, void *dev_id) |
157 | { | 154 | { |
158 | /* | 155 | add_pda(irq0_irqs, 1); |
159 | * Here we are in the timer irq handler. We have irqs locally disabled (so we | ||
160 | * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running | ||
161 | * on the other CPU, so we need a lock. We also need to lock the vsyscall | ||
162 | * variables, because both do_timer() and us change them -arca+vojtech | ||
163 | */ | ||
164 | |||
165 | write_seqlock(&xtime_lock); | ||
166 | 156 | ||
167 | /* | 157 | global_clock_event->event_handler(global_clock_event); |
168 | * Do the timer stuff. | ||
169 | */ | ||
170 | |||
171 | do_timer(1); | ||
172 | #ifndef CONFIG_SMP | ||
173 | update_process_times(user_mode(get_irq_regs())); | ||
174 | #endif | ||
175 | 158 | ||
176 | /* | ||
177 | * In the SMP case we use the local APIC timer interrupt to do the profiling, | ||
178 | * except when we simulate SMP mode on a uniprocessor system, in that case we | ||
179 | * have to call the local interrupt handler. | ||
180 | */ | ||
181 | |||
182 | if (!using_apic_timer) | ||
183 | smp_local_timer_interrupt(); | ||
184 | |||
185 | write_sequnlock(&xtime_lock); | ||
186 | } | ||
187 | |||
188 | static irqreturn_t timer_interrupt(int irq, void *dev_id) | ||
189 | { | ||
190 | if (apic_runs_main_timer > 1) | ||
191 | return IRQ_HANDLED; | ||
192 | main_timer_handler(); | ||
193 | if (using_apic_timer) | ||
194 | smp_send_timer_broadcast_ipi(); | ||
195 | return IRQ_HANDLED; | 159 | return IRQ_HANDLED; |
196 | } | 160 | } |
197 | 161 | ||
@@ -292,97 +256,21 @@ static unsigned int __init tsc_calibrate_cpu_khz(void) | |||
292 | return pmc_now * tsc_khz / (tsc_now - tsc_start); | 256 | return pmc_now * tsc_khz / (tsc_now - tsc_start); |
293 | } | 257 | } |
294 | 258 | ||
295 | /* | ||
296 | * pit_calibrate_tsc() uses the speaker output (channel 2) of | ||
297 | * the PIT. This is better than using the timer interrupt output, | ||
298 | * because we can read the value of the speaker with just one inb(), | ||
299 | * where we need three i/o operations for the interrupt channel. | ||
300 | * We count how many ticks the TSC does in 50 ms. | ||
301 | */ | ||
302 | |||
303 | static unsigned int __init pit_calibrate_tsc(void) | ||
304 | { | ||
305 | unsigned long start, end; | ||
306 | unsigned long flags; | ||
307 | |||
308 | spin_lock_irqsave(&i8253_lock, flags); | ||
309 | |||
310 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); | ||
311 | |||
312 | outb(0xb0, 0x43); | ||
313 | outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42); | ||
314 | outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42); | ||
315 | start = get_cycles_sync(); | ||
316 | while ((inb(0x61) & 0x20) == 0); | ||
317 | end = get_cycles_sync(); | ||
318 | |||
319 | spin_unlock_irqrestore(&i8253_lock, flags); | ||
320 | |||
321 | return (end - start) / 50; | ||
322 | } | ||
323 | |||
324 | #define PIT_MODE 0x43 | ||
325 | #define PIT_CH0 0x40 | ||
326 | |||
327 | static void __pit_init(int val, u8 mode) | ||
328 | { | ||
329 | unsigned long flags; | ||
330 | |||
331 | spin_lock_irqsave(&i8253_lock, flags); | ||
332 | outb_p(mode, PIT_MODE); | ||
333 | outb_p(val & 0xff, PIT_CH0); /* LSB */ | ||
334 | outb_p(val >> 8, PIT_CH0); /* MSB */ | ||
335 | spin_unlock_irqrestore(&i8253_lock, flags); | ||
336 | } | ||
337 | |||
338 | void __init pit_init(void) | ||
339 | { | ||
340 | __pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */ | ||
341 | } | ||
342 | |||
343 | void pit_stop_interrupt(void) | ||
344 | { | ||
345 | __pit_init(0, 0x30); /* mode 0 */ | ||
346 | } | ||
347 | |||
348 | void stop_timer_interrupt(void) | ||
349 | { | ||
350 | char *name; | ||
351 | if (hpet_address) { | ||
352 | name = "HPET"; | ||
353 | hpet_timer_stop_set_go(0); | ||
354 | } else { | ||
355 | name = "PIT"; | ||
356 | pit_stop_interrupt(); | ||
357 | } | ||
358 | printk(KERN_INFO "timer: %s interrupt stopped.\n", name); | ||
359 | } | ||
360 | |||
361 | static struct irqaction irq0 = { | 259 | static struct irqaction irq0 = { |
362 | .handler = timer_interrupt, | 260 | .handler = timer_event_interrupt, |
363 | .flags = IRQF_DISABLED | IRQF_IRQPOLL, | 261 | .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING, |
364 | .mask = CPU_MASK_NONE, | 262 | .mask = CPU_MASK_NONE, |
365 | .name = "timer" | 263 | .name = "timer" |
366 | }; | 264 | }; |
367 | 265 | ||
368 | void __init time_init(void) | 266 | void __init time_init(void) |
369 | { | 267 | { |
370 | if (nohpet) | 268 | if (!hpet_enable()) |
371 | hpet_address = 0; | 269 | setup_pit_timer(); |
372 | 270 | ||
373 | if (hpet_arch_init()) | 271 | setup_irq(0, &irq0); |
374 | hpet_address = 0; | ||
375 | 272 | ||
376 | if (hpet_use_timer) { | 273 | tsc_calibrate(); |
377 | /* set tick_nsec to use the proper rate for HPET */ | ||
378 | tick_nsec = TICK_NSEC_HPET; | ||
379 | tsc_khz = hpet_calibrate_tsc(); | ||
380 | timename = "HPET"; | ||
381 | } else { | ||
382 | pit_init(); | ||
383 | tsc_khz = pit_calibrate_tsc(); | ||
384 | timename = "PIT"; | ||
385 | } | ||
386 | 274 | ||
387 | cpu_khz = tsc_khz; | 275 | cpu_khz = tsc_khz; |
388 | if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) && | 276 | if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) && |
@@ -398,50 +286,7 @@ void __init time_init(void) | |||
398 | else | 286 | else |
399 | vgetcpu_mode = VGETCPU_LSL; | 287 | vgetcpu_mode = VGETCPU_LSL; |
400 | 288 | ||
401 | set_cyc2ns_scale(tsc_khz); | ||
402 | printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", | 289 | printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", |
403 | cpu_khz / 1000, cpu_khz % 1000); | 290 | cpu_khz / 1000, cpu_khz % 1000); |
404 | init_tsc_clocksource(); | 291 | init_tsc_clocksource(); |
405 | |||
406 | setup_irq(0, &irq0); | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * sysfs support for the timer. | ||
411 | */ | ||
412 | |||
413 | static int timer_suspend(struct sys_device *dev, pm_message_t state) | ||
414 | { | ||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | static int timer_resume(struct sys_device *dev) | ||
419 | { | ||
420 | if (hpet_address) | ||
421 | hpet_reenable(); | ||
422 | else | ||
423 | i8254_timer_resume(); | ||
424 | return 0; | ||
425 | } | 292 | } |
426 | |||
427 | static struct sysdev_class timer_sysclass = { | ||
428 | .resume = timer_resume, | ||
429 | .suspend = timer_suspend, | ||
430 | set_kset_name("timer"), | ||
431 | }; | ||
432 | |||
433 | /* XXX this sysfs stuff should probably go elsewhere later -john */ | ||
434 | static struct sys_device device_timer = { | ||
435 | .id = 0, | ||
436 | .cls = &timer_sysclass, | ||
437 | }; | ||
438 | |||
439 | static int time_init_device(void) | ||
440 | { | ||
441 | int error = sysdev_class_register(&timer_sysclass); | ||
442 | if (!error) | ||
443 | error = sysdev_register(&device_timer); | ||
444 | return error; | ||
445 | } | ||
446 | |||
447 | device_initcall(time_init_device); | ||
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c index 2a59bde663f2..9f22e542c374 100644 --- a/arch/x86/kernel/tsc_64.c +++ b/arch/x86/kernel/tsc_64.c | |||
@@ -6,7 +6,9 @@ | |||
6 | #include <linux/time.h> | 6 | #include <linux/time.h> |
7 | #include <linux/acpi.h> | 7 | #include <linux/acpi.h> |
8 | #include <linux/cpufreq.h> | 8 | #include <linux/cpufreq.h> |
9 | #include <linux/acpi_pmtmr.h> | ||
9 | 10 | ||
11 | #include <asm/hpet.h> | ||
10 | #include <asm/timex.h> | 12 | #include <asm/timex.h> |
11 | 13 | ||
12 | static int notsc __initdata = 0; | 14 | static int notsc __initdata = 0; |
@@ -18,7 +20,7 @@ EXPORT_SYMBOL(tsc_khz); | |||
18 | 20 | ||
19 | static unsigned int cyc2ns_scale __read_mostly; | 21 | static unsigned int cyc2ns_scale __read_mostly; |
20 | 22 | ||
21 | void set_cyc2ns_scale(unsigned long khz) | 23 | static inline void set_cyc2ns_scale(unsigned long khz) |
22 | { | 24 | { |
23 | cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz; | 25 | cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz; |
24 | } | 26 | } |
@@ -118,6 +120,95 @@ core_initcall(cpufreq_tsc); | |||
118 | 120 | ||
119 | #endif | 121 | #endif |
120 | 122 | ||
123 | #define MAX_RETRIES 5 | ||
124 | #define SMI_TRESHOLD 50000 | ||
125 | |||
126 | /* | ||
127 | * Read TSC and the reference counters. Take care of SMI disturbance | ||
128 | */ | ||
129 | static unsigned long __init tsc_read_refs(unsigned long *pm, | ||
130 | unsigned long *hpet) | ||
131 | { | ||
132 | unsigned long t1, t2; | ||
133 | int i; | ||
134 | |||
135 | for (i = 0; i < MAX_RETRIES; i++) { | ||
136 | t1 = get_cycles_sync(); | ||
137 | if (hpet) | ||
138 | *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; | ||
139 | else | ||
140 | *pm = acpi_pm_read_early(); | ||
141 | t2 = get_cycles_sync(); | ||
142 | if ((t2 - t1) < SMI_TRESHOLD) | ||
143 | return t2; | ||
144 | } | ||
145 | return ULONG_MAX; | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * tsc_calibrate - calibrate the tsc on boot | ||
150 | */ | ||
151 | void __init tsc_calibrate(void) | ||
152 | { | ||
153 | unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2; | ||
154 | int hpet = is_hpet_enabled(); | ||
155 | |||
156 | local_irq_save(flags); | ||
157 | |||
158 | tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL); | ||
159 | |||
160 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); | ||
161 | |||
162 | outb(0xb0, 0x43); | ||
163 | outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); | ||
164 | outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); | ||
165 | tr1 = get_cycles_sync(); | ||
166 | while ((inb(0x61) & 0x20) == 0); | ||
167 | tr2 = get_cycles_sync(); | ||
168 | |||
169 | tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); | ||
170 | |||
171 | local_irq_restore(flags); | ||
172 | |||
173 | /* | ||
174 | * Preset the result with the raw and inaccurate PIT | ||
175 | * calibration value | ||
176 | */ | ||
177 | tsc_khz = (tr2 - tr1) / 50; | ||
178 | |||
179 | /* hpet or pmtimer available ? */ | ||
180 | if (!hpet && !pm1 && !pm2) { | ||
181 | printk(KERN_INFO "TSC calibrated against PIT\n"); | ||
182 | return; | ||
183 | } | ||
184 | |||
185 | /* Check, whether the sampling was disturbed by an SMI */ | ||
186 | if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) { | ||
187 | printk(KERN_WARNING "TSC calibration disturbed by SMI, " | ||
188 | "using PIT calibration result\n"); | ||
189 | return; | ||
190 | } | ||
191 | |||
192 | tsc2 = (tsc2 - tsc1) * 1000000L; | ||
193 | |||
194 | if (hpet) { | ||
195 | printk(KERN_INFO "TSC calibrated against HPET\n"); | ||
196 | if (hpet2 < hpet1) | ||
197 | hpet2 += 0x100000000; | ||
198 | hpet2 -= hpet1; | ||
199 | tsc1 = (hpet2 * hpet_readl(HPET_PERIOD)) / 1000000; | ||
200 | } else { | ||
201 | printk(KERN_INFO "TSC calibrated against PM_TIMER\n"); | ||
202 | if (pm2 < pm1) | ||
203 | pm2 += ACPI_PM_OVRRUN; | ||
204 | pm2 -= pm1; | ||
205 | tsc1 = (pm2 * 1000000000) / PMTMR_TICKS_PER_SEC; | ||
206 | } | ||
207 | |||
208 | tsc_khz = tsc2 / tsc1; | ||
209 | set_cyc2ns_scale(tsc_khz); | ||
210 | } | ||
211 | |||
121 | /* | 212 | /* |
122 | * Make an educated guess if the TSC is trustworthy and synchronized | 213 | * Make an educated guess if the TSC is trustworthy and synchronized |
123 | * over all CPUs. | 214 | * over all CPUs. |
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index b1b98e614f7c..eb80f5aca54e 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig | |||
@@ -36,6 +36,18 @@ config GENERIC_CMOS_UPDATE | |||
36 | bool | 36 | bool |
37 | default y | 37 | default y |
38 | 38 | ||
39 | config CLOCKSOURCE_WATCHDOG | ||
40 | bool | ||
41 | default y | ||
42 | |||
43 | config GENERIC_CLOCKEVENTS | ||
44 | bool | ||
45 | default y | ||
46 | |||
47 | config GENERIC_CLOCKEVENTS_BROADCAST | ||
48 | bool | ||
49 | default y | ||
50 | |||
39 | config ZONE_DMA32 | 51 | config ZONE_DMA32 |
40 | bool | 52 | bool |
41 | default y | 53 | default y |
@@ -130,6 +142,8 @@ source "init/Kconfig" | |||
130 | 142 | ||
131 | menu "Processor type and features" | 143 | menu "Processor type and features" |
132 | 144 | ||
145 | source "kernel/time/Kconfig" | ||
146 | |||
133 | choice | 147 | choice |
134 | prompt "Subarchitecture Type" | 148 | prompt "Subarchitecture Type" |
135 | default X86_PC | 149 | default X86_PC |