Diffstat (limited to 'arch'): 39 files changed, 432 insertions, 352 deletions
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 111a7fa5debe..5bba5255b119 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/leds.h> | 25 | #include <linux/leds.h> |
26 | #include <linux/apm-emulation.h> | 26 | #include <linux/apm-emulation.h> |
27 | #include <linux/suspend.h> | ||
27 | 28 | ||
28 | #include <asm/hardware.h> | 29 | #include <asm/hardware.h> |
29 | #include <asm/mach-types.h> | 30 | #include <asm/mach-types.h> |
@@ -765,9 +766,9 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info) | |||
765 | info->battery_life = sharpsl_pm.battstat.mainbat_percent; | 766 | info->battery_life = sharpsl_pm.battstat.mainbat_percent; |
766 | } | 767 | } |
767 | 768 | ||
768 | static struct pm_ops sharpsl_pm_ops = { | 769 | static struct platform_suspend_ops sharpsl_pm_ops = { |
769 | .enter = corgi_pxa_pm_enter, | 770 | .enter = corgi_pxa_pm_enter, |
770 | .valid = pm_valid_only_mem, | 771 | .valid = suspend_valid_only_mem, |
771 | }; | 772 | }; |
772 | 773 | ||
773 | static int __init sharpsl_pm_probe(struct platform_device *pdev) | 774 | static int __init sharpsl_pm_probe(struct platform_device *pdev) |
@@ -799,7 +800,7 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev) | |||
799 | 800 | ||
800 | apm_get_power_status = sharpsl_apm_get_power_status; | 801 | apm_get_power_status = sharpsl_apm_get_power_status; |
801 | 802 | ||
802 | pm_set_ops(&sharpsl_pm_ops); | 803 | suspend_set_ops(&sharpsl_pm_ops); |
803 | 804 | ||
804 | mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250)); | 805 | mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250)); |
805 | 806 | ||
@@ -808,7 +809,7 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev) | |||
808 | 809 | ||
809 | static int sharpsl_pm_remove(struct platform_device *pdev) | 810 | static int sharpsl_pm_remove(struct platform_device *pdev) |
810 | { | 811 | { |
811 | pm_set_ops(NULL); | 812 | suspend_set_ops(NULL); |
812 | 813 | ||
813 | device_remove_file(&pdev->dev, &dev_attr_battery_percentage); | 814 | device_remove_file(&pdev->dev, &dev_attr_battery_percentage); |
814 | device_remove_file(&pdev->dev, &dev_attr_battery_voltage); | 815 | device_remove_file(&pdev->dev, &dev_attr_battery_voltage); |
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index ddf9184d561d..98cb61482917 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -10,10 +10,9 @@ | |||
10 | * (at your option) any later version. | 10 | * (at your option) any later version. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/pm.h> | 13 | #include <linux/suspend.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
16 | #include <linux/pm.h> | ||
17 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
18 | #include <linux/sysfs.h> | 17 | #include <linux/sysfs.h> |
19 | #include <linux/module.h> | 18 | #include <linux/module.h> |
@@ -199,7 +198,7 @@ error: | |||
199 | } | 198 | } |
200 | 199 | ||
201 | 200 | ||
202 | static struct pm_ops at91_pm_ops ={ | 201 | static struct platform_suspend_ops at91_pm_ops ={ |
203 | .valid = at91_pm_valid_state, | 202 | .valid = at91_pm_valid_state, |
204 | .set_target = at91_pm_set_target, | 203 | .set_target = at91_pm_set_target, |
205 | .enter = at91_pm_enter, | 204 | .enter = at91_pm_enter, |
@@ -220,7 +219,7 @@ static int __init at91_pm_init(void) | |||
220 | /* Disable SDRAM low-power mode. Cannot be used with self-refresh. */ | 219 | /* Disable SDRAM low-power mode. Cannot be used with self-refresh. */ |
221 | at91_sys_write(AT91_SDRAMC_LPR, 0); | 220 | at91_sys_write(AT91_SDRAMC_LPR, 0); |
222 | 221 | ||
223 | pm_set_ops(&at91_pm_ops); | 222 | suspend_set_ops(&at91_pm_ops); |
224 | 223 | ||
225 | return 0; | 224 | return 0; |
226 | } | 225 | } |
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 089b8208de0e..3bf01e28df33 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -35,10 +35,9 @@ | |||
35 | * 675 Mass Ave, Cambridge, MA 02139, USA. | 35 | * 675 Mass Ave, Cambridge, MA 02139, USA. |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #include <linux/pm.h> | 38 | #include <linux/suspend.h> |
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | #include <linux/proc_fs.h> | 40 | #include <linux/proc_fs.h> |
41 | #include <linux/pm.h> | ||
42 | #include <linux/interrupt.h> | 41 | #include <linux/interrupt.h> |
43 | #include <linux/sysfs.h> | 42 | #include <linux/sysfs.h> |
44 | #include <linux/module.h> | 43 | #include <linux/module.h> |
@@ -600,27 +599,15 @@ static void (*saved_idle)(void) = NULL; | |||
600 | 599 | ||
601 | /* | 600 | /* |
602 | * omap_pm_prepare - Do preliminary suspend work. | 601 | * omap_pm_prepare - Do preliminary suspend work. |
603 | * @state: suspend state we're entering. | ||
604 | * | 602 | * |
605 | */ | 603 | */ |
606 | static int omap_pm_prepare(suspend_state_t state) | 604 | static int omap_pm_prepare(void) |
607 | { | 605 | { |
608 | int error = 0; | ||
609 | |||
610 | /* We cannot sleep in idle until we have resumed */ | 606 | /* We cannot sleep in idle until we have resumed */ |
611 | saved_idle = pm_idle; | 607 | saved_idle = pm_idle; |
612 | pm_idle = NULL; | 608 | pm_idle = NULL; |
613 | 609 | ||
614 | switch (state) | 610 | return 0; |
615 | { | ||
616 | case PM_SUSPEND_STANDBY: | ||
617 | case PM_SUSPEND_MEM: | ||
618 | break; | ||
619 | default: | ||
620 | return -EINVAL; | ||
621 | } | ||
622 | |||
623 | return error; | ||
624 | } | 611 | } |
625 | 612 | ||
626 | 613 | ||
@@ -648,16 +635,14 @@ static int omap_pm_enter(suspend_state_t state) | |||
648 | 635 | ||
649 | /** | 636 | /** |
650 | * omap_pm_finish - Finish up suspend sequence. | 637 | * omap_pm_finish - Finish up suspend sequence. |
651 | * @state: State we're coming out of. | ||
652 | * | 638 | * |
653 | * This is called after we wake back up (or if entering the sleep state | 639 | * This is called after we wake back up (or if entering the sleep state |
654 | * failed). | 640 | * failed). |
655 | */ | 641 | */ |
656 | 642 | ||
657 | static int omap_pm_finish(suspend_state_t state) | 643 | static void omap_pm_finish(void) |
658 | { | 644 | { |
659 | pm_idle = saved_idle; | 645 | pm_idle = saved_idle; |
660 | return 0; | ||
661 | } | 646 | } |
662 | 647 | ||
663 | 648 | ||
@@ -674,11 +659,11 @@ static struct irqaction omap_wakeup_irq = { | |||
674 | 659 | ||
675 | 660 | ||
676 | 661 | ||
677 | static struct pm_ops omap_pm_ops ={ | 662 | static struct platform_suspend_ops omap_pm_ops ={ |
678 | .prepare = omap_pm_prepare, | 663 | .prepare = omap_pm_prepare, |
679 | .enter = omap_pm_enter, | 664 | .enter = omap_pm_enter, |
680 | .finish = omap_pm_finish, | 665 | .finish = omap_pm_finish, |
681 | .valid = pm_valid_only_mem, | 666 | .valid = suspend_valid_only_mem, |
682 | }; | 667 | }; |
683 | 668 | ||
684 | static int __init omap_pm_init(void) | 669 | static int __init omap_pm_init(void) |
@@ -735,7 +720,7 @@ static int __init omap_pm_init(void) | |||
735 | else if (cpu_is_omap16xx()) | 720 | else if (cpu_is_omap16xx()) |
736 | omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3); | 721 | omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3); |
737 | 722 | ||
738 | pm_set_ops(&omap_pm_ops); | 723 | suspend_set_ops(&omap_pm_ops); |
739 | 724 | ||
740 | #if defined(DEBUG) && defined(CONFIG_PROC_FS) | 725 | #if defined(DEBUG) && defined(CONFIG_PROC_FS) |
741 | omap_pm_init_proc(); | 726 | omap_pm_init_proc(); |
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 6f4a5436d0ce..baf7d82b458b 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -16,10 +16,9 @@ | |||
16 | * published by the Free Software Foundation. | 16 | * published by the Free Software Foundation. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/pm.h> | 19 | #include <linux/suspend.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
22 | #include <linux/pm.h> | ||
23 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
24 | #include <linux/sysfs.h> | 23 | #include <linux/sysfs.h> |
25 | #include <linux/module.h> | 24 | #include <linux/module.h> |
@@ -71,28 +70,12 @@ void omap2_pm_idle(void) | |||
71 | local_irq_enable(); | 70 | local_irq_enable(); |
72 | } | 71 | } |
73 | 72 | ||
74 | static int omap2_pm_prepare(suspend_state_t state) | 73 | static int omap2_pm_prepare(void) |
75 | { | 74 | { |
76 | int error = 0; | ||
77 | |||
78 | /* We cannot sleep in idle until we have resumed */ | 75 | /* We cannot sleep in idle until we have resumed */ |
79 | saved_idle = pm_idle; | 76 | saved_idle = pm_idle; |
80 | pm_idle = NULL; | 77 | pm_idle = NULL; |
81 | 78 | return 0; | |
82 | switch (state) | ||
83 | { | ||
84 | case PM_SUSPEND_STANDBY: | ||
85 | case PM_SUSPEND_MEM: | ||
86 | break; | ||
87 | |||
88 | case PM_SUSPEND_DISK: | ||
89 | return -ENOTSUPP; | ||
90 | |||
91 | default: | ||
92 | return -EINVAL; | ||
93 | } | ||
94 | |||
95 | return error; | ||
96 | } | 79 | } |
97 | 80 | ||
98 | #define INT0_WAKE_MASK (OMAP_IRQ_BIT(INT_24XX_GPIO_BANK1) | \ | 81 | #define INT0_WAKE_MASK (OMAP_IRQ_BIT(INT_24XX_GPIO_BANK1) | \ |
@@ -353,9 +336,6 @@ static int omap2_pm_enter(suspend_state_t state) | |||
353 | case PM_SUSPEND_MEM: | 336 | case PM_SUSPEND_MEM: |
354 | ret = omap2_pm_suspend(); | 337 | ret = omap2_pm_suspend(); |
355 | break; | 338 | break; |
356 | case PM_SUSPEND_DISK: | ||
357 | ret = -ENOTSUPP; | ||
358 | break; | ||
359 | default: | 339 | default: |
360 | ret = -EINVAL; | 340 | ret = -EINVAL; |
361 | } | 341 | } |
@@ -363,17 +343,16 @@ static int omap2_pm_enter(suspend_state_t state) | |||
363 | return ret; | 343 | return ret; |
364 | } | 344 | } |
365 | 345 | ||
366 | static int omap2_pm_finish(suspend_state_t state) | 346 | static void omap2_pm_finish(void) |
367 | { | 347 | { |
368 | pm_idle = saved_idle; | 348 | pm_idle = saved_idle; |
369 | return 0; | ||
370 | } | 349 | } |
371 | 350 | ||
372 | static struct pm_ops omap_pm_ops = { | 351 | static struct platform_suspend_ops omap_pm_ops = { |
373 | .prepare = omap2_pm_prepare, | 352 | .prepare = omap2_pm_prepare, |
374 | .enter = omap2_pm_enter, | 353 | .enter = omap2_pm_enter, |
375 | .finish = omap2_pm_finish, | 354 | .finish = omap2_pm_finish, |
376 | .valid = pm_valid_only_mem, | 355 | .valid = suspend_valid_only_mem, |
377 | }; | 356 | }; |
378 | 357 | ||
379 | int __init omap2_pm_init(void) | 358 | int __init omap2_pm_init(void) |
@@ -397,7 +376,7 @@ int __init omap2_pm_init(void) | |||
397 | omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend, | 376 | omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend, |
398 | omap24xx_cpu_suspend_sz); | 377 | omap24xx_cpu_suspend_sz); |
399 | 378 | ||
400 | pm_set_ops(&omap_pm_ops); | 379 | suspend_set_ops(&omap_pm_ops); |
401 | pm_idle = omap2_pm_idle; | 380 | pm_idle = omap2_pm_idle; |
402 | 381 | ||
403 | pmdomain_init(); | 382 | pmdomain_init(); |
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index 2a137f33f752..40116d254349 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/rtc.h> | 15 | #include <linux/rtc.h> |
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/proc_fs.h> | 17 | #include <linux/proc_fs.h> |
18 | #include <linux/pm.h> | 18 | #include <linux/suspend.h> |
19 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
20 | #include <linux/clk.h> | 20 | #include <linux/clk.h> |
21 | 21 | ||
@@ -117,7 +117,7 @@ static int pnx4008_pm_valid(suspend_state_t state) | |||
117 | (state == PM_SUSPEND_MEM); | 117 | (state == PM_SUSPEND_MEM); |
118 | } | 118 | } |
119 | 119 | ||
120 | static struct pm_ops pnx4008_pm_ops = { | 120 | static struct platform_suspend_ops pnx4008_pm_ops = { |
121 | .enter = pnx4008_pm_enter, | 121 | .enter = pnx4008_pm_enter, |
122 | .valid = pnx4008_pm_valid, | 122 | .valid = pnx4008_pm_valid, |
123 | }; | 123 | }; |
@@ -146,7 +146,7 @@ static int __init pnx4008_pm_init(void) | |||
146 | return -ENOMEM; | 146 | return -ENOMEM; |
147 | } | 147 | } |
148 | 148 | ||
149 | pm_set_ops(&pnx4008_pm_ops); | 149 | suspend_set_ops(&pnx4008_pm_ops); |
150 | return 0; | 150 | return 0; |
151 | } | 151 | } |
152 | 152 | ||
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index b59a81a8e7d3..a941c71c7d06 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -86,7 +86,7 @@ static int pxa_pm_valid(suspend_state_t state) | |||
86 | return -EINVAL; | 86 | return -EINVAL; |
87 | } | 87 | } |
88 | 88 | ||
89 | static struct pm_ops pxa_pm_ops = { | 89 | static struct platform_suspend_ops pxa_pm_ops = { |
90 | .valid = pxa_pm_valid, | 90 | .valid = pxa_pm_valid, |
91 | .enter = pxa_pm_enter, | 91 | .enter = pxa_pm_enter, |
92 | }; | 92 | }; |
@@ -104,7 +104,7 @@ static int __init pxa_pm_init(void) | |||
104 | return -ENOMEM; | 104 | return -ENOMEM; |
105 | } | 105 | } |
106 | 106 | ||
107 | pm_set_ops(&pxa_pm_ops); | 107 | suspend_set_ops(&pxa_pm_ops); |
108 | return 0; | 108 | return 0; |
109 | } | 109 | } |
110 | 110 | ||
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 0d6a72504caa..dcd81f8d0833 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/pm.h> | 23 | #include <linux/suspend.h> |
24 | 24 | ||
25 | #include <asm/hardware.h> | 25 | #include <asm/hardware.h> |
26 | #include <asm/arch/irqs.h> | 26 | #include <asm/arch/irqs.h> |
@@ -215,7 +215,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state) | |||
215 | 215 | ||
216 | static struct pxa_cpu_pm_fns pxa25x_cpu_pm_fns = { | 216 | static struct pxa_cpu_pm_fns pxa25x_cpu_pm_fns = { |
217 | .save_size = SLEEP_SAVE_SIZE, | 217 | .save_size = SLEEP_SAVE_SIZE, |
218 | .valid = pm_valid_only_mem, | 218 | .valid = suspend_valid_only_mem, |
219 | .save = pxa25x_cpu_pm_save, | 219 | .save = pxa25x_cpu_pm_save, |
220 | .restore = pxa25x_cpu_pm_restore, | 220 | .restore = pxa25x_cpu_pm_restore, |
221 | .enter = pxa25x_cpu_pm_enter, | 221 | .enter = pxa25x_cpu_pm_enter, |
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 2d7fc39732e4..d0f2b597db12 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/pm.h> | 17 | #include <linux/suspend.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | 19 | ||
20 | #include <asm/hardware.h> | 20 | #include <asm/hardware.h> |
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 01a37d3c0727..246c573e7252 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -122,14 +122,14 @@ unsigned long sleep_phys_sp(void *sp) | |||
122 | return virt_to_phys(sp); | 122 | return virt_to_phys(sp); |
123 | } | 123 | } |
124 | 124 | ||
125 | static struct pm_ops sa11x0_pm_ops = { | 125 | static struct platform_suspend_ops sa11x0_pm_ops = { |
126 | .enter = sa11x0_pm_enter, | 126 | .enter = sa11x0_pm_enter, |
127 | .valid = pm_valid_only_mem, | 127 | .valid = suspend_valid_only_mem, |
128 | }; | 128 | }; |
129 | 129 | ||
130 | static int __init sa11x0_pm_init(void) | 130 | static int __init sa11x0_pm_init(void) |
131 | { | 131 | { |
132 | pm_set_ops(&sa11x0_pm_ops); | 132 | suspend_set_ops(&sa11x0_pm_ops); |
133 | return 0; | 133 | return 0; |
134 | } | 134 | } |
135 | 135 | ||
diff --git a/arch/arm/nwfpe/fpopcode.h b/arch/arm/nwfpe/fpopcode.h
index ec78e3517fc9..0090b19bbe61 100644
--- a/arch/arm/nwfpe/fpopcode.h
+++ b/arch/arm/nwfpe/fpopcode.h
@@ -369,20 +369,20 @@ TABLE 5 | |||
369 | #define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5) | 369 | #define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5) |
370 | 370 | ||
371 | #ifdef CONFIG_FPE_NWFPE_XP | 371 | #ifdef CONFIG_FPE_NWFPE_XP |
372 | static inline __attribute_pure__ floatx80 getExtendedConstant(const unsigned int nIndex) | 372 | static inline floatx80 __pure getExtendedConstant(const unsigned int nIndex) |
373 | { | 373 | { |
374 | extern const floatx80 floatx80Constant[]; | 374 | extern const floatx80 floatx80Constant[]; |
375 | return floatx80Constant[nIndex]; | 375 | return floatx80Constant[nIndex]; |
376 | } | 376 | } |
377 | #endif | 377 | #endif |
378 | 378 | ||
379 | static inline __attribute_pure__ float64 getDoubleConstant(const unsigned int nIndex) | 379 | static inline float64 __pure getDoubleConstant(const unsigned int nIndex) |
380 | { | 380 | { |
381 | extern const float64 float64Constant[]; | 381 | extern const float64 float64Constant[]; |
382 | return float64Constant[nIndex]; | 382 | return float64Constant[nIndex]; |
383 | } | 383 | } |
384 | 384 | ||
385 | static inline __attribute_pure__ float32 getSingleConstant(const unsigned int nIndex) | 385 | static inline float32 __pure getSingleConstant(const unsigned int nIndex) |
386 | { | 386 | { |
387 | extern const float32 float32Constant[]; | 387 | extern const float32 float32Constant[]; |
388 | return float32Constant[nIndex]; | 388 | return float32Constant[nIndex]; |
diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/plat-s3c24xx/pm.c
index eab1850616d8..4fdb3117744f 100644
--- a/arch/arm/plat-s3c24xx/pm.c
+++ b/arch/arm/plat-s3c24xx/pm.c
@@ -612,9 +612,9 @@ static int s3c2410_pm_enter(suspend_state_t state) | |||
612 | return 0; | 612 | return 0; |
613 | } | 613 | } |
614 | 614 | ||
615 | static struct pm_ops s3c2410_pm_ops = { | 615 | static struct platform_suspend_ops s3c2410_pm_ops = { |
616 | .enter = s3c2410_pm_enter, | 616 | .enter = s3c2410_pm_enter, |
617 | .valid = pm_valid_only_mem, | 617 | .valid = suspend_valid_only_mem, |
618 | }; | 618 | }; |
619 | 619 | ||
620 | /* s3c2410_pm_init | 620 | /* s3c2410_pm_init |
@@ -628,6 +628,6 @@ int __init s3c2410_pm_init(void) | |||
628 | { | 628 | { |
629 | printk("S3C2410 Power Management, (c) 2004 Simtec Electronics\n"); | 629 | printk("S3C2410 Power Management, (c) 2004 Simtec Electronics\n"); |
630 | 630 | ||
631 | pm_set_ops(&s3c2410_pm_ops); | 631 | suspend_set_ops(&s3c2410_pm_ops); |
632 | return 0; | 632 | return 0; |
633 | } | 633 | } |
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index b10302722202..dac51fb06f22 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -32,7 +32,7 @@ | |||
32 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 32 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/pm.h> | 35 | #include <linux/suspend.h> |
36 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
37 | #include <linux/proc_fs.h> | 37 | #include <linux/proc_fs.h> |
38 | #include <linux/io.h> | 38 | #include <linux/io.h> |
@@ -89,28 +89,15 @@ void bfin_pm_suspend_standby_enter(void) | |||
89 | #endif /* CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR */ | 89 | #endif /* CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR */ |
90 | } | 90 | } |
91 | 91 | ||
92 | |||
93 | /* | 92 | /* |
94 | * bfin_pm_prepare - Do preliminary suspend work. | 93 | * bfin_pm_valid - Tell the PM core that we only support the standby sleep |
95 | * @state: suspend state we're entering. | 94 | * state |
95 | * @state: suspend state we're checking. | ||
96 | * | 96 | * |
97 | */ | 97 | */ |
98 | static int bfin_pm_prepare(suspend_state_t state) | 98 | static int bfin_pm_valid(suspend_state_t state) |
99 | { | 99 | { |
100 | int error = 0; | 100 | return (state == PM_SUSPEND_STANDBY); |
101 | |||
102 | switch (state) { | ||
103 | case PM_SUSPEND_STANDBY: | ||
104 | break; | ||
105 | |||
106 | case PM_SUSPEND_MEM: | ||
107 | return -ENOTSUPP; | ||
108 | |||
109 | default: | ||
110 | return -EINVAL; | ||
111 | } | ||
112 | |||
113 | return error; | ||
114 | } | 101 | } |
115 | 102 | ||
116 | /* | 103 | /* |
@@ -135,44 +122,14 @@ static int bfin_pm_enter(suspend_state_t state) | |||
135 | return 0; | 122 | return 0; |
136 | } | 123 | } |
137 | 124 | ||
138 | /* | 125 | struct platform_suspend_ops bfin_pm_ops = { |
139 | * bfin_pm_finish - Finish up suspend sequence. | ||
140 | * @state: State we're coming out of. | ||
141 | * | ||
142 | * This is called after we wake back up (or if entering the sleep state | ||
143 | * failed). | ||
144 | */ | ||
145 | static int bfin_pm_finish(suspend_state_t state) | ||
146 | { | ||
147 | switch (state) { | ||
148 | case PM_SUSPEND_STANDBY: | ||
149 | break; | ||
150 | |||
151 | case PM_SUSPEND_MEM: | ||
152 | return -ENOTSUPP; | ||
153 | |||
154 | default: | ||
155 | return -EINVAL; | ||
156 | } | ||
157 | |||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static int bfin_pm_valid(suspend_state_t state) | ||
162 | { | ||
163 | return (state == PM_SUSPEND_STANDBY); | ||
164 | } | ||
165 | |||
166 | struct pm_ops bfin_pm_ops = { | ||
167 | .prepare = bfin_pm_prepare, | ||
168 | .enter = bfin_pm_enter, | 126 | .enter = bfin_pm_enter, |
169 | .finish = bfin_pm_finish, | ||
170 | .valid = bfin_pm_valid, | 127 | .valid = bfin_pm_valid, |
171 | }; | 128 | }; |
172 | 129 | ||
173 | static int __init bfin_pm_init(void) | 130 | static int __init bfin_pm_init(void) |
174 | { | 131 | { |
175 | pm_set_ops(&bfin_pm_ops); | 132 | suspend_set_ops(&bfin_pm_ops); |
176 | return 0; | 133 | return 0; |
177 | } | 134 | } |
178 | 135 | ||
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 98cfc90cab1d..2bb84214e5f1 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -371,6 +371,11 @@ ia64_setup_printk_clock(void) | |||
371 | ia64_printk_clock = ia64_itc_printk_clock; | 371 | ia64_printk_clock = ia64_itc_printk_clock; |
372 | } | 372 | } |
373 | 373 | ||
374 | /* IA64 doesn't cache the timezone */ | ||
375 | void update_vsyscall_tz(void) | ||
376 | { | ||
377 | } | ||
378 | |||
374 | void update_vsyscall(struct timespec *wall, struct clocksource *c) | 379 | void update_vsyscall(struct timespec *wall, struct clocksource *c) |
375 | { | 380 | { |
376 | unsigned long flags; | 381 | unsigned long flags; |
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c
index e58fcadff2e9..a5df672d8392 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/arch/ia64/sn/kernel/xpnet.c
@@ -269,8 +269,9 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) | |||
269 | skb->protocol = eth_type_trans(skb, xpnet_device); | 269 | skb->protocol = eth_type_trans(skb, xpnet_device); |
270 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 270 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
271 | 271 | ||
272 | dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p " | 272 | dev_dbg(xpnet, "passing skb to network layer\n" |
273 | "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n", | 273 | KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p " |
274 | "skb->end=0x%p skb->len=%d\n", | ||
274 | (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), | 275 | (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), |
275 | skb_end_pointer(skb), skb->len); | 276 | skb_end_pointer(skb), skb->len); |
276 | 277 | ||
@@ -576,10 +577,10 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
576 | msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); | 577 | msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); |
577 | msg->buf_pa = __pa(start_addr); | 578 | msg->buf_pa = __pa(start_addr); |
578 | 579 | ||
579 | dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa=" | 580 | dev_dbg(xpnet, "sending XPC message to %d:%d\n" |
580 | "0x%lx, msg->size=%u, msg->leadin_ignore=%u, " | 581 | KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, " |
581 | "msg->tailout_ignore=%u\n", dest_partid, | 582 | "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", |
582 | XPC_NET_CHANNEL, msg->buf_pa, msg->size, | 583 | dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, |
583 | msg->leadin_ignore, msg->tailout_ignore); | 584 | msg->leadin_ignore, msg->tailout_ignore); |
584 | 585 | ||
585 | 586 | ||
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 95b823b60c97..8e5988c4a164 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -209,7 +209,6 @@ CONFIG_PM=y | |||
209 | # CONFIG_PM_LEGACY is not set | 209 | # CONFIG_PM_LEGACY is not set |
210 | CONFIG_PM_DEBUG=y | 210 | CONFIG_PM_DEBUG=y |
211 | # CONFIG_PM_VERBOSE is not set | 211 | # CONFIG_PM_VERBOSE is not set |
212 | # CONFIG_DISABLE_CONSOLE_SUSPEND is not set | ||
213 | CONFIG_PM_SLEEP=y | 212 | CONFIG_PM_SLEEP=y |
214 | CONFIG_SUSPEND=y | 213 | CONFIG_SUSPEND=y |
215 | CONFIG_HIBERNATION=y | 214 | CONFIG_HIBERNATION=y |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0ae5d57b9368..2c8e756d19a3 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -141,6 +141,7 @@ int main(void) | |||
141 | DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); | 141 | DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); |
142 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); | 142 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); |
143 | DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); | 143 | DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); |
144 | DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr)); | ||
144 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); | 145 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); |
145 | DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); | 146 | DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); |
146 | DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); | 147 | DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 863a5d6d9b18..9eb3284deac4 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -212,23 +212,44 @@ static u64 read_purr(void) | |||
212 | } | 212 | } |
213 | 213 | ||
214 | /* | 214 | /* |
215 | * Read the SPURR on systems that have it, otherwise the purr | ||
216 | */ | ||
217 | static u64 read_spurr(u64 purr) | ||
218 | { | ||
219 | if (cpu_has_feature(CPU_FTR_SPURR)) | ||
220 | return mfspr(SPRN_SPURR); | ||
221 | return purr; | ||
222 | } | ||
223 | |||
224 | /* | ||
215 | * Account time for a transition between system, hard irq | 225 | * Account time for a transition between system, hard irq |
216 | * or soft irq state. | 226 | * or soft irq state. |
217 | */ | 227 | */ |
218 | void account_system_vtime(struct task_struct *tsk) | 228 | void account_system_vtime(struct task_struct *tsk) |
219 | { | 229 | { |
220 | u64 now, delta; | 230 | u64 now, nowscaled, delta, deltascaled; |
221 | unsigned long flags; | 231 | unsigned long flags; |
222 | 232 | ||
223 | local_irq_save(flags); | 233 | local_irq_save(flags); |
224 | now = read_purr(); | 234 | now = read_purr(); |
225 | delta = now - get_paca()->startpurr; | 235 | delta = now - get_paca()->startpurr; |
226 | get_paca()->startpurr = now; | 236 | get_paca()->startpurr = now; |
237 | nowscaled = read_spurr(now); | ||
238 | deltascaled = nowscaled - get_paca()->startspurr; | ||
239 | get_paca()->startspurr = nowscaled; | ||
227 | if (!in_interrupt()) { | 240 | if (!in_interrupt()) { |
241 | /* deltascaled includes both user and system time. | ||
242 | * Hence scale it based on the purr ratio to estimate | ||
243 | * the system time */ | ||
244 | deltascaled = deltascaled * get_paca()->system_time / | ||
245 | (get_paca()->system_time + get_paca()->user_time); | ||
228 | delta += get_paca()->system_time; | 246 | delta += get_paca()->system_time; |
229 | get_paca()->system_time = 0; | 247 | get_paca()->system_time = 0; |
230 | } | 248 | } |
231 | account_system_time(tsk, 0, delta); | 249 | account_system_time(tsk, 0, delta); |
250 | get_paca()->purrdelta = delta; | ||
251 | account_system_time_scaled(tsk, deltascaled); | ||
252 | get_paca()->spurrdelta = deltascaled; | ||
232 | local_irq_restore(flags); | 253 | local_irq_restore(flags); |
233 | } | 254 | } |
234 | 255 | ||
@@ -240,11 +261,17 @@ void account_system_vtime(struct task_struct *tsk) | |||
240 | */ | 261 | */ |
241 | void account_process_vtime(struct task_struct *tsk) | 262 | void account_process_vtime(struct task_struct *tsk) |
242 | { | 263 | { |
243 | cputime_t utime; | 264 | cputime_t utime, utimescaled; |
244 | 265 | ||
245 | utime = get_paca()->user_time; | 266 | utime = get_paca()->user_time; |
246 | get_paca()->user_time = 0; | 267 | get_paca()->user_time = 0; |
247 | account_user_time(tsk, utime); | 268 | account_user_time(tsk, utime); |
269 | |||
270 | /* Estimate the scaled utime by scaling the real utime based | ||
271 | * on the last spurr to purr ratio */ | ||
272 | utimescaled = utime * get_paca()->spurrdelta / get_paca()->purrdelta; | ||
273 | get_paca()->spurrdelta = get_paca()->purrdelta = 0; | ||
274 | account_user_time_scaled(tsk, utimescaled); | ||
248 | } | 275 | } |
249 | 276 | ||
250 | static void account_process_time(struct pt_regs *regs) | 277 | static void account_process_time(struct pt_regs *regs) |
@@ -266,6 +293,7 @@ struct cpu_purr_data { | |||
266 | int initialized; /* thread is running */ | 293 | int initialized; /* thread is running */ |
267 | u64 tb; /* last TB value read */ | 294 | u64 tb; /* last TB value read */ |
268 | u64 purr; /* last PURR value read */ | 295 | u64 purr; /* last PURR value read */ |
296 | u64 spurr; /* last SPURR value read */ | ||
269 | }; | 297 | }; |
270 | 298 | ||
271 | /* | 299 | /* |
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index f26afcd41757..ffa14aff5248 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/pm.h> | 2 | #include <linux/suspend.h> |
3 | #include <asm/io.h> | 3 | #include <asm/io.h> |
4 | #include <asm/time.h> | 4 | #include <asm/time.h> |
5 | #include <asm/mpc52xx.h> | 5 | #include <asm/mpc52xx.h> |
@@ -18,6 +18,8 @@ static void __iomem *sram; | |||
18 | static const int sram_size = 0x4000; /* 16 kBytes */ | 18 | static const int sram_size = 0x4000; /* 16 kBytes */ |
19 | static void __iomem *mbar; | 19 | static void __iomem *mbar; |
20 | 20 | ||
21 | static suspend_state_t lite5200_pm_target_state; | ||
22 | |||
21 | static int lite5200_pm_valid(suspend_state_t state) | 23 | static int lite5200_pm_valid(suspend_state_t state) |
22 | { | 24 | { |
23 | switch (state) { | 25 | switch (state) { |
@@ -29,13 +31,22 @@ static int lite5200_pm_valid(suspend_state_t state) | |||
29 | } | 31 | } |
30 | } | 32 | } |
31 | 33 | ||
32 | static int lite5200_pm_prepare(suspend_state_t state) | 34 | static int lite5200_pm_set_target(suspend_state_t state) |
35 | { | ||
36 | if (lite5200_pm_valid(state)) { | ||
37 | lite5200_pm_target_state = state; | ||
38 | return 0; | ||
39 | } | ||
40 | return -EINVAL; | ||
41 | } | ||
42 | |||
43 | static int lite5200_pm_prepare(void) | ||
33 | { | 44 | { |
34 | /* deep sleep? let mpc52xx code handle that */ | 45 | /* deep sleep? let mpc52xx code handle that */ |
35 | if (state == PM_SUSPEND_STANDBY) | 46 | if (lite5200_pm_target_state == PM_SUSPEND_STANDBY) |
36 | return mpc52xx_pm_prepare(state); | 47 | return mpc52xx_pm_prepare(); |
37 | 48 | ||
38 | if (state != PM_SUSPEND_MEM) | 49 | if (lite5200_pm_target_state != PM_SUSPEND_MEM) |
39 | return -EINVAL; | 50 | return -EINVAL; |
40 | 51 | ||
41 | /* map registers */ | 52 | /* map registers */ |
@@ -190,17 +201,16 @@ static int lite5200_pm_enter(suspend_state_t state) | |||
190 | return 0; | 201 | return 0; |
191 | } | 202 | } |
192 | 203 | ||
193 | static int lite5200_pm_finish(suspend_state_t state) | 204 | static void lite5200_pm_finish(void) |
194 | { | 205 | { |
195 | /* deep sleep? let mpc52xx code handle that */ | 206 | /* deep sleep? let mpc52xx code handle that */ |
196 | if (state == PM_SUSPEND_STANDBY) { | 207 | if (lite5200_pm_target_state == PM_SUSPEND_STANDBY) |
197 | return mpc52xx_pm_finish(state); | 208 | mpc52xx_pm_finish(); |
198 | } | ||
199 | return 0; | ||
200 | } | 209 | } |
201 | 210 | ||
202 | static struct pm_ops lite5200_pm_ops = { | 211 | static struct platform_suspend_ops lite5200_pm_ops = { |
203 | .valid = lite5200_pm_valid, | 212 | .valid = lite5200_pm_valid, |
213 | .set_target = lite5200_pm_set_target, | ||
204 | .prepare = lite5200_pm_prepare, | 214 | .prepare = lite5200_pm_prepare, |
205 | .enter = lite5200_pm_enter, | 215 | .enter = lite5200_pm_enter, |
206 | .finish = lite5200_pm_finish, | 216 | .finish = lite5200_pm_finish, |
@@ -208,6 +218,6 @@ static struct pm_ops lite5200_pm_ops = { | |||
208 | 218 | ||
209 | int __init lite5200_pm_init(void) | 219 | int __init lite5200_pm_init(void) |
210 | { | 220 | { |
211 | pm_set_ops(&lite5200_pm_ops); | 221 | suspend_set_ops(&lite5200_pm_ops); |
212 | return 0; | 222 | return 0; |
213 | } | 223 | } |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index ee2e7639c63e..7ffa7babf254 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/pm.h> | 2 | #include <linux/suspend.h> |
3 | #include <linux/io.h> | 3 | #include <linux/io.h> |
4 | #include <asm/time.h> | 4 | #include <asm/time.h> |
5 | #include <asm/cacheflush.h> | 5 | #include <asm/cacheflush.h> |
@@ -57,11 +57,8 @@ int mpc52xx_set_wakeup_gpio(u8 pin, u8 level) | |||
57 | return 0; | 57 | return 0; |
58 | } | 58 | } |
59 | 59 | ||
60 | int mpc52xx_pm_prepare(suspend_state_t state) | 60 | int mpc52xx_pm_prepare(void) |
61 | { | 61 | { |
62 | if (state != PM_SUSPEND_STANDBY) | ||
63 | return -EINVAL; | ||
64 | |||
65 | /* map the whole register space */ | 62 | /* map the whole register space */ |
66 | mbar = mpc52xx_find_and_map("mpc5200"); | 63 | mbar = mpc52xx_find_and_map("mpc5200"); |
67 | if (!mbar) { | 64 | if (!mbar) { |
@@ -166,18 +163,16 @@ int mpc52xx_pm_enter(suspend_state_t state) | |||
166 | return 0; | 163 | return 0; |
167 | } | 164 | } |
168 | 165 | ||
169 | int mpc52xx_pm_finish(suspend_state_t state) | 166 | void mpc52xx_pm_finish(void) |
170 | { | 167 | { |
171 | /* call board resume code */ | 168 | /* call board resume code */ |
172 | if (mpc52xx_suspend.board_resume_finish) | 169 | if (mpc52xx_suspend.board_resume_finish) |
173 | mpc52xx_suspend.board_resume_finish(mbar); | 170 | mpc52xx_suspend.board_resume_finish(mbar); |
174 | 171 | ||
175 | iounmap(mbar); | 172 | iounmap(mbar); |
176 | |||
177 | return 0; | ||
178 | } | 173 | } |
179 | 174 | ||
180 | static struct pm_ops mpc52xx_pm_ops = { | 175 | static struct platform_suspend_ops mpc52xx_pm_ops = { |
181 | .valid = mpc52xx_pm_valid, | 176 | .valid = mpc52xx_pm_valid, |
182 | .prepare = mpc52xx_pm_prepare, | 177 | .prepare = mpc52xx_pm_prepare, |
183 | .enter = mpc52xx_pm_enter, | 178 | .enter = mpc52xx_pm_enter, |
@@ -186,6 +181,6 @@ static struct pm_ops mpc52xx_pm_ops = { | |||
186 | 181 | ||
187 | int __init mpc52xx_pm_init(void) | 182 | int __init mpc52xx_pm_init(void) |
188 | { | 183 | { |
189 | pm_set_ops(&mpc52xx_pm_ops); | 184 | suspend_set_ops(&mpc52xx_pm_ops); |
190 | return 0; | 185 | return 0; |
191 | } | 186 | } |
diff --git a/arch/sh/boards/hp6xx/pm.c b/arch/sh/boards/hp6xx/pm.c
index 8143d1b948e7..d22f6eac9cca 100644
--- a/arch/sh/boards/hp6xx/pm.c
+++ b/arch/sh/boards/hp6xx/pm.c
@@ -67,14 +67,14 @@ static int hp6x0_pm_enter(suspend_state_t state) | |||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static struct pm_ops hp6x0_pm_ops = { | 70 | static struct platform_suspend_ops hp6x0_pm_ops = { |
71 | .enter = hp6x0_pm_enter, | 71 | .enter = hp6x0_pm_enter, |
72 | .valid = pm_valid_only_mem, | 72 | .valid = suspend_valid_only_mem, |
73 | }; | 73 | }; |
74 | 74 | ||
75 | static int __init hp6x0_pm_init(void) | 75 | static int __init hp6x0_pm_init(void) |
76 | { | 76 | { |
77 | pm_set_ops(&hp6x0_pm_ops); | 77 | suspend_set_ops(&hp6x0_pm_ops); |
78 | return 0; | 78 | return 0; |
79 | } | 79 | } |
80 | 80 | ||
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index fb2caef79cec..3ea000d15e3a 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -585,24 +585,6 @@ static int __init of_debug(char *str) | |||
585 | 585 | ||
586 | __setup("of_debug=", of_debug); | 586 | __setup("of_debug=", of_debug); |
587 | 587 | ||
588 | int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) | ||
589 | { | ||
590 | /* initialize common driver fields */ | ||
591 | if (!drv->driver.name) | ||
592 | drv->driver.name = drv->name; | ||
593 | if (!drv->driver.owner) | ||
594 | drv->driver.owner = drv->owner; | ||
595 | drv->driver.bus = bus; | ||
596 | |||
597 | /* register with core */ | ||
598 | return driver_register(&drv->driver); | ||
599 | } | ||
600 | |||
601 | void of_unregister_driver(struct of_platform_driver *drv) | ||
602 | { | ||
603 | driver_unregister(&drv->driver); | ||
604 | } | ||
605 | |||
606 | struct of_device* of_platform_device_create(struct device_node *np, | 588 | struct of_device* of_platform_device_create(struct device_node *np, |
607 | const char *bus_id, | 589 | const char *bus_id, |
608 | struct device *parent, | 590 | struct device *parent, |
@@ -628,6 +610,4 @@ struct of_device* of_platform_device_create(struct device_node *np, | |||
628 | return dev; | 610 | return dev; |
629 | } | 611 | } |
630 | 612 | ||
631 | EXPORT_SYMBOL(of_register_driver); | ||
632 | EXPORT_SYMBOL(of_unregister_driver); | ||
633 | EXPORT_SYMBOL(of_platform_device_create); | 613 | EXPORT_SYMBOL(of_platform_device_create); |
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index f3922e5a89f6..2c3bea228159 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -877,7 +877,7 @@ void __cpuinit sun4v_register_mondo_queues(int this_cpu) | |||
877 | static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask) | 877 | static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask) |
878 | { | 878 | { |
879 | unsigned long size = PAGE_ALIGN(qmask + 1); | 879 | unsigned long size = PAGE_ALIGN(qmask + 1); |
880 | void *p = __alloc_bootmem_low(size, size, 0); | 880 | void *p = __alloc_bootmem(size, size, 0); |
881 | if (!p) { | 881 | if (!p) { |
882 | prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); | 882 | prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); |
883 | prom_halt(); | 883 | prom_halt(); |
@@ -889,7 +889,7 @@ static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask) | |||
889 | static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask) | 889 | static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask) |
890 | { | 890 | { |
891 | unsigned long size = PAGE_ALIGN(qmask + 1); | 891 | unsigned long size = PAGE_ALIGN(qmask + 1); |
892 | void *p = __alloc_bootmem_low(size, size, 0); | 892 | void *p = __alloc_bootmem(size, size, 0); |
893 | 893 | ||
894 | if (!p) { | 894 | if (!p) { |
895 | prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); | 895 | prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); |
@@ -906,7 +906,7 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) | |||
906 | 906 | ||
907 | BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); | 907 | BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); |
908 | 908 | ||
909 | page = alloc_bootmem_low_pages(PAGE_SIZE); | 909 | page = alloc_bootmem_pages(PAGE_SIZE); |
910 | if (!page) { | 910 | if (!page) { |
911 | prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); | 911 | prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); |
912 | prom_halt(); | 912 | prom_halt(); |
@@ -953,7 +953,7 @@ void __init init_IRQ(void) | |||
953 | kill_prom_timer(); | 953 | kill_prom_timer(); |
954 | 954 | ||
955 | size = sizeof(struct ino_bucket) * NUM_IVECS; | 955 | size = sizeof(struct ino_bucket) * NUM_IVECS; |
956 | ivector_table = alloc_bootmem_low(size); | 956 | ivector_table = alloc_bootmem(size); |
957 | if (!ivector_table) { | 957 | if (!ivector_table) { |
958 | prom_printf("Fatal error, cannot allocate ivector_table\n"); | 958 | prom_printf("Fatal error, cannot allocate ivector_table\n"); |
959 | prom_halt(); | 959 | prom_halt(); |
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 42d779866fba..fc5c0cc793b8 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -869,26 +869,6 @@ static int __init of_debug(char *str) | |||
869 | 869 | ||
870 | __setup("of_debug=", of_debug); | 870 | __setup("of_debug=", of_debug); |
871 | 871 | ||
872 | int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) | ||
873 | { | ||
874 | /* initialize common driver fields */ | ||
875 | if (!drv->driver.name) | ||
876 | drv->driver.name = drv->name; | ||
877 | if (!drv->driver.owner) | ||
878 | drv->driver.owner = drv->owner; | ||
879 | drv->driver.bus = bus; | ||
880 | |||
881 | /* register with core */ | ||
882 | return driver_register(&drv->driver); | ||
883 | } | ||
884 | EXPORT_SYMBOL(of_register_driver); | ||
885 | |||
886 | void of_unregister_driver(struct of_platform_driver *drv) | ||
887 | { | ||
888 | driver_unregister(&drv->driver); | ||
889 | } | ||
890 | EXPORT_SYMBOL(of_unregister_driver); | ||
891 | |||
892 | struct of_device* of_platform_device_create(struct device_node *np, | 872 | struct of_device* of_platform_device_create(struct device_node *np, |
893 | const char *bus_id, | 873 | const char *bus_id, |
894 | struct device *parent, | 874 | struct device *parent, |
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
index c76bfbb7da08..923e0bcc3bfd 100644
--- a/arch/sparc64/kernel/pci_common.c
+++ b/arch/sparc64/kernel/pci_common.c
@@ -396,6 +396,13 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm) | |||
396 | 396 | ||
397 | saw_mem = saw_io = 0; | 397 | saw_mem = saw_io = 0; |
398 | pbm_ranges = of_get_property(pbm->prom_node, "ranges", &i); | 398 | pbm_ranges = of_get_property(pbm->prom_node, "ranges", &i); |
399 | if (!pbm_ranges) { | ||
400 | prom_printf("PCI: Fatal error, missing PBM ranges property " | ||
401 | " for %s\n", | ||
402 | pbm->name); | ||
403 | prom_halt(); | ||
404 | } | ||
405 | |||
399 | num_pbm_ranges = i / sizeof(*pbm_ranges); | 406 | num_pbm_ranges = i / sizeof(*pbm_ranges); |
400 | 407 | ||
401 | for (i = 0; i < num_pbm_ranges; i++) { | 408 | for (i = 0; i < num_pbm_ranges; i++) { |
diff --git a/arch/sparc64/lib/atomic.S b/arch/sparc64/lib/atomic.S
index 9633750167d0..70ac4186f62b 100644
--- a/arch/sparc64/lib/atomic.S
+++ b/arch/sparc64/lib/atomic.S
@@ -1,10 +1,10 @@ | |||
1 | /* $Id: atomic.S,v 1.4 2001/11/18 00:12:56 davem Exp $ | 1 | /* atomic.S: These things are too big to do inline. |
2 | * atomic.S: These things are too big to do inline. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1999 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #include <asm/asi.h> | 6 | #include <asm/asi.h> |
7 | #include <asm/backoff.h> | ||
8 | 8 | ||
9 | .text | 9 | .text |
10 | 10 | ||
@@ -16,27 +16,31 @@ | |||
16 | .globl atomic_add | 16 | .globl atomic_add |
17 | .type atomic_add,#function | 17 | .type atomic_add,#function |
18 | atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ | 18 | atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ |
19 | BACKOFF_SETUP(%o2) | ||
19 | 1: lduw [%o1], %g1 | 20 | 1: lduw [%o1], %g1 |
20 | add %g1, %o0, %g7 | 21 | add %g1, %o0, %g7 |
21 | cas [%o1], %g1, %g7 | 22 | cas [%o1], %g1, %g7 |
22 | cmp %g1, %g7 | 23 | cmp %g1, %g7 |
23 | bne,pn %icc, 1b | 24 | bne,pn %icc, 2f |
24 | nop | 25 | nop |
25 | retl | 26 | retl |
26 | nop | 27 | nop |
28 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | ||
27 | .size atomic_add, .-atomic_add | 29 | .size atomic_add, .-atomic_add |
28 | 30 | ||
29 | .globl atomic_sub | 31 | .globl atomic_sub |
30 | .type atomic_sub,#function | 32 | .type atomic_sub,#function |
31 | atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ | 33 | atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ |
34 | BACKOFF_SETUP(%o2) | ||
32 | 1: lduw [%o1], %g1 | 35 | 1: lduw [%o1], %g1 |
33 | sub %g1, %o0, %g7 | 36 | sub %g1, %o0, %g7 |
34 | cas [%o1], %g1, %g7 | 37 | cas [%o1], %g1, %g7 |
35 | cmp %g1, %g7 | 38 | cmp %g1, %g7 |
36 | bne,pn %icc, 1b | 39 | bne,pn %icc, 2f |
37 | nop | 40 | nop |
38 | retl | 41 | retl |
39 | nop | 42 | nop |
43 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | ||
40 | .size atomic_sub, .-atomic_sub | 44 | .size atomic_sub, .-atomic_sub |
41 | 45 | ||
42 | /* On SMP we need to use memory barriers to ensure | 46 | /* On SMP we need to use memory barriers to ensure |
@@ -60,89 +64,101 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ | |||
60 | .globl atomic_add_ret | 64 | .globl atomic_add_ret |
61 | .type atomic_add_ret,#function | 65 | .type atomic_add_ret,#function |
62 | atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ | 66 | atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ |
67 | BACKOFF_SETUP(%o2) | ||
63 | ATOMIC_PRE_BARRIER | 68 | ATOMIC_PRE_BARRIER |
64 | 1: lduw [%o1], %g1 | 69 | 1: lduw [%o1], %g1 |
65 | add %g1, %o0, %g7 | 70 | add %g1, %o0, %g7 |
66 | cas [%o1], %g1, %g7 | 71 | cas [%o1], %g1, %g7 |
67 | cmp %g1, %g7 | 72 | cmp %g1, %g7 |
68 | bne,pn %icc, 1b | 73 | bne,pn %icc, 2f |
69 | add %g7, %o0, %g7 | 74 | add %g7, %o0, %g7 |
70 | sra %g7, 0, %o0 | 75 | sra %g7, 0, %o0 |
71 | ATOMIC_POST_BARRIER | 76 | ATOMIC_POST_BARRIER |
72 | retl | 77 | retl |
73 | nop | 78 | nop |
79 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | ||
74 | .size atomic_add_ret, .-atomic_add_ret | 80 | .size atomic_add_ret, .-atomic_add_ret |
75 | 81 | ||
76 | .globl atomic_sub_ret | 82 | .globl atomic_sub_ret |
77 | .type atomic_sub_ret,#function | 83 | .type atomic_sub_ret,#function |
78 | atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ | 84 | atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ |
85 | BACKOFF_SETUP(%o2) | ||
79 | ATOMIC_PRE_BARRIER | 86 | ATOMIC_PRE_BARRIER |
80 | 1: lduw [%o1], %g1 | 87 | 1: lduw [%o1], %g1 |
81 | sub %g1, %o0, %g7 | 88 | sub %g1, %o0, %g7 |
82 | cas [%o1], %g1, %g7 | 89 | cas [%o1], %g1, %g7 |
83 | cmp %g1, %g7 | 90 | cmp %g1, %g7 |
84 | bne,pn %icc, 1b | 91 | bne,pn %icc, 2f |
85 | sub %g7, %o0, %g7 | 92 | sub %g7, %o0, %g7 |
86 | sra %g7, 0, %o0 | 93 | sra %g7, 0, %o0 |
87 | ATOMIC_POST_BARRIER | 94 | ATOMIC_POST_BARRIER |
88 | retl | 95 | retl |
89 | nop | 96 | nop |
97 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | ||
90 | .size atomic_sub_ret, .-atomic_sub_ret | 98 | .size atomic_sub_ret, .-atomic_sub_ret |
91 | 99 | ||
92 | .globl atomic64_add | 100 | .globl atomic64_add |
93 | .type atomic64_add,#function | 101 | .type atomic64_add,#function |
94 | atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ | 102 | atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ |
103 | BACKOFF_SETUP(%o2) | ||
95 | 1: ldx [%o1], %g1 | 104 | 1: ldx [%o1], %g1 |
96 | add %g1, %o0, %g7 | 105 | add %g1, %o0, %g7 |
97 | casx [%o1], %g1, %g7 | 106 | casx [%o1], %g1, %g7 |
98 | cmp %g1, %g7 | 107 | cmp %g1, %g7 |
99 | bne,pn %xcc, 1b | 108 | bne,pn %xcc, 2f |
100 | nop | 109 | nop |
101 | retl | 110 | retl |
102 | nop | 111 | nop |
112 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | ||
103 | .size atomic64_add, .-atomic64_add | 113 | .size atomic64_add, .-atomic64_add |
104 | 114 | ||
105 | .globl atomic64_sub | 115 | .globl atomic64_sub |
106 | .type atomic64_sub,#function | 116 | .type atomic64_sub,#function |
107 | atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ | 117 | atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ |
118 | BACKOFF_SETUP(%o2) | ||
108 | 1: ldx [%o1], %g1 | 119 | 1: ldx [%o1], %g1 |
109 | sub %g1, %o0, %g7 | 120 | sub %g1, %o0, %g7 |
110 | casx [%o1], %g1, %g7 | 121 | casx [%o1], %g1, %g7 |
111 | cmp %g1, %g7 | 122 | cmp %g1, %g7 |
112 | bne,pn %xcc, 1b | 123 | bne,pn %xcc, 2f |
113 | nop | 124 | nop |
114 | retl | 125 | retl |
115 | nop | 126 | nop |
127 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | ||
116 | .size atomic64_sub, .-atomic64_sub | 128 | .size atomic64_sub, .-atomic64_sub |
117 | 129 | ||
118 | .globl atomic64_add_ret | 130 | .globl atomic64_add_ret |
119 | .type atomic64_add_ret,#function | 131 | .type atomic64_add_ret,#function |
120 | atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ | 132 | atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ |
133 | BACKOFF_SETUP(%o2) | ||
121 | ATOMIC_PRE_BARRIER | 134 | ATOMIC_PRE_BARRIER |
122 | 1: ldx [%o1], %g1 | 135 | 1: ldx [%o1], %g1 |
123 | add %g1, %o0, %g7 | 136 | add %g1, %o0, %g7 |
124 | casx [%o1], %g1, %g7 | 137 | casx [%o1], %g1, %g7 |
125 | cmp %g1, %g7 | 138 | cmp %g1, %g7 |
126 | bne,pn %xcc, 1b | 139 | bne,pn %xcc, 2f |
127 | add %g7, %o0, %g7 | 140 | add %g7, %o0, %g7 |
128 | mov %g7, %o0 | 141 | mov %g7, %o0 |
129 | ATOMIC_POST_BARRIER | 142 | ATOMIC_POST_BARRIER |
130 | retl | 143 | retl |
131 | nop | 144 | nop |
145 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | ||
132 | .size atomic64_add_ret, .-atomic64_add_ret | 146 | .size atomic64_add_ret, .-atomic64_add_ret |
133 | 147 | ||
134 | .globl atomic64_sub_ret | 148 | .globl atomic64_sub_ret |
135 | .type atomic64_sub_ret,#function | 149 | .type atomic64_sub_ret,#function |
136 | atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ | 150 | atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ |
151 | BACKOFF_SETUP(%o2) | ||
137 | ATOMIC_PRE_BARRIER | 152 | ATOMIC_PRE_BARRIER |
138 | 1: ldx [%o1], %g1 | 153 | 1: ldx [%o1], %g1 |
139 | sub %g1, %o0, %g7 | 154 | sub %g1, %o0, %g7 |
140 | casx [%o1], %g1, %g7 | 155 | casx [%o1], %g1, %g7 |
141 | cmp %g1, %g7 | 156 | cmp %g1, %g7 |
142 | bne,pn %xcc, 1b | 157 | bne,pn %xcc, 2f |
143 | sub %g7, %o0, %g7 | 158 | sub %g7, %o0, %g7 |
144 | mov %g7, %o0 | 159 | mov %g7, %o0 |
145 | ATOMIC_POST_BARRIER | 160 | ATOMIC_POST_BARRIER |
146 | retl | 161 | retl |
147 | nop | 162 | nop |
163 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | ||
148 | .size atomic64_sub_ret, .-atomic64_sub_ret | 164 | .size atomic64_sub_ret, .-atomic64_sub_ret |
diff --git a/arch/sparc64/lib/bitops.S b/arch/sparc64/lib/bitops.S
index 892431a82131..6b015a6eefb5 100644
--- a/arch/sparc64/lib/bitops.S
+++ b/arch/sparc64/lib/bitops.S
@@ -1,10 +1,10 @@ | |||
1 | /* $Id: bitops.S,v 1.3 2001/11/18 00:12:56 davem Exp $ | 1 | /* bitops.S: Sparc64 atomic bit operations. |
2 | * bitops.S: Sparc64 atomic bit operations. | ||
3 | * | 2 | * |
4 | * Copyright (C) 2000 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #include <asm/asi.h> | 6 | #include <asm/asi.h> |
7 | #include <asm/backoff.h> | ||
8 | 8 | ||
9 | .text | 9 | .text |
10 | 10 | ||
@@ -29,6 +29,7 @@ | |||
29 | .globl test_and_set_bit | 29 | .globl test_and_set_bit |
30 | .type test_and_set_bit,#function | 30 | .type test_and_set_bit,#function |
31 | test_and_set_bit: /* %o0=nr, %o1=addr */ | 31 | test_and_set_bit: /* %o0=nr, %o1=addr */ |
32 | BACKOFF_SETUP(%o3) | ||
32 | BITOP_PRE_BARRIER | 33 | BITOP_PRE_BARRIER |
33 | srlx %o0, 6, %g1 | 34 | srlx %o0, 6, %g1 |
34 | mov 1, %o2 | 35 | mov 1, %o2 |
@@ -40,18 +41,20 @@ test_and_set_bit: /* %o0=nr, %o1=addr */ | |||
40 | or %g7, %o2, %g1 | 41 | or %g7, %o2, %g1 |
41 | casx [%o1], %g7, %g1 | 42 | casx [%o1], %g7, %g1 |
42 | cmp %g7, %g1 | 43 | cmp %g7, %g1 |
43 | bne,pn %xcc, 1b | 44 | bne,pn %xcc, 2f |
44 | and %g7, %o2, %g2 | 45 | and %g7, %o2, %g2 |
45 | clr %o0 | 46 | clr %o0 |
46 | movrne %g2, 1, %o0 | 47 | movrne %g2, 1, %o0 |
47 | BITOP_POST_BARRIER | 48 | BITOP_POST_BARRIER |
48 | retl | 49 | retl |
49 | nop | 50 | nop |
51 | 2: BACKOFF_SPIN(%o3, %o4, 1b) | ||
50 | .size test_and_set_bit, .-test_and_set_bit | 52 | .size test_and_set_bit, .-test_and_set_bit |
51 | 53 | ||
52 | .globl test_and_clear_bit | 54 | .globl test_and_clear_bit |
53 | .type test_and_clear_bit,#function | 55 | .type test_and_clear_bit,#function |
54 | test_and_clear_bit: /* %o0=nr, %o1=addr */ | 56 | test_and_clear_bit: /* %o0=nr, %o1=addr */ |
57 | BACKOFF_SETUP(%o3) | ||
55 | BITOP_PRE_BARRIER | 58 | BITOP_PRE_BARRIER |
56 | srlx %o0, 6, %g1 | 59 | srlx %o0, 6, %g1 |
57 | mov 1, %o2 | 60 | mov 1, %o2 |
@@ -63,18 +66,20 @@ test_and_clear_bit: /* %o0=nr, %o1=addr */ | |||
63 | andn %g7, %o2, %g1 | 66 | andn %g7, %o2, %g1 |
64 | casx [%o1], %g7, %g1 | 67 | casx [%o1], %g7, %g1 |
65 | cmp %g7, %g1 | 68 | cmp %g7, %g1 |
66 | bne,pn %xcc, 1b | 69 | bne,pn %xcc, 2f |
67 | and %g7, %o2, %g2 | 70 | and %g7, %o2, %g2 |
68 | clr %o0 | 71 | clr %o0 |
69 | movrne %g2, 1, %o0 | 72 | movrne %g2, 1, %o0 |
70 | BITOP_POST_BARRIER | 73 | BITOP_POST_BARRIER |
71 | retl | 74 | retl |
72 | nop | 75 | nop |
76 | 2: BACKOFF_SPIN(%o3, %o4, 1b) | ||
73 | .size test_and_clear_bit, .-test_and_clear_bit | 77 | .size test_and_clear_bit, .-test_and_clear_bit |
74 | 78 | ||
75 | .globl test_and_change_bit | 79 | .globl test_and_change_bit |
76 | .type test_and_change_bit,#function | 80 | .type test_and_change_bit,#function |
77 | test_and_change_bit: /* %o0=nr, %o1=addr */ | 81 | test_and_change_bit: /* %o0=nr, %o1=addr */ |
82 | BACKOFF_SETUP(%o3) | ||
78 | BITOP_PRE_BARRIER | 83 | BITOP_PRE_BARRIER |
79 | srlx %o0, 6, %g1 | 84 | srlx %o0, 6, %g1 |
80 | mov 1, %o2 | 85 | mov 1, %o2 |
@@ -86,18 +91,20 @@ test_and_change_bit: /* %o0=nr, %o1=addr */ | |||
86 | xor %g7, %o2, %g1 | 91 | xor %g7, %o2, %g1 |
87 | casx [%o1], %g7, %g1 | 92 | casx [%o1], %g7, %g1 |
88 | cmp %g7, %g1 | 93 | cmp %g7, %g1 |
89 | bne,pn %xcc, 1b | 94 | bne,pn %xcc, 2f |
90 | and %g7, %o2, %g2 | 95 | and %g7, %o2, %g2 |
91 | clr %o0 | 96 | clr %o0 |
92 | movrne %g2, 1, %o0 | 97 | movrne %g2, 1, %o0 |
93 | BITOP_POST_BARRIER | 98 | BITOP_POST_BARRIER |
94 | retl | 99 | retl |
95 | nop | 100 | nop |
101 | 2: BACKOFF_SPIN(%o3, %o4, 1b) | ||
96 | .size test_and_change_bit, .-test_and_change_bit | 102 | .size test_and_change_bit, .-test_and_change_bit |
97 | 103 | ||
98 | .globl set_bit | 104 | .globl set_bit |
99 | .type set_bit,#function | 105 | .type set_bit,#function |
100 | set_bit: /* %o0=nr, %o1=addr */ | 106 | set_bit: /* %o0=nr, %o1=addr */ |
107 | BACKOFF_SETUP(%o3) | ||
101 | srlx %o0, 6, %g1 | 108 | srlx %o0, 6, %g1 |
102 | mov 1, %o2 | 109 | mov 1, %o2 |
103 | sllx %g1, 3, %g3 | 110 | sllx %g1, 3, %g3 |
@@ -108,15 +115,17 @@ set_bit: /* %o0=nr, %o1=addr */ | |||
108 | or %g7, %o2, %g1 | 115 | or %g7, %o2, %g1 |
109 | casx [%o1], %g7, %g1 | 116 | casx [%o1], %g7, %g1 |
110 | cmp %g7, %g1 | 117 | cmp %g7, %g1 |
111 | bne,pn %xcc, 1b | 118 | bne,pn %xcc, 2f |
112 | nop | 119 | nop |
113 | retl | 120 | retl |
114 | nop | 121 | nop |
122 | 2: BACKOFF_SPIN(%o3, %o4, 1b) | ||
115 | .size set_bit, .-set_bit | 123 | .size set_bit, .-set_bit |
116 | 124 | ||
117 | .globl clear_bit | 125 | .globl clear_bit |
118 | .type clear_bit,#function | 126 | .type clear_bit,#function |
119 | clear_bit: /* %o0=nr, %o1=addr */ | 127 | clear_bit: /* %o0=nr, %o1=addr */ |
128 | BACKOFF_SETUP(%o3) | ||
120 | srlx %o0, 6, %g1 | 129 | srlx %o0, 6, %g1 |
121 | mov 1, %o2 | 130 | mov 1, %o2 |
122 | sllx %g1, 3, %g3 | 131 | sllx %g1, 3, %g3 |
@@ -127,15 +136,17 @@ clear_bit: /* %o0=nr, %o1=addr */ | |||
127 | andn %g7, %o2, %g1 | 136 | andn %g7, %o2, %g1 |
128 | casx [%o1], %g7, %g1 | 137 | casx [%o1], %g7, %g1 |
129 | cmp %g7, %g1 | 138 | cmp %g7, %g1 |
130 | bne,pn %xcc, 1b | 139 | bne,pn %xcc, 2f |
131 | nop | 140 | nop |
132 | retl | 141 | retl |
133 | nop | 142 | nop |
143 | 2: BACKOFF_SPIN(%o3, %o4, 1b) | ||
134 | .size clear_bit, .-clear_bit | 144 | .size clear_bit, .-clear_bit |
135 | 145 | ||
136 | .globl change_bit | 146 | .globl change_bit |
137 | .type change_bit,#function | 147 | .type change_bit,#function |
138 | change_bit: /* %o0=nr, %o1=addr */ | 148 | change_bit: /* %o0=nr, %o1=addr */ |
149 | BACKOFF_SETUP(%o3) | ||
139 | srlx %o0, 6, %g1 | 150 | srlx %o0, 6, %g1 |
140 | mov 1, %o2 | 151 | mov 1, %o2 |
141 | sllx %g1, 3, %g3 | 152 | sllx %g1, 3, %g3 |
@@ -146,8 +157,9 @@ change_bit: /* %o0=nr, %o1=addr */ | |||
146 | xor %g7, %o2, %g1 | 157 | xor %g7, %o2, %g1 |
147 | casx [%o1], %g7, %g1 | 158 | casx [%o1], %g7, %g1 |
148 | cmp %g7, %g1 | 159 | cmp %g7, %g1 |
149 | bne,pn %xcc, 1b | 160 | bne,pn %xcc, 2f |
150 | nop | 161 | nop |
151 | retl | 162 | retl |
152 | nop | 163 | nop |
164 | 2: BACKOFF_SPIN(%o3, %o4, 1b) | ||
153 | .size change_bit, .-change_bit | 165 | .size change_bit, .-change_bit |
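Note on the sparc64 hunks above: every compare-and-swap retry loop now branches to a numbered BACKOFF_SPIN slot on contention instead of looping back immediately. The BACKOFF_SETUP/BACKOFF_SPIN macros themselves are not part of these hunks, so the C below is only a minimal sketch of the general pattern they suggest (delay for a growing number of cycles before retrying a failed CAS), not a transcription of the macros.

    #include <stdatomic.h>

    /* Sketch only: the delay policy and the 4096 cap are assumptions. */
    static void set_bit_with_backoff(unsigned long nr, _Atomic unsigned long *addr)
    {
            unsigned long mask  = 1UL << (nr & 63);
            unsigned long limit = 4;                  /* initial backoff budget */
            unsigned long old   = atomic_load(addr);

            while (!atomic_compare_exchange_weak(addr, &old, old | mask)) {
                    for (volatile unsigned long i = 0; i < limit; i++)
                            ;                         /* spin off the bus for a while */
                    if (limit < 4096)
                            limit <<= 1;              /* back off exponentially */
            }
    }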
diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c index ae67e7158e71..6b4a0f9e38de 100644 --- a/arch/um/drivers/slip_kern.c +++ b/arch/um/drivers/slip_kern.c | |||
@@ -31,10 +31,8 @@ void slip_init(struct net_device *dev, void *data) | |||
31 | slip_proto_init(&spri->slip); | 31 | slip_proto_init(&spri->slip); |
32 | 32 | ||
33 | dev->init = NULL; | 33 | dev->init = NULL; |
34 | dev->header_cache_update = NULL; | ||
35 | dev->hard_header_cache = NULL; | ||
36 | dev->hard_header = NULL; | ||
37 | dev->hard_header_len = 0; | 34 | dev->hard_header_len = 0; |
35 | dev->header_ops = NULL; | ||
38 | dev->addr_len = 0; | 36 | dev->addr_len = 0; |
39 | dev->type = ARPHRD_SLIP; | 37 | dev->type = ARPHRD_SLIP; |
40 | dev->tx_queue_len = 256; | 38 | dev->tx_queue_len = 256; |
diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c index 240ee650865d..d987af277db9 100644 --- a/arch/um/drivers/slirp_kern.c +++ b/arch/um/drivers/slirp_kern.c | |||
@@ -34,9 +34,7 @@ void slirp_init(struct net_device *dev, void *data) | |||
34 | 34 | ||
35 | dev->init = NULL; | 35 | dev->init = NULL; |
36 | dev->hard_header_len = 0; | 36 | dev->hard_header_len = 0; |
37 | dev->header_cache_update = NULL; | 37 | dev->header_ops = NULL; |
38 | dev->hard_header_cache = NULL; | ||
39 | dev->hard_header = NULL; | ||
40 | dev->addr_len = 0; | 38 | dev->addr_len = 0; |
41 | dev->type = ARPHRD_SLIP; | 39 | dev->type = ARPHRD_SLIP; |
42 | dev->tx_queue_len = 256; | 40 | dev->tx_queue_len = 256; |
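The slip and slirp hunks above are part of the header_ops conversion: the three per-device header callbacks collapse into a single const ops pointer. A minimal kernel-context sketch of a converted setup function (not compilable standalone; the function name is hypothetical):

    #include <linux/netdevice.h>

    static void example_slip_setup(struct net_device *dev)
    {
            /* One pointer replaces hard_header, hard_header_cache and
             * header_cache_update; header-less links such as SLIP leave it
             * NULL, while drivers with a real link-layer header point it at
             * a const struct header_ops table.
             */
            dev->header_ops      = NULL;
            dev->hard_header_len = 0;
            dev->addr_len        = 0;
    }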
diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c index 118b9f9ff499..5027650eb273 100644 --- a/arch/x86/ia32/ia32_binfmt.c +++ b/arch/x86/ia32/ia32_binfmt.c | |||
@@ -289,7 +289,6 @@ static void elf32_init(struct pt_regs *regs) | |||
289 | 289 | ||
290 | static ctl_table abi_table2[] = { | 290 | static ctl_table abi_table2[] = { |
291 | { | 291 | { |
292 | .ctl_name = 99, | ||
293 | .procname = "vsyscall32", | 292 | .procname = "vsyscall32", |
294 | .data = &sysctl_vsyscall32, | 293 | .data = &sysctl_vsyscall32, |
295 | .maxlen = sizeof(int), | 294 | .maxlen = sizeof(int), |
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index f22ba8534d26..a97313b1270e 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S | |||
@@ -11,7 +11,7 @@ | |||
11 | # | 11 | # |
12 | # If physical address of wakeup_code is 0x12345, BIOS should call us with | 12 | # If physical address of wakeup_code is 0x12345, BIOS should call us with |
13 | # cs = 0x1234, eip = 0x05 | 13 | # cs = 0x1234, eip = 0x05 |
14 | # | 14 | # |
15 | 15 | ||
16 | #define BEEP \ | 16 | #define BEEP \ |
17 | inb $97, %al; \ | 17 | inb $97, %al; \ |
@@ -52,7 +52,6 @@ wakeup_code: | |||
52 | BEEP | 52 | BEEP |
53 | 1: | 53 | 1: |
54 | mov $(wakeup_stack - wakeup_code), %sp # Private stack is needed for ASUS board | 54 | mov $(wakeup_stack - wakeup_code), %sp # Private stack is needed for ASUS board |
55 | movw $0x0e00 + 'S', %fs:(0x12) | ||
56 | 55 | ||
57 | pushl $0 # Kill any dangerous flags | 56 | pushl $0 # Kill any dangerous flags |
58 | popfl | 57 | popfl |
@@ -90,9 +89,6 @@ wakeup_code: | |||
90 | # make sure %cr4 is set correctly (features, etc) | 89 | # make sure %cr4 is set correctly (features, etc) |
91 | movl real_save_cr4 - wakeup_code, %eax | 90 | movl real_save_cr4 - wakeup_code, %eax |
92 | movl %eax, %cr4 | 91 | movl %eax, %cr4 |
93 | movw $0xb800, %ax | ||
94 | movw %ax,%fs | ||
95 | movw $0x0e00 + 'i', %fs:(0x12) | ||
96 | 92 | ||
97 | # need a gdt -- use lgdtl to force 32-bit operands, in case | 93 | # need a gdt -- use lgdtl to force 32-bit operands, in case |
98 | # the GDT is located past 16 megabytes. | 94 | # the GDT is located past 16 megabytes. |
@@ -102,8 +98,6 @@ wakeup_code: | |||
102 | movl %eax, %cr0 | 98 | movl %eax, %cr0 |
103 | jmp 1f | 99 | jmp 1f |
104 | 1: | 100 | 1: |
105 | movw $0x0e00 + 'n', %fs:(0x14) | ||
106 | |||
107 | movl real_magic - wakeup_code, %eax | 101 | movl real_magic - wakeup_code, %eax |
108 | cmpl $0x12345678, %eax | 102 | cmpl $0x12345678, %eax |
109 | jne bogus_real_magic | 103 | jne bogus_real_magic |
@@ -122,13 +116,11 @@ real_save_cr4: .long 0 | |||
122 | real_magic: .long 0 | 116 | real_magic: .long 0 |
123 | video_mode: .long 0 | 117 | video_mode: .long 0 |
124 | realmode_flags: .long 0 | 118 | realmode_flags: .long 0 |
125 | beep_flags: .long 0 | ||
126 | real_efer_save_restore: .long 0 | 119 | real_efer_save_restore: .long 0 |
127 | real_save_efer_edx: .long 0 | 120 | real_save_efer_edx: .long 0 |
128 | real_save_efer_eax: .long 0 | 121 | real_save_efer_eax: .long 0 |
129 | 122 | ||
130 | bogus_real_magic: | 123 | bogus_real_magic: |
131 | movw $0x0e00 + 'B', %fs:(0x12) | ||
132 | jmp bogus_real_magic | 124 | jmp bogus_real_magic |
133 | 125 | ||
134 | /* This code uses an extended set of video mode numbers. These include: | 126 | /* This code uses an extended set of video mode numbers. These include: |
@@ -194,7 +186,6 @@ wakeup_pmode_return: | |||
194 | movw %ax, %es | 186 | movw %ax, %es |
195 | movw %ax, %fs | 187 | movw %ax, %fs |
196 | movw %ax, %gs | 188 | movw %ax, %gs |
197 | movw $0x0e00 + 'u', 0xb8016 | ||
198 | 189 | ||
199 | # reload the gdt, as we need the full 32 bit address | 190 | # reload the gdt, as we need the full 32 bit address |
200 | lgdt saved_gdt | 191 | lgdt saved_gdt |
@@ -218,7 +209,6 @@ wakeup_pmode_return: | |||
218 | jmp *%eax | 209 | jmp *%eax |
219 | 210 | ||
220 | bogus_magic: | 211 | bogus_magic: |
221 | movw $0x0e00 + 'B', 0xb8018 | ||
222 | jmp bogus_magic | 212 | jmp bogus_magic |
223 | 213 | ||
224 | 214 | ||
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 8b4357e1efe0..55608ec2ed72 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S | |||
@@ -41,7 +41,6 @@ wakeup_code: | |||
41 | 41 | ||
42 | # Running in *copy* of this code, somewhere in low 1MB. | 42 | # Running in *copy* of this code, somewhere in low 1MB. |
43 | 43 | ||
44 | movb $0xa1, %al ; outb %al, $0x80 | ||
45 | cli | 44 | cli |
46 | cld | 45 | cld |
47 | # setup data segment | 46 | # setup data segment |
@@ -65,11 +64,6 @@ wakeup_code: | |||
65 | cmpl $0x12345678, %eax | 64 | cmpl $0x12345678, %eax |
66 | jne bogus_real_magic | 65 | jne bogus_real_magic |
67 | 66 | ||
68 | call verify_cpu # Verify the cpu supports long | ||
69 | # mode | ||
70 | testl %eax, %eax | ||
71 | jnz no_longmode | ||
72 | |||
73 | testl $1, realmode_flags - wakeup_code | 67 | testl $1, realmode_flags - wakeup_code |
74 | jz 1f | 68 | jz 1f |
75 | lcall $0xc000,$3 | 69 | lcall $0xc000,$3 |
@@ -84,12 +78,6 @@ wakeup_code: | |||
84 | call mode_set | 78 | call mode_set |
85 | 1: | 79 | 1: |
86 | 80 | ||
87 | movw $0xb800, %ax | ||
88 | movw %ax,%fs | ||
89 | movw $0x0e00 + 'L', %fs:(0x10) | ||
90 | |||
91 | movb $0xa2, %al ; outb %al, $0x80 | ||
92 | |||
93 | mov %ds, %ax # Find 32bit wakeup_code addr | 81 | mov %ds, %ax # Find 32bit wakeup_code addr |
94 | movzx %ax, %esi # (Convert %ds:gdt to a linear ptr) | 82 |
95 | shll $4, %esi | 83 | shll $4, %esi |
@@ -117,14 +105,10 @@ wakeup_32_vector: | |||
117 | .code32 | 105 | .code32 |
118 | wakeup_32: | 106 | wakeup_32: |
119 | # Running in this code, but at low address; paging is not yet turned on. | 107 | # Running in this code, but at low address; paging is not yet turned on. |
120 | movb $0xa5, %al ; outb %al, $0x80 | ||
121 | 108 | ||
122 | movl $__KERNEL_DS, %eax | 109 | movl $__KERNEL_DS, %eax |
123 | movl %eax, %ds | 110 | movl %eax, %ds |
124 | 111 | ||
125 | movw $0x0e00 + 'i', %ds:(0xb8012) | ||
126 | movb $0xa8, %al ; outb %al, $0x80; | ||
127 | |||
128 | /* | 112 | /* |
129 | * Prepare for entering 64bits mode | 113 | * Prepare for entering 64bits mode |
130 | */ | 114 | */ |
@@ -200,16 +184,11 @@ wakeup_long64: | |||
200 | */ | 184 | */ |
201 | lgdt cpu_gdt_descr | 185 | lgdt cpu_gdt_descr |
202 | 186 | ||
203 | movw $0x0e00 + 'n', %ds:(0xb8014) | ||
204 | movb $0xa9, %al ; outb %al, $0x80 | ||
205 | |||
206 | movq saved_magic, %rax | 187 | movq saved_magic, %rax |
207 | movq $0x123456789abcdef0, %rdx | 188 | movq $0x123456789abcdef0, %rdx |
208 | cmpq %rdx, %rax | 189 | cmpq %rdx, %rax |
209 | jne bogus_64_magic | 190 | jne bogus_64_magic |
210 | 191 | ||
211 | movw $0x0e00 + 'u', %ds:(0xb8016) | ||
212 | |||
213 | nop | 192 | nop |
214 | nop | 193 | nop |
215 | movw $__KERNEL_DS, %ax | 194 | movw $__KERNEL_DS, %ax |
@@ -220,13 +199,11 @@ wakeup_long64: | |||
220 | movw %ax, %gs | 199 | movw %ax, %gs |
221 | movq saved_rsp, %rsp | 200 | movq saved_rsp, %rsp |
222 | 201 | ||
223 | movw $0x0e00 + 'x', %ds:(0xb8018) | ||
224 | movq saved_rbx, %rbx | 202 | movq saved_rbx, %rbx |
225 | movq saved_rdi, %rdi | 203 | movq saved_rdi, %rdi |
226 | movq saved_rsi, %rsi | 204 | movq saved_rsi, %rsi |
227 | movq saved_rbp, %rbp | 205 | movq saved_rbp, %rbp |
228 | 206 | ||
229 | movw $0x0e00 + '!', %ds:(0xb801a) | ||
230 | movq saved_rip, %rax | 207 | movq saved_rip, %rax |
231 | jmp *%rax | 208 | jmp *%rax |
232 | 209 | ||
@@ -256,21 +233,12 @@ realmode_flags: .quad 0 | |||
256 | 233 | ||
257 | .code16 | 234 | .code16 |
258 | bogus_real_magic: | 235 | bogus_real_magic: |
259 | movb $0xba,%al ; outb %al,$0x80 | ||
260 | jmp bogus_real_magic | 236 | jmp bogus_real_magic |
261 | 237 | ||
262 | .code64 | 238 | .code64 |
263 | bogus_64_magic: | 239 | bogus_64_magic: |
264 | movb $0xb3,%al ; outb %al,$0x80 | ||
265 | jmp bogus_64_magic | 240 | jmp bogus_64_magic |
266 | 241 | ||
267 | .code16 | ||
268 | no_longmode: | ||
269 | movb $0xbc,%al ; outb %al,$0x80 | ||
270 | jmp no_longmode | ||
271 | |||
272 | #include "../verify_cpu_64.S" | ||
273 | |||
274 | /* This code uses an extended set of video mode numbers. These include: | 242 | /* This code uses an extended set of video mode numbers. These include: |
275 | * Aliases for standard modes | 243 | * Aliases for standard modes |
276 | * NORMAL_VGA (-1) | 244 | * NORMAL_VGA (-1) |
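Both wakeup hunks above mostly delete ad-hoc debug output. My reading of the removed stores (an assumption, not stated in the patch): segment 0xb800 addresses the VGA text buffer, each cell holds a character byte plus an attribute byte, and 0x0e00 + 'S' paints a yellow 'S' at a fixed column so progress could be watched on screen during resume. A standalone sketch of the same trick:

    #include <stdint.h>

    /* cell is an index into 16-bit cells, not a byte offset: %fs:(0x12) with
     * %fs = 0xb800 is byte offset 0x12, i.e. cell 9 on the top text line.
     */
    static void vga_debug_putc(volatile uint16_t *vga_text, int cell, char c)
    {
            vga_text[cell] = 0x0e00 | (uint8_t)c;   /* 0x0e = yellow on black */
    }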
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 1826395ebeeb..297a24116949 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -499,6 +499,11 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) { | |||
499 | 499 | ||
500 | static void free_cache_attributes(unsigned int cpu) | 500 | static void free_cache_attributes(unsigned int cpu) |
501 | { | 501 | { |
502 | int i; | ||
503 | |||
504 | for (i = 0; i < num_cache_leaves; i++) | ||
505 | cache_remove_shared_cpu_map(cpu, i); | ||
506 | |||
502 | kfree(cpuid4_info[cpu]); | 507 | kfree(cpuid4_info[cpu]); |
503 | cpuid4_info[cpu] = NULL; | 508 | cpuid4_info[cpu] = NULL; |
504 | } | 509 | } |
@@ -506,8 +511,8 @@ static void free_cache_attributes(unsigned int cpu) | |||
506 | static int __cpuinit detect_cache_attributes(unsigned int cpu) | 511 | static int __cpuinit detect_cache_attributes(unsigned int cpu) |
507 | { | 512 | { |
508 | struct _cpuid4_info *this_leaf; | 513 | struct _cpuid4_info *this_leaf; |
509 | unsigned long j; | 514 | unsigned long j; |
510 | int retval; | 515 | int retval; |
511 | cpumask_t oldmask; | 516 | cpumask_t oldmask; |
512 | 517 | ||
513 | if (num_cache_leaves == 0) | 518 | if (num_cache_leaves == 0) |
@@ -524,19 +529,26 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
524 | goto out; | 529 | goto out; |
525 | 530 | ||
526 | /* Do cpuid and store the results */ | 531 | /* Do cpuid and store the results */ |
527 | retval = 0; | ||
528 | for (j = 0; j < num_cache_leaves; j++) { | 532 | for (j = 0; j < num_cache_leaves; j++) { |
529 | this_leaf = CPUID4_INFO_IDX(cpu, j); | 533 | this_leaf = CPUID4_INFO_IDX(cpu, j); |
530 | retval = cpuid4_cache_lookup(j, this_leaf); | 534 | retval = cpuid4_cache_lookup(j, this_leaf); |
531 | if (unlikely(retval < 0)) | 535 | if (unlikely(retval < 0)) { |
536 | int i; | ||
537 | |||
538 | for (i = 0; i < j; i++) | ||
539 | cache_remove_shared_cpu_map(cpu, i); | ||
532 | break; | 540 | break; |
541 | } | ||
533 | cache_shared_cpu_map_setup(cpu, j); | 542 | cache_shared_cpu_map_setup(cpu, j); |
534 | } | 543 | } |
535 | set_cpus_allowed(current, oldmask); | 544 | set_cpus_allowed(current, oldmask); |
536 | 545 | ||
537 | out: | 546 | out: |
538 | if (retval) | 547 | if (retval) { |
539 | free_cache_attributes(cpu); | 548 | kfree(cpuid4_info[cpu]); |
549 | cpuid4_info[cpu] = NULL; | ||
550 | } | ||
551 | |||
540 | return retval; | 552 | return retval; |
541 | } | 553 | } |
542 | 554 | ||
@@ -669,7 +681,7 @@ static struct kobj_type ktype_percpu_entry = { | |||
669 | .sysfs_ops = &sysfs_ops, | 681 | .sysfs_ops = &sysfs_ops, |
670 | }; | 682 | }; |
671 | 683 | ||
672 | static void cpuid4_cache_sysfs_exit(unsigned int cpu) | 684 | static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) |
673 | { | 685 | { |
674 | kfree(cache_kobject[cpu]); | 686 | kfree(cache_kobject[cpu]); |
675 | kfree(index_kobject[cpu]); | 687 | kfree(index_kobject[cpu]); |
@@ -680,13 +692,14 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu) | |||
680 | 692 | ||
681 | static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) | 693 | static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) |
682 | { | 694 | { |
695 | int err; | ||
683 | 696 | ||
684 | if (num_cache_leaves == 0) | 697 | if (num_cache_leaves == 0) |
685 | return -ENOENT; | 698 | return -ENOENT; |
686 | 699 | ||
687 | detect_cache_attributes(cpu); | 700 | err = detect_cache_attributes(cpu); |
688 | if (cpuid4_info[cpu] == NULL) | 701 | if (err) |
689 | return -ENOENT; | 702 | return err; |
690 | 703 | ||
691 | /* Allocate all required memory */ | 704 | /* Allocate all required memory */ |
692 | cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL); | 705 | cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL); |
@@ -705,13 +718,15 @@ err_out: | |||
705 | return -ENOMEM; | 718 | return -ENOMEM; |
706 | } | 719 | } |
707 | 720 | ||
721 | static cpumask_t cache_dev_map = CPU_MASK_NONE; | ||
722 | |||
708 | /* Add/Remove cache interface for CPU device */ | 723 | /* Add/Remove cache interface for CPU device */ |
709 | static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | 724 | static int __cpuinit cache_add_dev(struct sys_device * sys_dev) |
710 | { | 725 | { |
711 | unsigned int cpu = sys_dev->id; | 726 | unsigned int cpu = sys_dev->id; |
712 | unsigned long i, j; | 727 | unsigned long i, j; |
713 | struct _index_kobject *this_object; | 728 | struct _index_kobject *this_object; |
714 | int retval = 0; | 729 | int retval; |
715 | 730 | ||
716 | retval = cpuid4_cache_sysfs_init(cpu); | 731 | retval = cpuid4_cache_sysfs_init(cpu); |
717 | if (unlikely(retval < 0)) | 732 | if (unlikely(retval < 0)) |
@@ -721,6 +736,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
721 | kobject_set_name(cache_kobject[cpu], "%s", "cache"); | 736 | kobject_set_name(cache_kobject[cpu], "%s", "cache"); |
722 | cache_kobject[cpu]->ktype = &ktype_percpu_entry; | 737 | cache_kobject[cpu]->ktype = &ktype_percpu_entry; |
723 | retval = kobject_register(cache_kobject[cpu]); | 738 | retval = kobject_register(cache_kobject[cpu]); |
739 | if (retval < 0) { | ||
740 | cpuid4_cache_sysfs_exit(cpu); | ||
741 | return retval; | ||
742 | } | ||
724 | 743 | ||
725 | for (i = 0; i < num_cache_leaves; i++) { | 744 | for (i = 0; i < num_cache_leaves; i++) { |
726 | this_object = INDEX_KOBJECT_PTR(cpu,i); | 745 | this_object = INDEX_KOBJECT_PTR(cpu,i); |
@@ -740,6 +759,9 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
740 | break; | 759 | break; |
741 | } | 760 | } |
742 | } | 761 | } |
762 | if (!retval) | ||
763 | cpu_set(cpu, cache_dev_map); | ||
764 | |||
743 | return retval; | 765 | return retval; |
744 | } | 766 | } |
745 | 767 | ||
@@ -750,13 +772,14 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
750 | 772 | ||
751 | if (cpuid4_info[cpu] == NULL) | 773 | if (cpuid4_info[cpu] == NULL) |
752 | return; | 774 | return; |
753 | for (i = 0; i < num_cache_leaves; i++) { | 775 | if (!cpu_isset(cpu, cache_dev_map)) |
754 | cache_remove_shared_cpu_map(cpu, i); | 776 | return; |
777 | cpu_clear(cpu, cache_dev_map); | ||
778 | |||
779 | for (i = 0; i < num_cache_leaves; i++) | ||
755 | kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); | 780 | kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); |
756 | } | ||
757 | kobject_unregister(cache_kobject[cpu]); | 781 | kobject_unregister(cache_kobject[cpu]); |
758 | cpuid4_cache_sysfs_exit(cpu); | 782 | cpuid4_cache_sysfs_exit(cpu); |
759 | return; | ||
760 | } | 783 | } |
761 | 784 | ||
762 | static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, | 785 | static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, |
@@ -781,7 +804,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, | |||
781 | 804 | ||
782 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = | 805 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = |
783 | { | 806 | { |
784 | .notifier_call = cacheinfo_cpu_callback, | 807 | .notifier_call = cacheinfo_cpu_callback, |
785 | }; | 808 | }; |
786 | 809 | ||
787 | static int __cpuinit cache_sysfs_init(void) | 810 | static int __cpuinit cache_sysfs_init(void) |
@@ -791,14 +814,15 @@ static int __cpuinit cache_sysfs_init(void) | |||
791 | if (num_cache_leaves == 0) | 814 | if (num_cache_leaves == 0) |
792 | return 0; | 815 | return 0; |
793 | 816 | ||
794 | register_hotcpu_notifier(&cacheinfo_cpu_notifier); | ||
795 | |||
796 | for_each_online_cpu(i) { | 817 | for_each_online_cpu(i) { |
797 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i); | 818 | int err; |
819 | struct sys_device *sys_dev = get_cpu_sysdev(i); | ||
798 | 820 | ||
799 | cache_add_dev(sys_dev); | 821 | err = cache_add_dev(sys_dev); |
822 | if (err) | ||
823 | return err; | ||
800 | } | 824 | } |
801 | 825 | register_hotcpu_notifier(&cacheinfo_cpu_notifier); | |
802 | return 0; | 826 | return 0; |
803 | } | 827 | } |
804 | 828 | ||
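The cacheinfo changes above follow one pattern throughout: when step j of a per-leaf setup fails, undo steps 0..j-1 before reporting the error, and track which CPUs actually got the sysfs interface (cache_dev_map) so removal only touches what was added. A standalone sketch of that unwind shape, with setup_leaf()/teardown_leaf() as placeholders rather than kernel functions:

    int setup_leaf(int cpu, int leaf);      /* returns 0 or a negative errno */
    void teardown_leaf(int cpu, int leaf);

    static int setup_all_leaves(int cpu, int nleaves)
    {
            int j, err = 0;

            for (j = 0; j < nleaves; j++) {
                    err = setup_leaf(cpu, j);
                    if (err < 0) {
                            while (j--)             /* undo what succeeded */
                                    teardown_leaf(cpu, j);
                            break;
                    }
            }
            return err;
    }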
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 494d320d909b..24885be5c48c 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -131,17 +131,19 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, | |||
131 | { | 131 | { |
132 | unsigned int cpu = (unsigned long)hcpu; | 132 | unsigned int cpu = (unsigned long)hcpu; |
133 | struct sys_device *sys_dev; | 133 | struct sys_device *sys_dev; |
134 | int err; | 134 | int err = 0; |
135 | 135 | ||
136 | sys_dev = get_cpu_sysdev(cpu); | 136 | sys_dev = get_cpu_sysdev(cpu); |
137 | switch (action) { | 137 | switch (action) { |
138 | case CPU_ONLINE: | 138 | case CPU_UP_PREPARE: |
139 | case CPU_ONLINE_FROZEN: | 139 | case CPU_UP_PREPARE_FROZEN: |
140 | mutex_lock(&therm_cpu_lock); | 140 | mutex_lock(&therm_cpu_lock); |
141 | err = thermal_throttle_add_dev(sys_dev); | 141 | err = thermal_throttle_add_dev(sys_dev); |
142 | mutex_unlock(&therm_cpu_lock); | 142 | mutex_unlock(&therm_cpu_lock); |
143 | WARN_ON(err); | 143 | WARN_ON(err); |
144 | break; | 144 | break; |
145 | case CPU_UP_CANCELED: | ||
146 | case CPU_UP_CANCELED_FROZEN: | ||
145 | case CPU_DEAD: | 147 | case CPU_DEAD: |
146 | case CPU_DEAD_FROZEN: | 148 | case CPU_DEAD_FROZEN: |
147 | mutex_lock(&therm_cpu_lock); | 149 | mutex_lock(&therm_cpu_lock); |
@@ -149,7 +151,7 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, | |||
149 | mutex_unlock(&therm_cpu_lock); | 151 | mutex_unlock(&therm_cpu_lock); |
150 | break; | 152 | break; |
151 | } | 153 | } |
152 | return NOTIFY_OK; | 154 | return err ? NOTIFY_BAD : NOTIFY_OK; |
153 | } | 155 | } |
154 | 156 | ||
155 | static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = | 157 | static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = |
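The therm_throt change (like the mce and msr changes below) moves device creation from CPU_ONLINE to CPU_UP_PREPARE and propagates failure as NOTIFY_BAD; under the hotplug notifier convention this vetoes the CPU coming online, and CPU_UP_CANCELED gives the driver a chance to undo a partially prepared CPU. A kernel-context sketch of that callback shape (the helper names are placeholders):

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    int example_add_dev(unsigned int cpu);      /* placeholder helpers */
    void example_remove_dev(unsigned int cpu);

    static int example_cpu_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;
            int err = 0;

            switch (action) {
            case CPU_UP_PREPARE:
            case CPU_UP_PREPARE_FROZEN:
                    err = example_add_dev(cpu);     /* may fail: veto the online */
                    break;
            case CPU_UP_CANCELED:
            case CPU_UP_CANCELED_FROZEN:
            case CPU_DEAD:
            case CPU_DEAD_FROZEN:
                    example_remove_dev(cpu);        /* undo the preparation */
                    break;
            }
            return err ? NOTIFY_BAD : NOTIFY_OK;
    }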
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c index 8ca8f8648969..66e6b797b2cb 100644 --- a/arch/x86/kernel/mce_64.c +++ b/arch/x86/kernel/mce_64.c | |||
@@ -802,16 +802,29 @@ static __cpuinit int mce_create_device(unsigned int cpu) | |||
802 | if (!mce_available(&cpu_data[cpu])) | 802 | if (!mce_available(&cpu_data[cpu])) |
803 | return -EIO; | 803 | return -EIO; |
804 | 804 | ||
805 | memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject)); | ||
805 | per_cpu(device_mce,cpu).id = cpu; | 806 | per_cpu(device_mce,cpu).id = cpu; |
806 | per_cpu(device_mce,cpu).cls = &mce_sysclass; | 807 | per_cpu(device_mce,cpu).cls = &mce_sysclass; |
807 | 808 | ||
808 | err = sysdev_register(&per_cpu(device_mce,cpu)); | 809 | err = sysdev_register(&per_cpu(device_mce,cpu)); |
810 | if (err) | ||
811 | return err; | ||
812 | |||
813 | for (i = 0; mce_attributes[i]; i++) { | ||
814 | err = sysdev_create_file(&per_cpu(device_mce,cpu), | ||
815 | mce_attributes[i]); | ||
816 | if (err) | ||
817 | goto error; | ||
818 | } | ||
809 | 819 | ||
810 | if (!err) { | 820 | return 0; |
811 | for (i = 0; mce_attributes[i]; i++) | 821 | error: |
812 | sysdev_create_file(&per_cpu(device_mce,cpu), | 822 | while (i--) { |
813 | mce_attributes[i]); | 823 | sysdev_remove_file(&per_cpu(device_mce,cpu), |
824 | mce_attributes[i]); | ||
814 | } | 825 | } |
826 | sysdev_unregister(&per_cpu(device_mce,cpu)); | ||
827 | |||
815 | return err; | 828 | return err; |
816 | } | 829 | } |
817 | 830 | ||
@@ -823,7 +836,6 @@ static void mce_remove_device(unsigned int cpu) | |||
823 | sysdev_remove_file(&per_cpu(device_mce,cpu), | 836 | sysdev_remove_file(&per_cpu(device_mce,cpu), |
824 | mce_attributes[i]); | 837 | mce_attributes[i]); |
825 | sysdev_unregister(&per_cpu(device_mce,cpu)); | 838 | sysdev_unregister(&per_cpu(device_mce,cpu)); |
826 | memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject)); | ||
827 | } | 839 | } |
828 | 840 | ||
829 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | 841 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
@@ -831,18 +843,21 @@ static int | |||
831 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | 843 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
832 | { | 844 | { |
833 | unsigned int cpu = (unsigned long)hcpu; | 845 | unsigned int cpu = (unsigned long)hcpu; |
846 | int err = 0; | ||
834 | 847 | ||
835 | switch (action) { | 848 | switch (action) { |
836 | case CPU_ONLINE: | 849 | case CPU_UP_PREPARE: |
837 | case CPU_ONLINE_FROZEN: | 850 | case CPU_UP_PREPARE_FROZEN: |
838 | mce_create_device(cpu); | 851 | err = mce_create_device(cpu); |
839 | break; | 852 | break; |
853 | case CPU_UP_CANCELED: | ||
854 | case CPU_UP_CANCELED_FROZEN: | ||
840 | case CPU_DEAD: | 855 | case CPU_DEAD: |
841 | case CPU_DEAD_FROZEN: | 856 | case CPU_DEAD_FROZEN: |
842 | mce_remove_device(cpu); | 857 | mce_remove_device(cpu); |
843 | break; | 858 | break; |
844 | } | 859 | } |
845 | return NOTIFY_OK; | 860 | return err ? NOTIFY_BAD : NOTIFY_OK; |
846 | } | 861 | } |
847 | 862 | ||
848 | static struct notifier_block mce_cpu_notifier = { | 863 | static struct notifier_block mce_cpu_notifier = { |
@@ -857,9 +872,13 @@ static __init int mce_init_device(void) | |||
857 | if (!mce_available(&boot_cpu_data)) | 872 | if (!mce_available(&boot_cpu_data)) |
858 | return -EIO; | 873 | return -EIO; |
859 | err = sysdev_class_register(&mce_sysclass); | 874 | err = sysdev_class_register(&mce_sysclass); |
875 | if (err) | ||
876 | return err; | ||
860 | 877 | ||
861 | for_each_online_cpu(i) { | 878 | for_each_online_cpu(i) { |
862 | mce_create_device(i); | 879 | err = mce_create_device(i); |
880 | if (err) | ||
881 | return err; | ||
863 | } | 882 | } |
864 | 883 | ||
865 | register_hotcpu_notifier(&mce_cpu_notifier); | 884 | register_hotcpu_notifier(&mce_cpu_notifier); |
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index df85c9c13601..e18e516cf549 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -133,37 +133,42 @@ static const struct file_operations msr_fops = { | |||
133 | .open = msr_open, | 133 | .open = msr_open, |
134 | }; | 134 | }; |
135 | 135 | ||
136 | static int __cpuinit msr_device_create(int i) | 136 | static int __cpuinit msr_device_create(int cpu) |
137 | { | 137 | { |
138 | int err = 0; | ||
139 | struct device *dev; | 138 | struct device *dev; |
140 | 139 | ||
141 | dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), "msr%d",i); | 140 | dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), |
142 | if (IS_ERR(dev)) | 141 | "msr%d", cpu); |
143 | err = PTR_ERR(dev); | 142 | return IS_ERR(dev) ? PTR_ERR(dev) : 0; |
144 | return err; | 143 | } |
144 | |||
145 | static void msr_device_destroy(int cpu) | ||
146 | { | ||
147 | device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); | ||
145 | } | 148 | } |
146 | 149 | ||
147 | static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, | 150 | static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, |
148 | unsigned long action, void *hcpu) | 151 | unsigned long action, void *hcpu) |
149 | { | 152 | { |
150 | unsigned int cpu = (unsigned long)hcpu; | 153 | unsigned int cpu = (unsigned long)hcpu; |
154 | int err = 0; | ||
151 | 155 | ||
152 | switch (action) { | 156 | switch (action) { |
153 | case CPU_ONLINE: | 157 | case CPU_UP_PREPARE: |
154 | case CPU_ONLINE_FROZEN: | 158 | case CPU_UP_PREPARE_FROZEN: |
155 | msr_device_create(cpu); | 159 | err = msr_device_create(cpu); |
156 | break; | 160 | break; |
161 | case CPU_UP_CANCELED: | ||
162 | case CPU_UP_CANCELED_FROZEN: | ||
157 | case CPU_DEAD: | 163 | case CPU_DEAD: |
158 | case CPU_DEAD_FROZEN: | 164 | case CPU_DEAD_FROZEN: |
159 | device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); | 165 | msr_device_destroy(cpu); |
160 | break; | 166 | break; |
161 | } | 167 | } |
162 | return NOTIFY_OK; | 168 | return err ? NOTIFY_BAD : NOTIFY_OK; |
163 | } | 169 | } |
164 | 170 | ||
165 | static struct notifier_block __cpuinitdata msr_class_cpu_notifier = | 171 | static struct notifier_block __cpuinitdata msr_class_cpu_notifier = { |
166 | { | ||
167 | .notifier_call = msr_class_cpu_callback, | 172 | .notifier_call = msr_class_cpu_callback, |
168 | }; | 173 | }; |
169 | 174 | ||
@@ -196,7 +201,7 @@ static int __init msr_init(void) | |||
196 | out_class: | 201 | out_class: |
197 | i = 0; | 202 | i = 0; |
198 | for_each_online_cpu(i) | 203 | for_each_online_cpu(i) |
199 | device_destroy(msr_class, MKDEV(MSR_MAJOR, i)); | 204 | msr_device_destroy(i); |
200 | class_destroy(msr_class); | 205 | class_destroy(msr_class); |
201 | out_chrdev: | 206 | out_chrdev: |
202 | unregister_chrdev(MSR_MAJOR, "cpu/msr"); | 207 | unregister_chrdev(MSR_MAJOR, "cpu/msr"); |
@@ -208,7 +213,7 @@ static void __exit msr_exit(void) | |||
208 | { | 213 | { |
209 | int cpu = 0; | 214 | int cpu = 0; |
210 | for_each_online_cpu(cpu) | 215 | for_each_online_cpu(cpu) |
211 | device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); | 216 | msr_device_destroy(cpu); |
212 | class_destroy(msr_class); | 217 | class_destroy(msr_class); |
213 | unregister_chrdev(MSR_MAJOR, "cpu/msr"); | 218 | unregister_chrdev(MSR_MAJOR, "cpu/msr"); |
214 | unregister_hotcpu_notifier(&msr_class_cpu_notifier); | 219 | unregister_hotcpu_notifier(&msr_class_cpu_notifier); |
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c index 573c0a6e0ac6..f8fafe527ff1 100644 --- a/arch/x86/kernel/suspend_64.c +++ b/arch/x86/kernel/suspend_64.c | |||
@@ -150,8 +150,22 @@ void fix_processor_context(void) | |||
150 | /* Defined in arch/x86_64/kernel/suspend_asm.S */ | 150 | /* Defined in arch/x86_64/kernel/suspend_asm.S */ |
151 | extern int restore_image(void); | 151 | extern int restore_image(void); |
152 | 152 | ||
153 | /* | ||
154 | * Address to jump to in the last phase of restore in order to get to the image | ||
155 | * kernel's text (this value is passed in the image header). | ||
156 | */ | ||
157 | unsigned long restore_jump_address; | ||
158 | |||
159 | /* | ||
160 | * Value of the cr3 register from before the hibernation (this value is passed | ||
161 | * in the image header). | ||
162 | */ | ||
163 | unsigned long restore_cr3; | ||
164 | |||
153 | pgd_t *temp_level4_pgt; | 165 | pgd_t *temp_level4_pgt; |
154 | 166 | ||
167 | void *relocated_restore_code; | ||
168 | |||
155 | static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) | 169 | static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) |
156 | { | 170 | { |
157 | long i, j; | 171 | long i, j; |
@@ -175,7 +189,7 @@ static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long en | |||
175 | 189 | ||
176 | if (paddr >= end) | 190 | if (paddr >= end) |
177 | break; | 191 | break; |
178 | pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr; | 192 | pe = __PAGE_KERNEL_LARGE_EXEC | paddr; |
179 | pe &= __supported_pte_mask; | 193 | pe &= __supported_pte_mask; |
180 | set_pmd(pmd, __pmd(pe)); | 194 | set_pmd(pmd, __pmd(pe)); |
181 | } | 195 | } |
@@ -183,25 +197,42 @@ static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long en | |||
183 | return 0; | 197 | return 0; |
184 | } | 198 | } |
185 | 199 | ||
200 | static int res_kernel_text_pud_init(pud_t *pud, unsigned long start) | ||
201 | { | ||
202 | pmd_t *pmd; | ||
203 | unsigned long paddr; | ||
204 | |||
205 | pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); | ||
206 | if (!pmd) | ||
207 | return -ENOMEM; | ||
208 | set_pud(pud + pud_index(start), __pud(__pa(pmd) | _KERNPG_TABLE)); | ||
209 | for (paddr = 0; paddr < KERNEL_TEXT_SIZE; pmd++, paddr += PMD_SIZE) { | ||
210 | unsigned long pe; | ||
211 | |||
212 | pe = __PAGE_KERNEL_LARGE_EXEC | _PAGE_GLOBAL | paddr; | ||
213 | pe &= __supported_pte_mask; | ||
214 | set_pmd(pmd, __pmd(pe)); | ||
215 | } | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
186 | static int set_up_temporary_mappings(void) | 220 | static int set_up_temporary_mappings(void) |
187 | { | 221 | { |
188 | unsigned long start, end, next; | 222 | unsigned long start, end, next; |
223 | pud_t *pud; | ||
189 | int error; | 224 | int error; |
190 | 225 | ||
191 | temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); | 226 | temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); |
192 | if (!temp_level4_pgt) | 227 | if (!temp_level4_pgt) |
193 | return -ENOMEM; | 228 | return -ENOMEM; |
194 | 229 | ||
195 | /* It is safe to reuse the original kernel mapping */ | ||
196 | set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), | ||
197 | init_level4_pgt[pgd_index(__START_KERNEL_map)]); | ||
198 | |||
199 | /* Set up the direct mapping from scratch */ | 230 | /* Set up the direct mapping from scratch */ |
200 | start = (unsigned long)pfn_to_kaddr(0); | 231 | start = (unsigned long)pfn_to_kaddr(0); |
201 | end = (unsigned long)pfn_to_kaddr(end_pfn); | 232 | end = (unsigned long)pfn_to_kaddr(end_pfn); |
202 | 233 | ||
203 | for (; start < end; start = next) { | 234 | for (; start < end; start = next) { |
204 | pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC); | 235 | pud = (pud_t *)get_safe_page(GFP_ATOMIC); |
205 | if (!pud) | 236 | if (!pud) |
206 | return -ENOMEM; | 237 | return -ENOMEM; |
207 | next = start + PGDIR_SIZE; | 238 | next = start + PGDIR_SIZE; |
@@ -212,7 +243,17 @@ static int set_up_temporary_mappings(void) | |||
212 | set_pgd(temp_level4_pgt + pgd_index(start), | 243 | set_pgd(temp_level4_pgt + pgd_index(start), |
213 | mk_kernel_pgd(__pa(pud))); | 244 | mk_kernel_pgd(__pa(pud))); |
214 | } | 245 | } |
215 | return 0; | 246 | |
247 | /* Set up the kernel text mapping from scratch */ | ||
248 | pud = (pud_t *)get_safe_page(GFP_ATOMIC); | ||
249 | if (!pud) | ||
250 | return -ENOMEM; | ||
251 | error = res_kernel_text_pud_init(pud, __START_KERNEL_map); | ||
252 | if (!error) | ||
253 | set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), | ||
254 | __pgd(__pa(pud) | _PAGE_TABLE)); | ||
255 | |||
256 | return error; | ||
216 | } | 257 | } |
217 | 258 | ||
218 | int swsusp_arch_resume(void) | 259 | int swsusp_arch_resume(void) |
@@ -222,6 +263,13 @@ int swsusp_arch_resume(void) | |||
222 | /* We have got enough memory and from now on we cannot recover */ | 263 | /* We have got enough memory and from now on we cannot recover */ |
223 | if ((error = set_up_temporary_mappings())) | 264 | if ((error = set_up_temporary_mappings())) |
224 | return error; | 265 | return error; |
266 | |||
267 | relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC); | ||
268 | if (!relocated_restore_code) | ||
269 | return -ENOMEM; | ||
270 | memcpy(relocated_restore_code, &core_restore_code, | ||
271 | &restore_registers - &core_restore_code); | ||
272 | |||
225 | restore_image(); | 273 | restore_image(); |
226 | return 0; | 274 | return 0; |
227 | } | 275 | } |
@@ -236,4 +284,43 @@ int pfn_is_nosave(unsigned long pfn) | |||
236 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; | 284 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; |
237 | return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); | 285 | return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); |
238 | } | 286 | } |
287 | |||
288 | struct restore_data_record { | ||
289 | unsigned long jump_address; | ||
290 | unsigned long cr3; | ||
291 | unsigned long magic; | ||
292 | }; | ||
293 | |||
294 | #define RESTORE_MAGIC 0x0123456789ABCDEFUL | ||
295 | |||
296 | /** | ||
297 | * arch_hibernation_header_save - populate the architecture specific part | ||
298 | * of a hibernation image header | ||
299 | * @addr: address to save the data at | ||
300 | */ | ||
301 | int arch_hibernation_header_save(void *addr, unsigned int max_size) | ||
302 | { | ||
303 | struct restore_data_record *rdr = addr; | ||
304 | |||
305 | if (max_size < sizeof(struct restore_data_record)) | ||
306 | return -EOVERFLOW; | ||
307 | rdr->jump_address = restore_jump_address; | ||
308 | rdr->cr3 = restore_cr3; | ||
309 | rdr->magic = RESTORE_MAGIC; | ||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | /** | ||
314 | * arch_hibernation_header_restore - read the architecture specific data | ||
315 | * from the hibernation image header | ||
316 | * @addr: address to read the data from | ||
317 | */ | ||
318 | int arch_hibernation_header_restore(void *addr) | ||
319 | { | ||
320 | struct restore_data_record *rdr = addr; | ||
321 | |||
322 | restore_jump_address = rdr->jump_address; | ||
323 | restore_cr3 = rdr->cr3; | ||
324 | return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; | ||
325 | } | ||
239 | #endif /* CONFIG_HIBERNATION */ | 326 | #endif /* CONFIG_HIBERNATION */ |
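The suspend_64.c additions define the architecture-specific hibernation header: restore_jump_address and restore_cr3 are written out by arch_hibernation_header_save() and read back by arch_hibernation_header_restore(), with RESTORE_MAGIC rejecting a header produced by a mismatched kernel. A standalone userspace sketch of that round trip (the addresses are made-up values):

    #include <stdio.h>
    #include <string.h>

    struct restore_data_record {
            unsigned long jump_address;
            unsigned long cr3;
            unsigned long magic;
    };

    #define RESTORE_MAGIC 0x0123456789ABCDEFUL

    int main(void)
    {
            unsigned char header[64];
            struct restore_data_record rdr = {
                    .jump_address = 0xffffffff80201000UL,   /* made up */
                    .cr3          = 0x1aa000UL,             /* made up */
                    .magic        = RESTORE_MAGIC,
            };
            struct restore_data_record check;

            memcpy(header, &rdr, sizeof(rdr));      /* "save" into the image header */
            memcpy(&check, header, sizeof(check));  /* "restore" from the header    */
            printf("%s\n", check.magic == RESTORE_MAGIC ? "valid" : "-EINVAL");
            return 0;
    }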
diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S index 16d183f67bc1..48344b666d2c 100644 --- a/arch/x86/kernel/suspend_asm_64.S +++ b/arch/x86/kernel/suspend_asm_64.S | |||
@@ -2,8 +2,8 @@ | |||
2 | * | 2 | * |
3 | * Distribute under GPLv2. | 3 | * Distribute under GPLv2. |
4 | * | 4 | * |
5 | * swsusp_arch_resume may not use any stack, nor any variable that is | 5 | * swsusp_arch_resume must not use any stack or any nonlocal variables while |
6 | * not "NoSave" during copying pages: | 6 | * copying pages: |
7 | * | 7 | * |
8 | * It's rewriting one kernel image with another. What is stack in "old" | 8 | * It's rewriting one kernel image with another. What is stack in "old" |
9 | * image could very well be data page in "new" image, and overwriting | 9 | * image could very well be data page in "new" image, and overwriting |
@@ -36,6 +36,13 @@ ENTRY(swsusp_arch_suspend) | |||
36 | movq %r15, saved_context_r15(%rip) | 36 | movq %r15, saved_context_r15(%rip) |
37 | pushfq ; popq saved_context_eflags(%rip) | 37 | pushfq ; popq saved_context_eflags(%rip) |
38 | 38 | ||
39 | /* save the address of restore_registers */ | ||
40 | movq $restore_registers, %rax | ||
41 | movq %rax, restore_jump_address(%rip) | ||
42 | /* save cr3 */ | ||
43 | movq %cr3, %rax | ||
44 | movq %rax, restore_cr3(%rip) | ||
45 | |||
39 | call swsusp_save | 46 | call swsusp_save |
40 | ret | 47 | ret |
41 | 48 | ||
@@ -54,7 +61,17 @@ ENTRY(restore_image) | |||
54 | movq %rcx, %cr3; | 61 | movq %rcx, %cr3; |
55 | movq %rax, %cr4; # turn PGE back on | 62 | movq %rax, %cr4; # turn PGE back on |
56 | 63 | ||
64 | /* prepare to jump to the image kernel */ | ||
65 | movq restore_jump_address(%rip), %rax | ||
66 | movq restore_cr3(%rip), %rbx | ||
67 | |||
68 | /* prepare to copy image data to their original locations */ | ||
57 | movq restore_pblist(%rip), %rdx | 69 | movq restore_pblist(%rip), %rdx |
70 | movq relocated_restore_code(%rip), %rcx | ||
71 | jmpq *%rcx | ||
72 | |||
73 | /* code below has been relocated to a safe page */ | ||
74 | ENTRY(core_restore_code) | ||
58 | loop: | 75 | loop: |
59 | testq %rdx, %rdx | 76 | testq %rdx, %rdx |
60 | jz done | 77 | jz done |
@@ -62,7 +79,7 @@ loop: | |||
62 | /* get addresses from the pbe and copy the page */ | 79 | /* get addresses from the pbe and copy the page */ |
63 | movq pbe_address(%rdx), %rsi | 80 | movq pbe_address(%rdx), %rsi |
64 | movq pbe_orig_address(%rdx), %rdi | 81 | movq pbe_orig_address(%rdx), %rdi |
65 | movq $512, %rcx | 82 | movq $(PAGE_SIZE >> 3), %rcx |
66 | rep | 83 | rep |
67 | movsq | 84 | movsq |
68 | 85 | ||
@@ -70,10 +87,22 @@ loop: | |||
70 | movq pbe_next(%rdx), %rdx | 87 | movq pbe_next(%rdx), %rdx |
71 | jmp loop | 88 | jmp loop |
72 | done: | 89 | done: |
90 | /* jump to the restore_registers address from the image header */ | ||
91 | jmpq *%rax | ||
92 | /* | ||
93 | * NOTE: This assumes that the boot kernel's text mapping covers the | ||
94 | * image kernel's page containing restore_registers and the address of | ||
95 | * this page is the same as in the image kernel's text mapping (it | ||
96 | * should always be true, because the text mapping is linear, starting | ||
97 | * from 0, and is supposed to cover the entire kernel text for every | ||
98 | * kernel). | ||
99 | * | ||
100 | * code below belongs to the image kernel | ||
101 | */ | ||
102 | |||
103 | ENTRY(restore_registers) | ||
73 | /* go back to the original page tables */ | 104 | /* go back to the original page tables */ |
74 | movq $(init_level4_pgt - __START_KERNEL_map), %rax | 105 | movq %rbx, %cr3 |
75 | addq phys_base(%rip), %rax | ||
76 | movq %rax, %cr3 | ||
77 | 106 | ||
78 | /* Flush TLB, including "global" things (vmalloc) */ | 107 | /* Flush TLB, including "global" things (vmalloc) */ |
79 | movq mmu_cr4_features(%rip), %rax | 108 | movq mmu_cr4_features(%rip), %rax |
@@ -84,12 +113,9 @@ done: | |||
84 | movq %rcx, %cr3 | 113 | movq %rcx, %cr3 |
85 | movq %rax, %cr4; # turn PGE back on | 114 | movq %rax, %cr4; # turn PGE back on |
86 | 115 | ||
87 | movl $24, %eax | ||
88 | movl %eax, %ds | ||
89 | |||
90 | movq saved_context_esp(%rip), %rsp | 116 | movq saved_context_esp(%rip), %rsp |
91 | movq saved_context_ebp(%rip), %rbp | 117 | movq saved_context_ebp(%rip), %rbp |
92 | /* Don't restore %rax, it must be 0 anyway */ | 118 | /* restore GPRs (we don't restore %rax, it must be 0 anyway) */ |
93 | movq saved_context_ebx(%rip), %rbx | 119 | movq saved_context_ebx(%rip), %rbx |
94 | movq saved_context_ecx(%rip), %rcx | 120 | movq saved_context_ecx(%rip), %rcx |
95 | movq saved_context_edx(%rip), %rdx | 121 | movq saved_context_edx(%rip), %rdx |
@@ -107,4 +133,7 @@ done: | |||
107 | 133 | ||
108 | xorq %rax, %rax | 134 | xorq %rax, %rax |
109 | 135 | ||
136 | /* tell the hibernation core that we've just restored the memory */ | ||
137 | movq %rax, in_suspend(%rip) | ||
138 | |||
110 | ret | 139 | ret |
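One detail in the relocated copy loop above: the old code hard-coded 512 quadwords per page, while the new code derives it as PAGE_SIZE >> 3, i.e. 4096 bytes / 8 bytes per movsq = 512 iterations, which only stays correct because the base page size on this x86-64 path is 4 KB. A trivial standalone check of that arithmetic, with PAGE_SIZE assumed here rather than taken from kernel headers:

    #include <assert.h>

    #define PAGE_SIZE 4096UL        /* assumed x86-64 base page size */

    int main(void)
    {
            assert((PAGE_SIZE >> 3) == 512);    /* quadwords copied per page */
            return 0;
    }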
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 8a67e282cb5e..585541ca1a7e 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -64,6 +64,16 @@ struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data = | |||
64 | .sysctl_enabled = 1, | 64 | .sysctl_enabled = 1, |
65 | }; | 65 | }; |
66 | 66 | ||
67 | void update_vsyscall_tz(void) | ||
68 | { | ||
69 | unsigned long flags; | ||
70 | |||
71 | write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); | ||
72 | /* sys_tz has changed */ | ||
73 | vsyscall_gtod_data.sys_tz = sys_tz; | ||
74 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | ||
75 | } | ||
76 | |||
67 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | 77 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) |
68 | { | 78 | { |
69 | unsigned long flags; | 79 | unsigned long flags; |
@@ -77,7 +87,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | |||
77 | vsyscall_gtod_data.clock.shift = clock->shift; | 87 | vsyscall_gtod_data.clock.shift = clock->shift; |
78 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; | 88 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; |
79 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; | 89 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; |
80 | vsyscall_gtod_data.sys_tz = sys_tz; | ||
81 | vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; | 90 | vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; |
82 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | 91 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); |
83 | } | 92 | } |
@@ -163,7 +172,7 @@ time_t __vsyscall(1) vtime(time_t *t) | |||
163 | if (unlikely(!__vsyscall_gtod_data.sysctl_enabled)) | 172 | if (unlikely(!__vsyscall_gtod_data.sysctl_enabled)) |
164 | return time_syscall(t); | 173 | return time_syscall(t); |
165 | 174 | ||
166 | vgettimeofday(&tv, 0); | 175 | vgettimeofday(&tv, NULL); |
167 | result = tv.tv_sec; | 176 | result = tv.tv_sec; |
168 | if (t) | 177 | if (t) |
169 | *t = result; | 178 | *t = result; |
@@ -257,18 +266,10 @@ out: | |||
257 | return ret; | 266 | return ret; |
258 | } | 267 | } |
259 | 268 | ||
260 | static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen, | ||
261 | void __user *oldval, size_t __user *oldlenp, | ||
262 | void __user *newval, size_t newlen) | ||
263 | { | ||
264 | return -ENOSYS; | ||
265 | } | ||
266 | |||
267 | static ctl_table kernel_table2[] = { | 269 | static ctl_table kernel_table2[] = { |
268 | { .ctl_name = 99, .procname = "vsyscall64", | 270 | { .procname = "vsyscall64", |
269 | .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int), | 271 | .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int), |
270 | .mode = 0644, | 272 | .mode = 0644, |
271 | .strategy = vsyscall_sysctl_nostrat, | ||
272 | .proc_handler = vsyscall_sysctl_change }, | 273 | .proc_handler = vsyscall_sysctl_change }, |
273 | {} | 274 | {} |
274 | }; | 275 | }; |
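The new update_vsyscall_tz() takes sys_tz updates out of update_vsyscall() but keeps them under the same seqlock, so vsyscall readers still see a consistent snapshot. A kernel-context sketch of the reader side of that discipline (the names below are placeholders, not the actual vgettimeofday path, and the initializer is the 2.6.23-era SEQLOCK_UNLOCKED):

    #include <linux/seqlock.h>
    #include <linux/time.h>

    static seqlock_t example_lock = SEQLOCK_UNLOCKED;
    static struct timezone example_tz;

    static struct timezone example_read_tz(void)
    {
            struct timezone tz;
            unsigned seq;

            do {
                    seq = read_seqbegin(&example_lock);  /* snapshot sequence */
                    tz = example_tz;
            } while (read_seqretry(&example_lock, seq)); /* retry if a writer ran */

            return tz;
    }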
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index 43fafe9e9c08..78cb68f2ebbd 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig | |||
@@ -716,6 +716,11 @@ menu "Power management options" | |||
716 | 716 | ||
717 | source kernel/power/Kconfig | 717 | source kernel/power/Kconfig |
718 | 718 | ||
719 | config ARCH_HIBERNATION_HEADER | ||
720 | bool | ||
721 | depends on HIBERNATION | ||
722 | default y | ||
723 | |||
719 | source "drivers/acpi/Kconfig" | 724 | source "drivers/acpi/Kconfig" |
720 | 725 | ||
721 | source "arch/x86/kernel/cpufreq/Kconfig" | 726 | source "arch/x86/kernel/cpufreq/Kconfig" |