author     Linus Torvalds <torvalds@linux-foundation.org>   2014-04-01 14:00:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-04-01 14:00:07 -0400
commit     1ead65812486cda65093683a99b8907a7242fa93
tree       094684870815537aae4aedb69c10d669ba29f0af
parent     b6d739e9581272f0bbbd6edd15340fb8e108df96
parent     b97f0291a2504291aef850077f98cab68a5a2f33
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer changes from Thomas Gleixner:
"This assorted collection provides:
- A new timer-based broadcast feature for systems which do not
  provide a globally accessible timer device. This allows those
  systems to put CPUs into deep idle states where the per-CPU timer
  device stops.
- A few NOHZ_FULL-related improvements to the timer wheel
- The usual updates to timer devices found in ARM SoCs
- Small improvements and updates all over the place"
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (44 commits)
tick: Remove code duplication in tick_handle_periodic()
tick: Fix spelling mistake in tick_handle_periodic()
x86: hpet: Use proper destructor for delayed work
workqueue: Provide destroy_delayed_work_on_stack()
clocksource: CMT, MTU2, TMU and STI should depend on GENERIC_CLOCKEVENTS
timer: Remove code redundancy while calling get_nohz_timer_target()
hrtimer: Rearrange comments in the order struct members are declared
timer: Use variable head instead of &work_list in __run_timers()
clocksource: exynos_mct: silence a static checker warning
arm: zynq: Add support for cpufreq
arm: zynq: Don't use arm_global_timer with cpufreq
clocksource/cadence_ttc: Overhaul clocksource frequency adjustment
clocksource/cadence_ttc: Call clockevents_update_freq() with IRQs enabled
clocksource: Add Kconfig entries for CMT, MTU2, TMU and STI
sh: Remove Kconfig entries for TMU, CMT and MTU2
ARM: shmobile: Remove CMT, TMU and STI Kconfig entries
clocksource: armada-370-xp: Use atomic access for shared registers
clocksource: orion: Use atomic access for shared registers
clocksource: timer-keystone: Delete unnecessary variable
clocksource: timer-keystone: introduce clocksource driver for Keystone
...
44 files changed, 807 insertions, 226 deletions
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt b/Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt index 48aeb7884ed3..5c2e23574ca0 100644 --- a/Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt +++ b/Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt | |||
| @@ -2,7 +2,7 @@ Allwinner A1X SoCs Timer Controller | |||
| 2 | 2 | ||
| 3 | Required properties: | 3 | Required properties: |
| 4 | 4 | ||
| 5 | - compatible : should be "allwinner,sun4i-timer" | 5 | - compatible : should be "allwinner,sun4i-a10-timer" |
| 6 | - reg : Specifies base physical address and size of the registers. | 6 | - reg : Specifies base physical address and size of the registers. |
| 7 | - interrupts : The interrupt of the first timer | 7 | - interrupts : The interrupt of the first timer |
| 8 | - clocks: phandle to the source clock (usually a 24 MHz fixed clock) | 8 | - clocks: phandle to the source clock (usually a 24 MHz fixed clock) |
| @@ -10,7 +10,7 @@ Required properties: | |||
| 10 | Example: | 10 | Example: |
| 11 | 11 | ||
| 12 | timer { | 12 | timer { |
| 13 | compatible = "allwinner,sun4i-timer"; | 13 | compatible = "allwinner,sun4i-a10-timer"; |
| 14 | reg = <0x01c20c00 0x400>; | 14 | reg = <0x01c20c00 0x400>; |
| 15 | interrupts = <22>; | 15 | interrupts = <22>; |
| 16 | clocks = <&osc>; | 16 | clocks = <&osc>; |
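
Note that this compatible-string rename only works because the driver's OF match table moves to the new string in the same series; the sun4i_timer.c hunk further down switches its CLOCKSOURCE_OF_DECLARE entry accordingly. A minimal sketch of that driver-side pairing (only the macro line is taken from the actual hunk; the init body is elided):

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/of.h>

/* Sketch only: the timer init function is looked up by compatible string,
 * so the binding document, the .dtsi files and this table entry all have
 * to move to "allwinner,sun4i-a10-timer" together. */
static void __init sun4i_timer_init(struct device_node *node)
{
	/* hardware setup elided in this sketch */
}

CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", sun4i_timer_init);
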
diff --git a/Documentation/devicetree/bindings/timer/ti,keystone-timer.txt b/Documentation/devicetree/bindings/timer/ti,keystone-timer.txt new file mode 100644 index 000000000000..5fbe361252b4 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/ti,keystone-timer.txt | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | * Device tree bindings for Texas Instruments Keystone timer | ||
| 2 | |||
| 3 | This document provides bindings for the 64-bit timer in the KeyStone | ||
| 4 | architecture devices. The timer can be configured either as a single | ||
| 5 | general-purpose 64-bit timer or as dual general-purpose 32-bit timers. When | ||
| 6 | configured as dual 32-bit timers, the two halves can operate in conjunction | ||
| 7 | (chained mode) or independently of each other (unchained mode). | ||
| 8 | |||
| 9 | The global timer is a free-running up-counter and can generate an interrupt | ||
| 10 | when the counter reaches a preset count value. | ||
| 11 | |||
| 12 | Documentation: | ||
| 13 | http://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf | ||
| 14 | |||
| 15 | Required properties: | ||
| 16 | |||
| 17 | - compatible : should be "ti,keystone-timer". | ||
| 18 | - reg : specifies base physical address and count of the registers. | ||
| 19 | - interrupts : interrupt generated by the timer. | ||
| 20 | - clocks : the clock feeding the timer clock. | ||
| 21 | |||
| 22 | Example: | ||
| 23 | |||
| 24 | timer@22f0000 { | ||
| 25 | compatible = "ti,keystone-timer"; | ||
| 26 | reg = <0x022f0000 0x80>; | ||
| 27 | interrupts = <GIC_SPI 110 IRQ_TYPE_EDGE_RISING>; | ||
| 28 | clocks = <&clktimer15>; | ||
| 29 | }; | ||
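
The matching driver is added later in this series (drivers/clocksource/timer-keystone.c) and runs the hardware as a chained 64-bit timer, programming the period as two 32-bit halves. A minimal sketch of that split, reusing the register offsets from that driver (the helper name is made up for this example):

#include <linux/io.h>

#define PRD12	0x18	/* period, low 32 bits  */
#define PRD34	0x1c	/* period, high 32 bits */

/* Sketch: how a 64-bit period value maps onto the two 32-bit period
 * registers, mirroring keystone_timer_config() further down. */
static void keystone_example_set_period(void __iomem *base, u64 period)
{
	writel_relaxed(period & 0xffffffff, base + PRD12);
	writel_relaxed(period >> 32, base + PRD34);
}
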
diff --git a/MAINTAINERS b/MAINTAINERS index 79031a56711b..d1f3cb340e01 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1320,6 +1320,7 @@ M: Linus Walleij <linus.walleij@linaro.org> | |||
| 1320 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1320 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| 1321 | S: Supported | 1321 | S: Supported |
| 1322 | F: arch/arm/mach-u300/ | 1322 | F: arch/arm/mach-u300/ |
| 1323 | F: drivers/clocksource/timer-u300.c | ||
| 1323 | F: drivers/i2c/busses/i2c-stu300.c | 1324 | F: drivers/i2c/busses/i2c-stu300.c |
| 1324 | F: drivers/rtc/rtc-coh901331.c | 1325 | F: drivers/rtc/rtc-coh901331.c |
| 1325 | F: drivers/watchdog/coh901327_wdt.c | 1326 | F: drivers/watchdog/coh901327_wdt.c |
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index d4d2763f4794..2ce61228d5f9 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi | |||
| @@ -403,7 +403,7 @@ | |||
| 403 | }; | 403 | }; |
| 404 | 404 | ||
| 405 | timer@01c20c00 { | 405 | timer@01c20c00 { |
| 406 | compatible = "allwinner,sun4i-timer"; | 406 | compatible = "allwinner,sun4i-a10-timer"; |
| 407 | reg = <0x01c20c00 0x90>; | 407 | reg = <0x01c20c00 0x90>; |
| 408 | interrupts = <22>; | 408 | interrupts = <22>; |
| 409 | clocks = <&osc24M>; | 409 | clocks = <&osc24M>; |
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 79fd412005b0..29dd32d8e77e 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi | |||
| @@ -366,7 +366,7 @@ | |||
| 366 | }; | 366 | }; |
| 367 | 367 | ||
| 368 | timer@01c20c00 { | 368 | timer@01c20c00 { |
| 369 | compatible = "allwinner,sun4i-timer"; | 369 | compatible = "allwinner,sun4i-a10-timer"; |
| 370 | reg = <0x01c20c00 0x90>; | 370 | reg = <0x01c20c00 0x90>; |
| 371 | interrupts = <22>; | 371 | interrupts = <22>; |
| 372 | clocks = <&osc24M>; | 372 | clocks = <&osc24M>; |
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index c463fd730c91..e63bb383b43d 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi | |||
| @@ -329,7 +329,7 @@ | |||
| 329 | }; | 329 | }; |
| 330 | 330 | ||
| 331 | timer@01c20c00 { | 331 | timer@01c20c00 { |
| 332 | compatible = "allwinner,sun4i-timer"; | 332 | compatible = "allwinner,sun4i-a10-timer"; |
| 333 | reg = <0x01c20c00 0x90>; | 333 | reg = <0x01c20c00 0x90>; |
| 334 | interrupts = <22>; | 334 | interrupts = <22>; |
| 335 | clocks = <&osc24M>; | 335 | clocks = <&osc24M>; |
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 5256ad9be52c..996fff54c8a2 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi | |||
| @@ -231,7 +231,7 @@ | |||
| 231 | }; | 231 | }; |
| 232 | 232 | ||
| 233 | timer@01c20c00 { | 233 | timer@01c20c00 { |
| 234 | compatible = "allwinner,sun4i-timer"; | 234 | compatible = "allwinner,sun4i-a10-timer"; |
| 235 | reg = <0x01c20c00 0xa0>; | 235 | reg = <0x01c20c00 0xa0>; |
| 236 | interrupts = <0 18 4>, | 236 | interrupts = <0 18 4>, |
| 237 | <0 19 4>, | 237 | <0 19 4>, |
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 6f25cf559ad0..dddc8ac2d522 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi | |||
| @@ -435,7 +435,7 @@ | |||
| 435 | }; | 435 | }; |
| 436 | 436 | ||
| 437 | timer@01c20c00 { | 437 | timer@01c20c00 { |
| 438 | compatible = "allwinner,sun4i-timer"; | 438 | compatible = "allwinner,sun4i-a10-timer"; |
| 439 | reg = <0x01c20c00 0x90>; | 439 | reg = <0x01c20c00 0x90>; |
| 440 | interrupts = <0 22 4>, | 440 | interrupts = <0 22 4>, |
| 441 | <0 23 4>, | 441 | <0 23 4>, |
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi index 8b67b19392ec..789d0bacc110 100644 --- a/arch/arm/boot/dts/zynq-7000.dtsi +++ b/arch/arm/boot/dts/zynq-7000.dtsi | |||
| @@ -24,6 +24,12 @@ | |||
| 24 | device_type = "cpu"; | 24 | device_type = "cpu"; |
| 25 | reg = <0>; | 25 | reg = <0>; |
| 26 | clocks = <&clkc 3>; | 26 | clocks = <&clkc 3>; |
| 27 | operating-points = < | ||
| 28 | /* kHz uV */ | ||
| 29 | 666667 1000000 | ||
| 30 | 333334 1000000 | ||
| 31 | 222223 1000000 | ||
| 32 | >; | ||
| 27 | }; | 33 | }; |
| 28 | 34 | ||
| 29 | cpu@1 { | 35 | cpu@1 { |
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index 05fa505df585..f6db7dcae3f4 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig | |||
| @@ -24,17 +24,21 @@ comment "Renesas ARM SoCs System Type" | |||
| 24 | 24 | ||
| 25 | config ARCH_EMEV2 | 25 | config ARCH_EMEV2 |
| 26 | bool "Emma Mobile EV2" | 26 | bool "Emma Mobile EV2" |
| 27 | select SYS_SUPPORTS_EM_STI | ||
| 27 | 28 | ||
| 28 | config ARCH_R7S72100 | 29 | config ARCH_R7S72100 |
| 29 | bool "RZ/A1H (R7S72100)" | 30 | bool "RZ/A1H (R7S72100)" |
| 31 | select SYS_SUPPORTS_SH_MTU2 | ||
| 30 | 32 | ||
| 31 | config ARCH_R8A7790 | 33 | config ARCH_R8A7790 |
| 32 | bool "R-Car H2 (R8A77900)" | 34 | bool "R-Car H2 (R8A77900)" |
| 33 | select RENESAS_IRQC | 35 | select RENESAS_IRQC |
| 36 | select SYS_SUPPORTS_SH_CMT | ||
| 34 | 37 | ||
| 35 | config ARCH_R8A7791 | 38 | config ARCH_R8A7791 |
| 36 | bool "R-Car M2 (R8A77910)" | 39 | bool "R-Car M2 (R8A77910)" |
| 37 | select RENESAS_IRQC | 40 | select RENESAS_IRQC |
| 41 | select SYS_SUPPORTS_SH_CMT | ||
| 38 | 42 | ||
| 39 | comment "Renesas ARM SoCs Board Type" | 43 | comment "Renesas ARM SoCs Board Type" |
| 40 | 44 | ||
| @@ -68,6 +72,8 @@ config ARCH_SH7372 | |||
| 68 | select ARM_CPU_SUSPEND if PM || CPU_IDLE | 72 | select ARM_CPU_SUSPEND if PM || CPU_IDLE |
| 69 | select CPU_V7 | 73 | select CPU_V7 |
| 70 | select SH_CLK_CPG | 74 | select SH_CLK_CPG |
| 75 | select SYS_SUPPORTS_SH_CMT | ||
| 76 | select SYS_SUPPORTS_SH_TMU | ||
| 71 | 77 | ||
| 72 | config ARCH_SH73A0 | 78 | config ARCH_SH73A0 |
| 73 | bool "SH-Mobile AG5 (R8A73A00)" | 79 | bool "SH-Mobile AG5 (R8A73A00)" |
| @@ -77,6 +83,8 @@ config ARCH_SH73A0 | |||
| 77 | select I2C | 83 | select I2C |
| 78 | select SH_CLK_CPG | 84 | select SH_CLK_CPG |
| 79 | select RENESAS_INTC_IRQPIN | 85 | select RENESAS_INTC_IRQPIN |
| 86 | select SYS_SUPPORTS_SH_CMT | ||
| 87 | select SYS_SUPPORTS_SH_TMU | ||
| 80 | 88 | ||
| 81 | config ARCH_R8A73A4 | 89 | config ARCH_R8A73A4 |
| 82 | bool "R-Mobile APE6 (R8A73A40)" | 90 | bool "R-Mobile APE6 (R8A73A40)" |
| @@ -87,6 +95,8 @@ config ARCH_R8A73A4 | |||
| 87 | select RENESAS_IRQC | 95 | select RENESAS_IRQC |
| 88 | select ARCH_HAS_CPUFREQ | 96 | select ARCH_HAS_CPUFREQ |
| 89 | select ARCH_HAS_OPP | 97 | select ARCH_HAS_OPP |
| 98 | select SYS_SUPPORTS_SH_CMT | ||
| 99 | select SYS_SUPPORTS_SH_TMU | ||
| 90 | 100 | ||
| 91 | config ARCH_R8A7740 | 101 | config ARCH_R8A7740 |
| 92 | bool "R-Mobile A1 (R8A77400)" | 102 | bool "R-Mobile A1 (R8A77400)" |
| @@ -95,6 +105,8 @@ config ARCH_R8A7740 | |||
| 95 | select CPU_V7 | 105 | select CPU_V7 |
| 96 | select SH_CLK_CPG | 106 | select SH_CLK_CPG |
| 97 | select RENESAS_INTC_IRQPIN | 107 | select RENESAS_INTC_IRQPIN |
| 108 | select SYS_SUPPORTS_SH_CMT | ||
| 109 | select SYS_SUPPORTS_SH_TMU | ||
| 98 | 110 | ||
| 99 | config ARCH_R8A7778 | 111 | config ARCH_R8A7778 |
| 100 | bool "R-Car M1A (R8A77781)" | 112 | bool "R-Car M1A (R8A77781)" |
| @@ -104,6 +116,7 @@ config ARCH_R8A7778 | |||
| 104 | select ARM_GIC | 116 | select ARM_GIC |
| 105 | select USB_ARCH_HAS_EHCI | 117 | select USB_ARCH_HAS_EHCI |
| 106 | select USB_ARCH_HAS_OHCI | 118 | select USB_ARCH_HAS_OHCI |
| 119 | select SYS_SUPPORTS_SH_TMU | ||
| 107 | 120 | ||
| 108 | config ARCH_R8A7779 | 121 | config ARCH_R8A7779 |
| 109 | bool "R-Car H1 (R8A77790)" | 122 | bool "R-Car H1 (R8A77790)" |
| @@ -114,6 +127,7 @@ config ARCH_R8A7779 | |||
| 114 | select USB_ARCH_HAS_EHCI | 127 | select USB_ARCH_HAS_EHCI |
| 115 | select USB_ARCH_HAS_OHCI | 128 | select USB_ARCH_HAS_OHCI |
| 116 | select RENESAS_INTC_IRQPIN | 129 | select RENESAS_INTC_IRQPIN |
| 130 | select SYS_SUPPORTS_SH_TMU | ||
| 117 | 131 | ||
| 118 | config ARCH_R8A7790 | 132 | config ARCH_R8A7790 |
| 119 | bool "R-Car H2 (R8A77900)" | 133 | bool "R-Car H2 (R8A77900)" |
| @@ -123,6 +137,7 @@ config ARCH_R8A7790 | |||
| 123 | select MIGHT_HAVE_PCI | 137 | select MIGHT_HAVE_PCI |
| 124 | select SH_CLK_CPG | 138 | select SH_CLK_CPG |
| 125 | select RENESAS_IRQC | 139 | select RENESAS_IRQC |
| 140 | select SYS_SUPPORTS_SH_CMT | ||
| 126 | 141 | ||
| 127 | config ARCH_R8A7791 | 142 | config ARCH_R8A7791 |
| 128 | bool "R-Car M2 (R8A77910)" | 143 | bool "R-Car M2 (R8A77910)" |
| @@ -132,6 +147,7 @@ config ARCH_R8A7791 | |||
| 132 | select MIGHT_HAVE_PCI | 147 | select MIGHT_HAVE_PCI |
| 133 | select SH_CLK_CPG | 148 | select SH_CLK_CPG |
| 134 | select RENESAS_IRQC | 149 | select RENESAS_IRQC |
| 150 | select SYS_SUPPORTS_SH_CMT | ||
| 135 | 151 | ||
| 136 | config ARCH_EMEV2 | 152 | config ARCH_EMEV2 |
| 137 | bool "Emma Mobile EV2" | 153 | bool "Emma Mobile EV2" |
| @@ -141,6 +157,7 @@ config ARCH_EMEV2 | |||
| 141 | select MIGHT_HAVE_PCI | 157 | select MIGHT_HAVE_PCI |
| 142 | select USE_OF | 158 | select USE_OF |
| 143 | select AUTO_ZRELADDR | 159 | select AUTO_ZRELADDR |
| 160 | select SYS_SUPPORTS_EM_STI | ||
| 144 | 161 | ||
| 145 | config ARCH_R7S72100 | 162 | config ARCH_R7S72100 |
| 146 | bool "RZ/A1H (R7S72100)" | 163 | bool "RZ/A1H (R7S72100)" |
| @@ -148,6 +165,7 @@ config ARCH_R7S72100 | |||
| 148 | select ARM_GIC | 165 | select ARM_GIC |
| 149 | select CPU_V7 | 166 | select CPU_V7 |
| 150 | select SH_CLK_CPG | 167 | select SH_CLK_CPG |
| 168 | select SYS_SUPPORTS_SH_MTU2 | ||
| 151 | 169 | ||
| 152 | comment "Renesas ARM SoCs Board Type" | 170 | comment "Renesas ARM SoCs Board Type" |
| 153 | 171 | ||
| @@ -321,24 +339,6 @@ config SHMOBILE_TIMER_HZ | |||
| 321 | want to select a HZ value such as 128 that can evenly divide RCLK. | 339 | want to select a HZ value such as 128 that can evenly divide RCLK. |
| 322 | A HZ value that does not divide evenly may cause timer drift. | 340 | A HZ value that does not divide evenly may cause timer drift. |
| 323 | 341 | ||
| 324 | config SH_TIMER_CMT | ||
| 325 | bool "CMT timer driver" | ||
| 326 | default y | ||
| 327 | help | ||
| 328 | This enables build of the CMT timer driver. | ||
| 329 | |||
| 330 | config SH_TIMER_TMU | ||
| 331 | bool "TMU timer driver" | ||
| 332 | default y | ||
| 333 | help | ||
| 334 | This enables build of the TMU timer driver. | ||
| 335 | |||
| 336 | config EM_TIMER_STI | ||
| 337 | bool "STI timer driver" | ||
| 338 | default y | ||
| 339 | help | ||
| 340 | This enables build of the STI timer driver. | ||
| 341 | |||
| 342 | endmenu | 342 | endmenu |
| 343 | 343 | ||
| 344 | endif | 344 | endif |
diff --git a/arch/arm/mach-u300/Makefile b/arch/arm/mach-u300/Makefile index 0f362b64fb87..3ec74ac95bc1 100644 --- a/arch/arm/mach-u300/Makefile +++ b/arch/arm/mach-u300/Makefile | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | # Makefile for the linux kernel, U300 machine. | 2 | # Makefile for the linux kernel, U300 machine. |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-y := core.o timer.o | 5 | obj-y := core.o |
| 6 | obj-m := | 6 | obj-m := |
| 7 | obj-n := | 7 | obj-n := |
| 8 | obj- := | 8 | obj- := |
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig index 6b04260aa142..f03e75bd0b2b 100644 --- a/arch/arm/mach-zynq/Kconfig +++ b/arch/arm/mach-zynq/Kconfig | |||
| @@ -2,6 +2,8 @@ config ARCH_ZYNQ | |||
| 2 | bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7 | 2 | bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7 |
| 3 | select ARM_AMBA | 3 | select ARM_AMBA |
| 4 | select ARM_GIC | 4 | select ARM_GIC |
| 5 | select ARCH_HAS_CPUFREQ | ||
| 6 | select ARCH_HAS_OPP | ||
| 5 | select COMMON_CLK | 7 | select COMMON_CLK |
| 6 | select CPU_V7 | 8 | select CPU_V7 |
| 7 | select GENERIC_CLOCKEVENTS | 9 | select GENERIC_CLOCKEVENTS |
| @@ -13,6 +15,6 @@ config ARCH_ZYNQ | |||
| 13 | select HAVE_SMP | 15 | select HAVE_SMP |
| 14 | select SPARSE_IRQ | 16 | select SPARSE_IRQ |
| 15 | select CADENCE_TTC_TIMER | 17 | select CADENCE_TTC_TIMER |
| 16 | select ARM_GLOBAL_TIMER | 18 | select ARM_GLOBAL_TIMER if !CPU_FREQ |
| 17 | help | 19 | help |
| 18 | Support for Xilinx Zynq ARM Cortex A9 Platform | 20 | Support for Xilinx Zynq ARM Cortex A9 Platform |
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 8c09a8393fb6..a39be8e80856 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c | |||
| @@ -64,6 +64,8 @@ static struct platform_device zynq_cpuidle_device = { | |||
| 64 | */ | 64 | */ |
| 65 | static void __init zynq_init_machine(void) | 65 | static void __init zynq_init_machine(void) |
| 66 | { | 66 | { |
| 67 | struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; | ||
| 68 | |||
| 67 | /* | 69 | /* |
| 68 | * 64KB way size, 8-way associativity, parity disabled | 70 | * 64KB way size, 8-way associativity, parity disabled |
| 69 | */ | 71 | */ |
| @@ -72,6 +74,7 @@ static void __init zynq_init_machine(void) | |||
| 72 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 74 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
| 73 | 75 | ||
| 74 | platform_device_register(&zynq_cpuidle_device); | 76 | platform_device_register(&zynq_cpuidle_device); |
| 77 | platform_device_register_full(&devinfo); | ||
| 75 | } | 78 | } |
| 76 | 79 | ||
| 77 | static void __init zynq_timer_init(void) | 80 | static void __init zynq_timer_init(void) |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 6357710753d5..364d204298fa 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
| @@ -123,15 +123,6 @@ config SYS_SUPPORTS_NUMA | |||
| 123 | config SYS_SUPPORTS_PCI | 123 | config SYS_SUPPORTS_PCI |
| 124 | bool | 124 | bool |
| 125 | 125 | ||
| 126 | config SYS_SUPPORTS_CMT | ||
| 127 | bool | ||
| 128 | |||
| 129 | config SYS_SUPPORTS_MTU2 | ||
| 130 | bool | ||
| 131 | |||
| 132 | config SYS_SUPPORTS_TMU | ||
| 133 | bool | ||
| 134 | |||
| 135 | config STACKTRACE_SUPPORT | 126 | config STACKTRACE_SUPPORT |
| 136 | def_bool y | 127 | def_bool y |
| 137 | 128 | ||
| @@ -191,14 +182,14 @@ config CPU_SH3 | |||
| 191 | bool | 182 | bool |
| 192 | select CPU_HAS_INTEVT | 183 | select CPU_HAS_INTEVT |
| 193 | select CPU_HAS_SR_RB | 184 | select CPU_HAS_SR_RB |
| 194 | select SYS_SUPPORTS_TMU | 185 | select SYS_SUPPORTS_SH_TMU |
| 195 | 186 | ||
| 196 | config CPU_SH4 | 187 | config CPU_SH4 |
| 197 | bool | 188 | bool |
| 198 | select CPU_HAS_INTEVT | 189 | select CPU_HAS_INTEVT |
| 199 | select CPU_HAS_SR_RB | 190 | select CPU_HAS_SR_RB |
| 200 | select CPU_HAS_FPU if !CPU_SH4AL_DSP | 191 | select CPU_HAS_FPU if !CPU_SH4AL_DSP |
| 201 | select SYS_SUPPORTS_TMU | 192 | select SYS_SUPPORTS_SH_TMU |
| 202 | select SYS_SUPPORTS_HUGETLBFS if MMU | 193 | select SYS_SUPPORTS_HUGETLBFS if MMU |
| 203 | 194 | ||
| 204 | config CPU_SH4A | 195 | config CPU_SH4A |
| @@ -213,7 +204,7 @@ config CPU_SH4AL_DSP | |||
| 213 | config CPU_SH5 | 204 | config CPU_SH5 |
| 214 | bool | 205 | bool |
| 215 | select CPU_HAS_FPU | 206 | select CPU_HAS_FPU |
| 216 | select SYS_SUPPORTS_TMU | 207 | select SYS_SUPPORTS_SH_TMU |
| 217 | select SYS_SUPPORTS_HUGETLBFS if MMU | 208 | select SYS_SUPPORTS_HUGETLBFS if MMU |
| 218 | 209 | ||
| 219 | config CPU_SHX2 | 210 | config CPU_SHX2 |
| @@ -250,7 +241,7 @@ choice | |||
| 250 | config CPU_SUBTYPE_SH7619 | 241 | config CPU_SUBTYPE_SH7619 |
| 251 | bool "Support SH7619 processor" | 242 | bool "Support SH7619 processor" |
| 252 | select CPU_SH2 | 243 | select CPU_SH2 |
| 253 | select SYS_SUPPORTS_CMT | 244 | select SYS_SUPPORTS_SH_CMT |
| 254 | 245 | ||
| 255 | # SH-2A Processor Support | 246 | # SH-2A Processor Support |
| 256 | 247 | ||
| @@ -258,50 +249,50 @@ config CPU_SUBTYPE_SH7201 | |||
| 258 | bool "Support SH7201 processor" | 249 | bool "Support SH7201 processor" |
| 259 | select CPU_SH2A | 250 | select CPU_SH2A |
| 260 | select CPU_HAS_FPU | 251 | select CPU_HAS_FPU |
| 261 | select SYS_SUPPORTS_MTU2 | 252 | select SYS_SUPPORTS_SH_MTU2 |
| 262 | 253 | ||
| 263 | config CPU_SUBTYPE_SH7203 | 254 | config CPU_SUBTYPE_SH7203 |
| 264 | bool "Support SH7203 processor" | 255 | bool "Support SH7203 processor" |
| 265 | select CPU_SH2A | 256 | select CPU_SH2A |
| 266 | select CPU_HAS_FPU | 257 | select CPU_HAS_FPU |
| 267 | select SYS_SUPPORTS_CMT | 258 | select SYS_SUPPORTS_SH_CMT |
| 268 | select SYS_SUPPORTS_MTU2 | 259 | select SYS_SUPPORTS_SH_MTU2 |
| 269 | select ARCH_WANT_OPTIONAL_GPIOLIB | 260 | select ARCH_WANT_OPTIONAL_GPIOLIB |
| 270 | select PINCTRL | 261 | select PINCTRL |
| 271 | 262 | ||
| 272 | config CPU_SUBTYPE_SH7206 | 263 | config CPU_SUBTYPE_SH7206 |
| 273 | bool "Support SH7206 processor" | 264 | bool "Support SH7206 processor" |
| 274 | select CPU_SH2A | 265 | select CPU_SH2A |
| 275 | select SYS_SUPPORTS_CMT | 266 | select SYS_SUPPORTS_SH_CMT |
| 276 | select SYS_SUPPORTS_MTU2 | 267 | select SYS_SUPPORTS_SH_MTU2 |
| 277 | 268 | ||
| 278 | config CPU_SUBTYPE_SH7263 | 269 | config CPU_SUBTYPE_SH7263 |
| 279 | bool "Support SH7263 processor" | 270 | bool "Support SH7263 processor" |
| 280 | select CPU_SH2A | 271 | select CPU_SH2A |
| 281 | select CPU_HAS_FPU | 272 | select CPU_HAS_FPU |
| 282 | select SYS_SUPPORTS_CMT | 273 | select SYS_SUPPORTS_SH_CMT |
| 283 | select SYS_SUPPORTS_MTU2 | 274 | select SYS_SUPPORTS_SH_MTU2 |
| 284 | 275 | ||
| 285 | config CPU_SUBTYPE_SH7264 | 276 | config CPU_SUBTYPE_SH7264 |
| 286 | bool "Support SH7264 processor" | 277 | bool "Support SH7264 processor" |
| 287 | select CPU_SH2A | 278 | select CPU_SH2A |
| 288 | select CPU_HAS_FPU | 279 | select CPU_HAS_FPU |
| 289 | select SYS_SUPPORTS_CMT | 280 | select SYS_SUPPORTS_SH_CMT |
| 290 | select SYS_SUPPORTS_MTU2 | 281 | select SYS_SUPPORTS_SH_MTU2 |
| 291 | select PINCTRL | 282 | select PINCTRL |
| 292 | 283 | ||
| 293 | config CPU_SUBTYPE_SH7269 | 284 | config CPU_SUBTYPE_SH7269 |
| 294 | bool "Support SH7269 processor" | 285 | bool "Support SH7269 processor" |
| 295 | select CPU_SH2A | 286 | select CPU_SH2A |
| 296 | select CPU_HAS_FPU | 287 | select CPU_HAS_FPU |
| 297 | select SYS_SUPPORTS_CMT | 288 | select SYS_SUPPORTS_SH_CMT |
| 298 | select SYS_SUPPORTS_MTU2 | 289 | select SYS_SUPPORTS_SH_MTU2 |
| 299 | select PINCTRL | 290 | select PINCTRL |
| 300 | 291 | ||
| 301 | config CPU_SUBTYPE_MXG | 292 | config CPU_SUBTYPE_MXG |
| 302 | bool "Support MX-G processor" | 293 | bool "Support MX-G processor" |
| 303 | select CPU_SH2A | 294 | select CPU_SH2A |
| 304 | select SYS_SUPPORTS_MTU2 | 295 | select SYS_SUPPORTS_SH_MTU2 |
| 305 | help | 296 | help |
| 306 | Select MX-G if running on an R8A03022BG part. | 297 | Select MX-G if running on an R8A03022BG part. |
| 307 | 298 | ||
| @@ -354,7 +345,7 @@ config CPU_SUBTYPE_SH7720 | |||
| 354 | bool "Support SH7720 processor" | 345 | bool "Support SH7720 processor" |
| 355 | select CPU_SH3 | 346 | select CPU_SH3 |
| 356 | select CPU_HAS_DSP | 347 | select CPU_HAS_DSP |
| 357 | select SYS_SUPPORTS_CMT | 348 | select SYS_SUPPORTS_SH_CMT |
| 358 | select ARCH_WANT_OPTIONAL_GPIOLIB | 349 | select ARCH_WANT_OPTIONAL_GPIOLIB |
| 359 | select USB_ARCH_HAS_OHCI | 350 | select USB_ARCH_HAS_OHCI |
| 360 | select USB_OHCI_SH if USB_OHCI_HCD | 351 | select USB_OHCI_SH if USB_OHCI_HCD |
| @@ -366,7 +357,7 @@ config CPU_SUBTYPE_SH7721 | |||
| 366 | bool "Support SH7721 processor" | 357 | bool "Support SH7721 processor" |
| 367 | select CPU_SH3 | 358 | select CPU_SH3 |
| 368 | select CPU_HAS_DSP | 359 | select CPU_HAS_DSP |
| 369 | select SYS_SUPPORTS_CMT | 360 | select SYS_SUPPORTS_SH_CMT |
| 370 | select USB_ARCH_HAS_OHCI | 361 | select USB_ARCH_HAS_OHCI |
| 371 | select USB_OHCI_SH if USB_OHCI_HCD | 362 | select USB_OHCI_SH if USB_OHCI_HCD |
| 372 | help | 363 | help |
| @@ -422,7 +413,7 @@ config CPU_SUBTYPE_SH7723 | |||
| 422 | select CPU_SHX2 | 413 | select CPU_SHX2 |
| 423 | select ARCH_SHMOBILE | 414 | select ARCH_SHMOBILE |
| 424 | select ARCH_SPARSEMEM_ENABLE | 415 | select ARCH_SPARSEMEM_ENABLE |
| 425 | select SYS_SUPPORTS_CMT | 416 | select SYS_SUPPORTS_SH_CMT |
| 426 | select ARCH_WANT_OPTIONAL_GPIOLIB | 417 | select ARCH_WANT_OPTIONAL_GPIOLIB |
| 427 | select PINCTRL | 418 | select PINCTRL |
| 428 | help | 419 | help |
| @@ -434,7 +425,7 @@ config CPU_SUBTYPE_SH7724 | |||
| 434 | select CPU_SHX2 | 425 | select CPU_SHX2 |
| 435 | select ARCH_SHMOBILE | 426 | select ARCH_SHMOBILE |
| 436 | select ARCH_SPARSEMEM_ENABLE | 427 | select ARCH_SPARSEMEM_ENABLE |
| 437 | select SYS_SUPPORTS_CMT | 428 | select SYS_SUPPORTS_SH_CMT |
| 438 | select ARCH_WANT_OPTIONAL_GPIOLIB | 429 | select ARCH_WANT_OPTIONAL_GPIOLIB |
| 439 | select PINCTRL | 430 | select PINCTRL |
| 440 | help | 431 | help |
| @@ -514,7 +505,7 @@ config CPU_SUBTYPE_SH7343 | |||
| 514 | bool "Support SH7343 processor" | 505 | bool "Support SH7343 processor" |
| 515 | select CPU_SH4AL_DSP | 506 | select CPU_SH4AL_DSP |
| 516 | select ARCH_SHMOBILE | 507 | select ARCH_SHMOBILE |
| 517 | select SYS_SUPPORTS_CMT | 508 | select SYS_SUPPORTS_SH_CMT |
| 518 | 509 | ||
| 519 | config CPU_SUBTYPE_SH7722 | 510 | config CPU_SUBTYPE_SH7722 |
| 520 | bool "Support SH7722 processor" | 511 | bool "Support SH7722 processor" |
| @@ -523,7 +514,7 @@ config CPU_SUBTYPE_SH7722 | |||
| 523 | select ARCH_SHMOBILE | 514 | select ARCH_SHMOBILE |
| 524 | select ARCH_SPARSEMEM_ENABLE | 515 | select ARCH_SPARSEMEM_ENABLE |
| 525 | select SYS_SUPPORTS_NUMA | 516 | select SYS_SUPPORTS_NUMA |
| 526 | select SYS_SUPPORTS_CMT | 517 | select SYS_SUPPORTS_SH_CMT |
| 527 | select ARCH_WANT_OPTIONAL_GPIOLIB | 518 | select ARCH_WANT_OPTIONAL_GPIOLIB |
| 528 | select PINCTRL | 519 | select PINCTRL |
| 529 | 520 | ||
| @@ -534,7 +525,7 @@ config CPU_SUBTYPE_SH7366 | |||
| 534 | select ARCH_SHMOBILE | 525 | select ARCH_SHMOBILE |
| 535 | select ARCH_SPARSEMEM_ENABLE | 526 | select ARCH_SPARSEMEM_ENABLE |
| 536 | select SYS_SUPPORTS_NUMA | 527 | select SYS_SUPPORTS_NUMA |
| 537 | select SYS_SUPPORTS_CMT | 528 | select SYS_SUPPORTS_SH_CMT |
| 538 | 529 | ||
| 539 | endchoice | 530 | endchoice |
| 540 | 531 | ||
| @@ -567,27 +558,6 @@ source "arch/sh/boards/Kconfig" | |||
| 567 | 558 | ||
| 568 | menu "Timer and clock configuration" | 559 | menu "Timer and clock configuration" |
| 569 | 560 | ||
| 570 | config SH_TIMER_TMU | ||
| 571 | bool "TMU timer driver" | ||
| 572 | depends on SYS_SUPPORTS_TMU | ||
| 573 | default y | ||
| 574 | help | ||
| 575 | This enables the build of the TMU timer driver. | ||
| 576 | |||
| 577 | config SH_TIMER_CMT | ||
| 578 | bool "CMT timer driver" | ||
| 579 | depends on SYS_SUPPORTS_CMT | ||
| 580 | default y | ||
| 581 | help | ||
| 582 | This enables build of the CMT timer driver. | ||
| 583 | |||
| 584 | config SH_TIMER_MTU2 | ||
| 585 | bool "MTU2 timer driver" | ||
| 586 | depends on SYS_SUPPORTS_MTU2 | ||
| 587 | default y | ||
| 588 | help | ||
| 589 | This enables build of the MTU2 timer driver. | ||
| 590 | |||
| 591 | config SH_PCLK_FREQ | 561 | config SH_PCLK_FREQ |
| 592 | int "Peripheral clock frequency (in Hz)" | 562 | int "Peripheral clock frequency (in Hz)" |
| 593 | depends on SH_CLK_CPG_LEGACY | 563 | depends on SH_CLK_CPG_LEGACY |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index da85a8e830a1..b91abfdd4931 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
| @@ -699,7 +699,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n, | |||
| 699 | /* FIXME: add schedule_work_on() */ | 699 | /* FIXME: add schedule_work_on() */ |
| 700 | schedule_delayed_work_on(cpu, &work.work, 0); | 700 | schedule_delayed_work_on(cpu, &work.work, 0); |
| 701 | wait_for_completion(&work.complete); | 701 | wait_for_completion(&work.complete); |
| 702 | destroy_timer_on_stack(&work.work.timer); | 702 | destroy_delayed_work_on_stack(&work.work); |
| 703 | break; | 703 | break; |
| 704 | case CPU_DEAD: | 704 | case CPU_DEAD: |
| 705 | if (hdev) { | 705 | if (hdev) { |
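
The destroy_delayed_work_on_stack() call used here relies on the helper added by the "workqueue: Provide destroy_delayed_work_on_stack()" commit from this series; it pairs with INIT_DELAYED_WORK_ONSTACK() and tears down the debug-object state of both the delayed work and its embedded timer, instead of only the timer as the old destroy_timer_on_stack() call did. A rough sketch of the on-stack pattern the HPET notifier follows (struct and function names below are illustrative, not the exact ones from hpet.c):

#include <linux/workqueue.h>
#include <linux/completion.h>

struct example_work {
	struct delayed_work work;
	struct completion done;
};

static void example_work_fn(struct work_struct *w)
{
	struct example_work *ew =
		container_of(to_delayed_work(w), struct example_work, work);

	/* per-CPU setup would run here */
	complete(&ew->done);
}

static void example_run_on(int cpu)
{
	struct example_work ew;

	INIT_DELAYED_WORK_ONSTACK(&ew.work, example_work_fn);
	init_completion(&ew.done);
	schedule_delayed_work_on(cpu, &ew.work, 0);
	wait_for_completion(&ew.done);
	/* new helper from this series: a proper destructor for an on-stack
	 * delayed work, covering both the work item and its timer */
	destroy_delayed_work_on_stack(&ew.work);
}
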
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index cd6950fd8caf..52e9329e3c51 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
| @@ -140,3 +140,51 @@ config VF_PIT_TIMER | |||
| 140 | bool | 140 | bool |
| 141 | help | 141 | help |
| 142 | Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. | 142 | Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. |
| 143 | |||
| 144 | config SYS_SUPPORTS_SH_CMT | ||
| 145 | bool | ||
| 146 | |||
| 147 | config SYS_SUPPORTS_SH_MTU2 | ||
| 148 | bool | ||
| 149 | |||
| 150 | config SYS_SUPPORTS_SH_TMU | ||
| 151 | bool | ||
| 152 | |||
| 153 | config SYS_SUPPORTS_EM_STI | ||
| 154 | bool | ||
| 155 | |||
| 156 | config SH_TIMER_CMT | ||
| 157 | bool "Renesas CMT timer driver" if COMPILE_TEST | ||
| 158 | depends on GENERIC_CLOCKEVENTS | ||
| 159 | default SYS_SUPPORTS_SH_CMT | ||
| 160 | help | ||
| 161 | This enables build of a clocksource and clockevent driver for | ||
| 162 | the Compare Match Timer (CMT) hardware available in 16/32/48-bit | ||
| 163 | variants on a wide range of Mobile and Automotive SoCs from Renesas. | ||
| 164 | |||
| 165 | config SH_TIMER_MTU2 | ||
| 166 | bool "Renesas MTU2 timer driver" if COMPILE_TEST | ||
| 167 | depends on GENERIC_CLOCKEVENTS | ||
| 168 | default SYS_SUPPORTS_SH_MTU2 | ||
| 169 | help | ||
| 170 | This enables build of a clockevent driver for the Multi-Function | ||
| 171 | Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas. | ||
| 172 | This hardware comes with 16-bit timer registers. | ||
| 173 | |||
| 174 | config SH_TIMER_TMU | ||
| 175 | bool "Renesas TMU timer driver" if COMPILE_TEST | ||
| 176 | depends on GENERIC_CLOCKEVENTS | ||
| 177 | default SYS_SUPPORTS_SH_TMU | ||
| 178 | help | ||
| 179 | This enables build of a clocksource and clockevent driver for | ||
| 180 | the 32-bit Timer Unit (TMU) hardware available on a wide range of | ||
| 181 | SoCs from Renesas. | ||
| 182 | |||
| 183 | config EM_TIMER_STI | ||
| 184 | bool "Renesas STI timer driver" if COMPILE_TEST | ||
| 185 | depends on GENERIC_CLOCKEVENTS | ||
| 186 | default SYS_SUPPORTS_EM_STI | ||
| 187 | help | ||
| 188 | This enables build of a clocksource and clockevent driver for | ||
| 189 | the 48-bit System Timer (STI) hardware available on SoCs | ||
| 190 | such as EMEV2 from the former NEC Electronics. | ||
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index c7ca50a9c232..aed3488d9426 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile | |||
| @@ -21,6 +21,7 @@ obj-$(CONFIG_ARCH_MARCO) += timer-marco.o | |||
| 21 | obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o | 21 | obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o |
| 22 | obj-$(CONFIG_ARCH_MXS) += mxs_timer.o | 22 | obj-$(CONFIG_ARCH_MXS) += mxs_timer.o |
| 23 | obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o | 23 | obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o |
| 24 | obj-$(CONFIG_ARCH_U300) += timer-u300.o | ||
| 24 | obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o | 25 | obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o |
| 25 | obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o | 26 | obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o |
| 26 | obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o | 27 | obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o |
| @@ -37,3 +38,4 @@ obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o | |||
| 37 | obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o | 38 | obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o |
| 38 | obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o | 39 | obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o |
| 39 | obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o | 40 | obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o |
| 41 | obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o | ||
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 95fb944e15ee..57e823c44d2a 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
| @@ -277,6 +277,7 @@ static void __arch_timer_setup(unsigned type, | |||
| 277 | clk->set_next_event = arch_timer_set_next_event_phys; | 277 | clk->set_next_event = arch_timer_set_next_event_phys; |
| 278 | } | 278 | } |
| 279 | } else { | 279 | } else { |
| 280 | clk->features |= CLOCK_EVT_FEAT_DYNIRQ; | ||
| 280 | clk->name = "arch_mem_timer"; | 281 | clk->name = "arch_mem_timer"; |
| 281 | clk->rating = 400; | 282 | clk->rating = 400; |
| 282 | clk->cpumask = cpu_all_mask; | 283 | clk->cpumask = cpu_all_mask; |
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index 63f176de0d02..49fbe2847c84 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
| 19 | #include <linux/clk-provider.h> | ||
| 19 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
| 20 | #include <linux/clockchips.h> | 21 | #include <linux/clockchips.h> |
| 21 | #include <linux/of_address.h> | 22 | #include <linux/of_address.h> |
| @@ -52,6 +53,8 @@ | |||
| 52 | #define TTC_CNT_CNTRL_DISABLE_MASK 0x1 | 53 | #define TTC_CNT_CNTRL_DISABLE_MASK 0x1 |
| 53 | 54 | ||
| 54 | #define TTC_CLK_CNTRL_CSRC_MASK (1 << 5) /* clock source */ | 55 | #define TTC_CLK_CNTRL_CSRC_MASK (1 << 5) /* clock source */ |
| 56 | #define TTC_CLK_CNTRL_PSV_MASK 0x1e | ||
| 57 | #define TTC_CLK_CNTRL_PSV_SHIFT 1 | ||
| 55 | 58 | ||
| 56 | /* | 59 | /* |
| 57 | * Setup the timers to use pre-scaling, using a fixed value for now that will | 60 | * Setup the timers to use pre-scaling, using a fixed value for now that will |
| @@ -63,6 +66,8 @@ | |||
| 63 | #define CLK_CNTRL_PRESCALE_EN 1 | 66 | #define CLK_CNTRL_PRESCALE_EN 1 |
| 64 | #define CNT_CNTRL_RESET (1 << 4) | 67 | #define CNT_CNTRL_RESET (1 << 4) |
| 65 | 68 | ||
| 69 | #define MAX_F_ERR 50 | ||
| 70 | |||
| 66 | /** | 71 | /** |
| 67 | * struct ttc_timer - This definition defines local timer structure | 72 | * struct ttc_timer - This definition defines local timer structure |
| 68 | * | 73 | * |
| @@ -82,6 +87,8 @@ struct ttc_timer { | |||
| 82 | container_of(x, struct ttc_timer, clk_rate_change_nb) | 87 | container_of(x, struct ttc_timer, clk_rate_change_nb) |
| 83 | 88 | ||
| 84 | struct ttc_timer_clocksource { | 89 | struct ttc_timer_clocksource { |
| 90 | u32 scale_clk_ctrl_reg_old; | ||
| 91 | u32 scale_clk_ctrl_reg_new; | ||
| 85 | struct ttc_timer ttc; | 92 | struct ttc_timer ttc; |
| 86 | struct clocksource cs; | 93 | struct clocksource cs; |
| 87 | }; | 94 | }; |
| @@ -229,32 +236,89 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, | |||
| 229 | struct ttc_timer_clocksource, ttc); | 236 | struct ttc_timer_clocksource, ttc); |
| 230 | 237 | ||
| 231 | switch (event) { | 238 | switch (event) { |
| 232 | case POST_RATE_CHANGE: | 239 | case PRE_RATE_CHANGE: |
| 240 | { | ||
| 241 | u32 psv; | ||
| 242 | unsigned long factor, rate_low, rate_high; | ||
| 243 | |||
| 244 | if (ndata->new_rate > ndata->old_rate) { | ||
| 245 | factor = DIV_ROUND_CLOSEST(ndata->new_rate, | ||
| 246 | ndata->old_rate); | ||
| 247 | rate_low = ndata->old_rate; | ||
| 248 | rate_high = ndata->new_rate; | ||
| 249 | } else { | ||
| 250 | factor = DIV_ROUND_CLOSEST(ndata->old_rate, | ||
| 251 | ndata->new_rate); | ||
| 252 | rate_low = ndata->new_rate; | ||
| 253 | rate_high = ndata->old_rate; | ||
| 254 | } | ||
| 255 | |||
| 256 | if (!is_power_of_2(factor)) | ||
| 257 | return NOTIFY_BAD; | ||
| 258 | |||
| 259 | if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR) | ||
| 260 | return NOTIFY_BAD; | ||
| 261 | |||
| 262 | factor = __ilog2_u32(factor); | ||
| 263 | |||
| 233 | /* | 264 | /* |
| 234 | * Do whatever is necessary to maintain a proper time base | 265 | * store timer clock ctrl register so we can restore it in case |
| 235 | * | 266 | * of an abort. |
| 236 | * I cannot find a way to adjust the currently used clocksource | ||
| 237 | * to the new frequency. __clocksource_updatefreq_hz() sounds | ||
| 238 | * good, but does not work. Not sure what's that missing. | ||
| 239 | * | ||
| 240 | * This approach works, but triggers two clocksource switches. | ||
| 241 | * The first after unregister to clocksource jiffies. And | ||
| 242 | * another one after the register to the newly registered timer. | ||
| 243 | * | ||
| 244 | * Alternatively we could 'waste' another HW timer to ping pong | ||
| 245 | * between clock sources. That would also use one register and | ||
| 246 | * one unregister call, but only trigger one clocksource switch | ||
| 247 | * for the cost of another HW timer used by the OS. | ||
| 248 | */ | 267 | */ |
| 249 | clocksource_unregister(&ttccs->cs); | 268 | ttccs->scale_clk_ctrl_reg_old = |
| 250 | clocksource_register_hz(&ttccs->cs, | 269 | __raw_readl(ttccs->ttc.base_addr + |
| 251 | ndata->new_rate / PRESCALE); | 270 | TTC_CLK_CNTRL_OFFSET); |
| 252 | /* fall through */ | 271 | |
| 253 | case PRE_RATE_CHANGE: | 272 | psv = (ttccs->scale_clk_ctrl_reg_old & |
| 273 | TTC_CLK_CNTRL_PSV_MASK) >> | ||
| 274 | TTC_CLK_CNTRL_PSV_SHIFT; | ||
| 275 | if (ndata->new_rate < ndata->old_rate) | ||
| 276 | psv -= factor; | ||
| 277 | else | ||
| 278 | psv += factor; | ||
| 279 | |||
| 280 | /* prescaler within legal range? */ | ||
| 281 | if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT)) | ||
| 282 | return NOTIFY_BAD; | ||
| 283 | |||
| 284 | ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old & | ||
| 285 | ~TTC_CLK_CNTRL_PSV_MASK; | ||
| 286 | ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT; | ||
| 287 | |||
| 288 | |||
| 289 | /* scale down: adjust divider in post-change notification */ | ||
| 290 | if (ndata->new_rate < ndata->old_rate) | ||
| 291 | return NOTIFY_DONE; | ||
| 292 | |||
| 293 | /* scale up: adjust divider now - before frequency change */ | ||
| 294 | __raw_writel(ttccs->scale_clk_ctrl_reg_new, | ||
| 295 | ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); | ||
| 296 | break; | ||
| 297 | } | ||
| 298 | case POST_RATE_CHANGE: | ||
| 299 | /* scale up: pre-change notification did the adjustment */ | ||
| 300 | if (ndata->new_rate > ndata->old_rate) | ||
| 301 | return NOTIFY_OK; | ||
| 302 | |||
| 303 | /* scale down: adjust divider now - after frequency change */ | ||
| 304 | __raw_writel(ttccs->scale_clk_ctrl_reg_new, | ||
| 305 | ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); | ||
| 306 | break; | ||
| 307 | |||
| 254 | case ABORT_RATE_CHANGE: | 308 | case ABORT_RATE_CHANGE: |
| 309 | /* we have to undo the adjustment in case we scale up */ | ||
| 310 | if (ndata->new_rate < ndata->old_rate) | ||
| 311 | return NOTIFY_OK; | ||
| 312 | |||
| 313 | /* restore original register value */ | ||
| 314 | __raw_writel(ttccs->scale_clk_ctrl_reg_old, | ||
| 315 | ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); | ||
| 316 | /* fall through */ | ||
| 255 | default: | 317 | default: |
| 256 | return NOTIFY_DONE; | 318 | return NOTIFY_DONE; |
| 257 | } | 319 | } |
| 320 | |||
| 321 | return NOTIFY_DONE; | ||
| 258 | } | 322 | } |
| 259 | 323 | ||
| 260 | static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base) | 324 | static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base) |
| @@ -321,25 +385,12 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, | |||
| 321 | 385 | ||
| 322 | switch (event) { | 386 | switch (event) { |
| 323 | case POST_RATE_CHANGE: | 387 | case POST_RATE_CHANGE: |
| 324 | { | ||
| 325 | unsigned long flags; | ||
| 326 | |||
| 327 | /* | ||
| 328 | * clockevents_update_freq should be called with IRQ disabled on | ||
| 329 | * the CPU the timer provides events for. The timer we use is | ||
| 330 | * common to both CPUs, not sure if we need to run on both | ||
| 331 | * cores. | ||
| 332 | */ | ||
| 333 | local_irq_save(flags); | ||
| 334 | clockevents_update_freq(&ttcce->ce, | ||
| 335 | ndata->new_rate / PRESCALE); | ||
| 336 | local_irq_restore(flags); | ||
| 337 | |||
| 338 | /* update cached frequency */ | 388 | /* update cached frequency */ |
| 339 | ttc->freq = ndata->new_rate; | 389 | ttc->freq = ndata->new_rate; |
| 340 | 390 | ||
| 391 | clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE); | ||
| 392 | |||
| 341 | /* fall through */ | 393 | /* fall through */ |
| 342 | } | ||
| 343 | case PRE_RATE_CHANGE: | 394 | case PRE_RATE_CHANGE: |
| 344 | case ABORT_RATE_CHANGE: | 395 | case ABORT_RATE_CHANGE: |
| 345 | default: | 396 | default: |
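
The rate-change handling added above only accepts power-of-two ratios between the old and new parent rate and shifts the TTC prescaler by log2 of that ratio, writing the register before the rate increases (PRE_RATE_CHANGE) and after it decreases (POST_RATE_CHANGE) so the counter never runs too fast in between. A minimal sketch of just the arithmetic, with an illustrative function name and without the register I/O:

#include <linux/kernel.h>
#include <linux/log2.h>

/*
 * Sketch of the prescaler math from the notifier above.  For example a
 * 100 MHz -> 50 MHz parent change gives factor = 2, ilog2(factor) = 1,
 * so the prescale value drops by one and the effective counting
 * frequency of the TTC is preserved.
 */
static int ttc_example_new_psv(u32 psv, unsigned long old_rate,
			       unsigned long new_rate)
{
	unsigned long factor;

	if (new_rate > old_rate)
		factor = DIV_ROUND_CLOSEST(new_rate, old_rate);
	else
		factor = DIV_ROUND_CLOSEST(old_rate, new_rate);

	if (!is_power_of_2(factor))
		return -EINVAL;		/* the notifier returns NOTIFY_BAD */

	factor = ilog2(factor);

	return (new_rate < old_rate) ? psv - factor : psv + factor;
}
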
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 48f76bc05da0..c2e390efbdca 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c | |||
| @@ -410,7 +410,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) | |||
| 410 | mevt = container_of(evt, struct mct_clock_event_device, evt); | 410 | mevt = container_of(evt, struct mct_clock_event_device, evt); |
| 411 | 411 | ||
| 412 | mevt->base = EXYNOS4_MCT_L_BASE(cpu); | 412 | mevt->base = EXYNOS4_MCT_L_BASE(cpu); |
| 413 | sprintf(mevt->name, "mct_tick%d", cpu); | 413 | snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu); |
| 414 | 414 | ||
| 415 | evt->name = mevt->name; | 415 | evt->name = mevt->name; |
| 416 | evt->cpumask = cpumask_of(cpu); | 416 | evt->cpumask = cpumask_of(cpu); |
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index bf497afba9ad..efb17c3ee120 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c | |||
| @@ -196,5 +196,5 @@ static void __init sun4i_timer_init(struct device_node *node) | |||
| 196 | clockevents_config_and_register(&sun4i_clockevent, rate, | 196 | clockevents_config_and_register(&sun4i_clockevent, rate, |
| 197 | TIMER_SYNC_TICKS, 0xffffffff); | 197 | TIMER_SYNC_TICKS, 0xffffffff); |
| 198 | } | 198 | } |
| 199 | CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer", | 199 | CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", |
| 200 | sun4i_timer_init); | 200 | sun4i_timer_init); |
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index ee8691b89944..0451e62fac7a 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c | |||
| @@ -85,12 +85,6 @@ static u32 ticks_per_jiffy; | |||
| 85 | 85 | ||
| 86 | static struct clock_event_device __percpu *armada_370_xp_evt; | 86 | static struct clock_event_device __percpu *armada_370_xp_evt; |
| 87 | 87 | ||
| 88 | static void timer_ctrl_clrset(u32 clr, u32 set) | ||
| 89 | { | ||
| 90 | writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set, | ||
| 91 | timer_base + TIMER_CTRL_OFF); | ||
| 92 | } | ||
| 93 | |||
| 94 | static void local_timer_ctrl_clrset(u32 clr, u32 set) | 88 | static void local_timer_ctrl_clrset(u32 clr, u32 set) |
| 95 | { | 89 | { |
| 96 | writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set, | 90 | writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set, |
| @@ -245,7 +239,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) | |||
| 245 | clr = TIMER0_25MHZ; | 239 | clr = TIMER0_25MHZ; |
| 246 | enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT); | 240 | enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT); |
| 247 | } | 241 | } |
| 248 | timer_ctrl_clrset(clr, set); | 242 | atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set); |
| 249 | local_timer_ctrl_clrset(clr, set); | 243 | local_timer_ctrl_clrset(clr, set); |
| 250 | 244 | ||
| 251 | /* | 245 | /* |
| @@ -263,7 +257,9 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) | |||
| 263 | writel(0xffffffff, timer_base + TIMER0_VAL_OFF); | 257 | writel(0xffffffff, timer_base + TIMER0_VAL_OFF); |
| 264 | writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); | 258 | writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); |
| 265 | 259 | ||
| 266 | timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask); | 260 | atomic_io_modify(timer_base + TIMER_CTRL_OFF, |
| 261 | TIMER0_RELOAD_EN | enable_mask, | ||
| 262 | TIMER0_RELOAD_EN | enable_mask); | ||
| 267 | 263 | ||
| 268 | /* | 264 | /* |
| 269 | * Set scale and timer for sched_clock. | 265 | * Set scale and timer for sched_clock. |
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c index 20066222f3f2..0b3ce0399c51 100644 --- a/drivers/clocksource/time-orion.c +++ b/drivers/clocksource/time-orion.c | |||
| @@ -35,20 +35,6 @@ | |||
| 35 | #define ORION_ONESHOT_MAX 0xfffffffe | 35 | #define ORION_ONESHOT_MAX 0xfffffffe |
| 36 | 36 | ||
| 37 | static void __iomem *timer_base; | 37 | static void __iomem *timer_base; |
| 38 | static DEFINE_SPINLOCK(timer_ctrl_lock); | ||
| 39 | |||
| 40 | /* | ||
| 41 | * Thread-safe access to TIMER_CTRL register | ||
| 42 | * (shared with watchdog timer) | ||
| 43 | */ | ||
| 44 | void orion_timer_ctrl_clrset(u32 clr, u32 set) | ||
| 45 | { | ||
| 46 | spin_lock(&timer_ctrl_lock); | ||
| 47 | writel((readl(timer_base + TIMER_CTRL) & ~clr) | set, | ||
| 48 | timer_base + TIMER_CTRL); | ||
| 49 | spin_unlock(&timer_ctrl_lock); | ||
| 50 | } | ||
| 51 | EXPORT_SYMBOL(orion_timer_ctrl_clrset); | ||
| 52 | 38 | ||
| 53 | /* | 39 | /* |
| 54 | * Free-running clocksource handling. | 40 | * Free-running clocksource handling. |
| @@ -68,7 +54,8 @@ static int orion_clkevt_next_event(unsigned long delta, | |||
| 68 | { | 54 | { |
| 69 | /* setup and enable one-shot timer */ | 55 | /* setup and enable one-shot timer */ |
| 70 | writel(delta, timer_base + TIMER1_VAL); | 56 | writel(delta, timer_base + TIMER1_VAL); |
| 71 | orion_timer_ctrl_clrset(TIMER1_RELOAD_EN, TIMER1_EN); | 57 | atomic_io_modify(timer_base + TIMER_CTRL, |
| 58 | TIMER1_RELOAD_EN | TIMER1_EN, TIMER1_EN); | ||
| 72 | 59 | ||
| 73 | return 0; | 60 | return 0; |
| 74 | } | 61 | } |
| @@ -80,10 +67,13 @@ static void orion_clkevt_mode(enum clock_event_mode mode, | |||
| 80 | /* setup and enable periodic timer at 1/HZ intervals */ | 67 | /* setup and enable periodic timer at 1/HZ intervals */ |
| 81 | writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD); | 68 | writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD); |
| 82 | writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL); | 69 | writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL); |
| 83 | orion_timer_ctrl_clrset(0, TIMER1_RELOAD_EN | TIMER1_EN); | 70 | atomic_io_modify(timer_base + TIMER_CTRL, |
| 71 | TIMER1_RELOAD_EN | TIMER1_EN, | ||
| 72 | TIMER1_RELOAD_EN | TIMER1_EN); | ||
| 84 | } else { | 73 | } else { |
| 85 | /* disable timer */ | 74 | /* disable timer */ |
| 86 | orion_timer_ctrl_clrset(TIMER1_RELOAD_EN | TIMER1_EN, 0); | 75 | atomic_io_modify(timer_base + TIMER_CTRL, |
| 76 | TIMER1_RELOAD_EN | TIMER1_EN, 0); | ||
| 87 | } | 77 | } |
| 88 | } | 78 | } |
| 89 | 79 | ||
| @@ -131,7 +121,9 @@ static void __init orion_timer_init(struct device_node *np) | |||
| 131 | /* setup timer0 as free-running clocksource */ | 121 | /* setup timer0 as free-running clocksource */ |
| 132 | writel(~0, timer_base + TIMER0_VAL); | 122 | writel(~0, timer_base + TIMER0_VAL); |
| 133 | writel(~0, timer_base + TIMER0_RELOAD); | 123 | writel(~0, timer_base + TIMER0_RELOAD); |
| 134 | orion_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | TIMER0_EN); | 124 | atomic_io_modify(timer_base + TIMER_CTRL, |
| 125 | TIMER0_RELOAD_EN | TIMER0_EN, | ||
| 126 | TIMER0_RELOAD_EN | TIMER0_EN); | ||
| 135 | clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", | 127 | clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", |
| 136 | clk_get_rate(clk), 300, 32, | 128 | clk_get_rate(clk), 300, 32, |
| 137 | clocksource_mmio_readl_down); | 129 | clocksource_mmio_readl_down); |
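
Both this conversion and the armada-370-xp one above replace an open-coded read-modify-write of the shared TIMER_CTRL register (spinlock-protected in the Orion case) with ARM's atomic_io_modify() helper. With the (reg, mask, set) convention visible in these hunks, the removed clear/set helpers map onto it roughly as follows (the wrapper name is illustrative):

#include <linux/io.h>

/*
 * Illustrative equivalent of the removed timer_ctrl_clrset()-style
 * helpers, assuming atomic_io_modify(reg, mask, set) rewrites the
 * register to (old & ~mask) | (set & mask) under a common lock, which
 * is what lets the watchdog driver share TIMER_CTRL safely.
 */
static void example_timer_ctrl_clrset(void __iomem *reg, u32 clr, u32 set)
{
	/* clear the bits in clr, set the bits in set */
	atomic_io_modify(reg, clr | set, set);
}
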
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c new file mode 100644 index 000000000000..0250354f7e55 --- /dev/null +++ b/drivers/clocksource/timer-keystone.c | |||
| @@ -0,0 +1,241 @@ | |||
| 1 | /* | ||
| 2 | * Keystone broadcast clock-event | ||
| 3 | * | ||
| 4 | * Copyright 2013 Texas Instruments, Inc. | ||
| 5 | * | ||
| 6 | * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/clk.h> | ||
| 15 | #include <linux/clockchips.h> | ||
| 16 | #include <linux/clocksource.h> | ||
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/of_address.h> | ||
| 19 | #include <linux/of_irq.h> | ||
| 20 | |||
| 21 | #define TIMER_NAME "timer-keystone" | ||
| 22 | |||
| 23 | /* Timer register offsets */ | ||
| 24 | #define TIM12 0x10 | ||
| 25 | #define TIM34 0x14 | ||
| 26 | #define PRD12 0x18 | ||
| 27 | #define PRD34 0x1c | ||
| 28 | #define TCR 0x20 | ||
| 29 | #define TGCR 0x24 | ||
| 30 | #define INTCTLSTAT 0x44 | ||
| 31 | |||
| 32 | /* Timer register bitfields */ | ||
| 33 | #define TCR_ENAMODE_MASK 0xC0 | ||
| 34 | #define TCR_ENAMODE_ONESHOT_MASK 0x40 | ||
| 35 | #define TCR_ENAMODE_PERIODIC_MASK 0x80 | ||
| 36 | |||
| 37 | #define TGCR_TIM_UNRESET_MASK 0x03 | ||
| 38 | #define INTCTLSTAT_ENINT_MASK 0x01 | ||
| 39 | |||
| 40 | /** | ||
| 41 | * struct keystone_timer: holds timer's data | ||
| 42 | * @base: timer memory base address | ||
| 43 | * @hz_period: cycles per HZ period | ||
| 44 | * @event_dev: event device based on timer | ||
| 45 | */ | ||
| 46 | static struct keystone_timer { | ||
| 47 | void __iomem *base; | ||
| 48 | unsigned long hz_period; | ||
| 49 | struct clock_event_device event_dev; | ||
| 50 | } timer; | ||
| 51 | |||
| 52 | static inline u32 keystone_timer_readl(unsigned long rg) | ||
| 53 | { | ||
| 54 | return readl_relaxed(timer.base + rg); | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline void keystone_timer_writel(u32 val, unsigned long rg) | ||
| 58 | { | ||
| 59 | writel_relaxed(val, timer.base + rg); | ||
| 60 | } | ||
| 61 | |||
| 62 | /** | ||
| 63 | * keystone_timer_barrier: write memory barrier | ||
| 64 | * use explicit barrier to avoid using readl/writel non relaxed function | ||
| 65 | * variants, because in our case non relaxed variants hide the true places | ||
| 66 | * where barrier is needed. | ||
| 67 | */ | ||
| 68 | static inline void keystone_timer_barrier(void) | ||
| 69 | { | ||
| 70 | __iowmb(); | ||
| 71 | } | ||
| 72 | |||
| 73 | /** | ||
| 74 | * keystone_timer_config: configures timer to work in oneshot/periodic modes. | ||
| 75 | * @mode: mode to configure | ||
| 76 | * @period: number of cycles to configure for | ||
| 77 | */ | ||
| 78 | static int keystone_timer_config(u64 period, enum clock_event_mode mode) | ||
| 79 | { | ||
| 80 | u32 tcr; | ||
| 81 | u32 off; | ||
| 82 | |||
| 83 | tcr = keystone_timer_readl(TCR); | ||
| 84 | off = tcr & ~(TCR_ENAMODE_MASK); | ||
| 85 | |||
| 86 | /* set enable mode */ | ||
| 87 | switch (mode) { | ||
| 88 | case CLOCK_EVT_MODE_ONESHOT: | ||
| 89 | tcr |= TCR_ENAMODE_ONESHOT_MASK; | ||
| 90 | break; | ||
| 91 | case CLOCK_EVT_MODE_PERIODIC: | ||
| 92 | tcr |= TCR_ENAMODE_PERIODIC_MASK; | ||
| 93 | break; | ||
| 94 | default: | ||
| 95 | return -1; | ||
| 96 | } | ||
| 97 | |||
| 98 | /* disable timer */ | ||
| 99 | keystone_timer_writel(off, TCR); | ||
| 100 | /* here we have to be sure the timer has been disabled */ | ||
| 101 | keystone_timer_barrier(); | ||
| 102 | |||
| 103 | /* reset counter to zero, set new period */ | ||
| 104 | keystone_timer_writel(0, TIM12); | ||
| 105 | keystone_timer_writel(0, TIM34); | ||
| 106 | keystone_timer_writel(period & 0xffffffff, PRD12); | ||
| 107 | keystone_timer_writel(period >> 32, PRD34); | ||
| 108 | |||
| 109 | /* | ||
| 110 | * enable timer | ||
| 111 | * here we have to be sure that CNTLO, CNTHI, PRDLO, PRDHI registers | ||
| 112 | * have been written. | ||
| 113 | */ | ||
| 114 | keystone_timer_barrier(); | ||
| 115 | keystone_timer_writel(tcr, TCR); | ||
| 116 | return 0; | ||
| 117 | } | ||
| 118 | |||
| 119 | static void keystone_timer_disable(void) | ||
| 120 | { | ||
| 121 | u32 tcr; | ||
| 122 | |||
| 123 | tcr = keystone_timer_readl(TCR); | ||
| 124 | |||
| 125 | /* disable timer */ | ||
| 126 | tcr &= ~(TCR_ENAMODE_MASK); | ||
| 127 | keystone_timer_writel(tcr, TCR); | ||
| 128 | } | ||
| 129 | |||
| 130 | static irqreturn_t keystone_timer_interrupt(int irq, void *dev_id) | ||
| 131 | { | ||
| 132 | struct clock_event_device *evt = dev_id; | ||
| 133 | |||
| 134 | evt->event_handler(evt); | ||
| 135 | return IRQ_HANDLED; | ||
| 136 | } | ||
| 137 | |||
| 138 | static int keystone_set_next_event(unsigned long cycles, | ||
| 139 | struct clock_event_device *evt) | ||
| 140 | { | ||
| 141 | return keystone_timer_config(cycles, evt->mode); | ||
| 142 | } | ||
| 143 | |||
| 144 | static void keystone_set_mode(enum clock_event_mode mode, | ||
| 145 | struct clock_event_device *evt) | ||
| 146 | { | ||
| 147 | switch (mode) { | ||
| 148 | case CLOCK_EVT_MODE_PERIODIC: | ||
| 149 | keystone_timer_config(timer.hz_period, CLOCK_EVT_MODE_PERIODIC); | ||
| 150 | break; | ||
| 151 | case CLOCK_EVT_MODE_UNUSED: | ||
| 152 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
| 153 | case CLOCK_EVT_MODE_ONESHOT: | ||
| 154 | keystone_timer_disable(); | ||
| 155 | break; | ||
| 156 | default: | ||
| 157 | break; | ||
| 158 | } | ||
| 159 | } | ||
| 160 | |||
| 161 | static void __init keystone_timer_init(struct device_node *np) | ||
| 162 | { | ||
| 163 | struct clock_event_device *event_dev = &timer.event_dev; | ||
| 164 | unsigned long rate; | ||
| 165 | struct clk *clk; | ||
| 166 | int irq, error; | ||
| 167 | |||
| 168 | irq = irq_of_parse_and_map(np, 0); | ||
| 169 | if (irq == NO_IRQ) { | ||
| 170 | pr_err("%s: failed to map interrupts\n", __func__); | ||
| 171 | return; | ||
| 172 | } | ||
| 173 | |||
| 174 | timer.base = of_iomap(np, 0); | ||
| 175 | if (!timer.base) { | ||
| 176 | pr_err("%s: failed to map registers\n", __func__); | ||
| 177 | return; | ||
| 178 | } | ||
| 179 | |||
| 180 | clk = of_clk_get(np, 0); | ||
| 181 | if (IS_ERR(clk)) { | ||
| 182 | pr_err("%s: failed to get clock\n", __func__); | ||
| 183 | iounmap(timer.base); | ||
| 184 | return; | ||
| 185 | } | ||
| 186 | |||
| 187 | error = clk_prepare_enable(clk); | ||
| 188 | if (error) { | ||
| 189 | pr_err("%s: failed to enable clock\n", __func__); | ||
| 190 | goto err; | ||
| 191 | } | ||
| 192 | |||
| 193 | rate = clk_get_rate(clk); | ||
| 194 | |||
| 195 | /* disable, use internal clock source */ | ||
| 196 | keystone_timer_writel(0, TCR); | ||
| 197 | /* here we have to be sure the timer has been disabled */ | ||
| 198 | keystone_timer_barrier(); | ||
| 199 | |||
| 200 | /* reset timer as 64-bit, no pre-scaler, plus features are disabled */ | ||
| 201 | keystone_timer_writel(0, TGCR); | ||
| 202 | |||
| 203 | /* unreset timer */ | ||
| 204 | keystone_timer_writel(TGCR_TIM_UNRESET_MASK, TGCR); | ||
| 205 | |||
| 206 | /* init counter to zero */ | ||
| 207 | keystone_timer_writel(0, TIM12); | ||
| 208 | keystone_timer_writel(0, TIM34); | ||
| 209 | |||
| 210 | timer.hz_period = DIV_ROUND_UP(rate, HZ); | ||
| 211 | |||
| 212 | /* enable timer interrupts */ | ||
| 213 | keystone_timer_writel(INTCTLSTAT_ENINT_MASK, INTCTLSTAT); | ||
| 214 | |||
| 215 | error = request_irq(irq, keystone_timer_interrupt, IRQF_TIMER, | ||
| 216 | TIMER_NAME, event_dev); | ||
| 217 | if (error) { | ||
| 218 | pr_err("%s: failed to setup irq\n", __func__); | ||
| 219 | goto err; | ||
| 220 | } | ||
| 221 | |||
| 222 | /* setup clockevent */ | ||
| 223 | event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; | ||
| 224 | event_dev->set_next_event = keystone_set_next_event; | ||
| 225 | event_dev->set_mode = keystone_set_mode; | ||
| 226 | event_dev->cpumask = cpu_all_mask; | ||
| 227 | event_dev->owner = THIS_MODULE; | ||
| 228 | event_dev->name = TIMER_NAME; | ||
| 229 | event_dev->irq = irq; | ||
| 230 | |||
| 231 | clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX); | ||
| 232 | |||
| 233 | pr_info("keystone timer clock @%lu Hz\n", rate); | ||
| 234 | return; | ||
| 235 | err: | ||
| 236 | clk_put(clk); | ||
| 237 | iounmap(timer.base); | ||
| 238 | } | ||
| 239 | |||
| 240 | CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer", | ||
| 241 | keystone_timer_init); | ||
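The keystone driver above follows the standard clockevents bring-up: map the interrupt and registers, quiesce and reset the hardware, then let the core derive mult/shift by handing the clock rate and counter limits to clockevents_config_and_register(). A minimal sketch of that registration pattern; all example_* names are hypothetical and error handling is elided:

    /* Sketch only: example_* names are hypothetical, not part of this series. */
    static int example_set_next_event(unsigned long cycles,
                                      struct clock_event_device *evt)
    {
            /* program the hardware comparator 'cycles' ticks ahead */
            return 0;
    }

    static void example_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *evt)
    {
            /* switch periodic/oneshot/shutdown, as keystone_set_mode() does */
    }

    static struct clock_event_device example_evt = {
            .name           = "example-timer",
            .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
            .set_next_event = example_set_next_event,
            .set_mode       = example_set_mode,
            .rating         = 200,
    };

    static void __init example_timer_register(unsigned long rate, int irq)
    {
            example_evt.cpumask = cpu_possible_mask;
            example_evt.irq = irq;
            /* min_delta = 1 cycle, max_delta = full width of the counter */
            clockevents_config_and_register(&example_evt, rate, 1, ULONG_MAX);
    }
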
diff --git a/arch/arm/mach-u300/timer.c b/drivers/clocksource/timer-u300.c index fe08fd34c0ce..e63d469661fd 100644 --- a/arch/arm/mach-u300/timer.c +++ b/drivers/clocksource/timer-u300.c | |||
| @@ -1,8 +1,4 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * | ||
| 3 | * arch/arm/mach-u300/timer.c | ||
| 4 | * | ||
| 5 | * | ||
| 6 | * Copyright (C) 2007-2009 ST-Ericsson AB | 2 | * Copyright (C) 2007-2009 ST-Ericsson AB |
| 7 | * License terms: GNU General Public License (GPL) version 2 | 3 | * License terms: GNU General Public License (GPL) version 2 |
| 8 | * Timer COH 901 328, runs the OS timer interrupt. | 4 | * Timer COH 901 328, runs the OS timer interrupt. |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index a55e68f2cfc8..09d05ab262be 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
| @@ -140,12 +140,14 @@ int cpuidle_idle_call(void) | |||
| 140 | return 0; | 140 | return 0; |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | trace_cpu_idle_rcuidle(next_state, dev->cpu); | ||
| 144 | |||
| 145 | broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP); | 143 | broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP); |
| 146 | 144 | ||
| 147 | if (broadcast) | 145 | if (broadcast && |
| 148 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); | 146 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu)) |
| 147 | return -EBUSY; | ||
| 148 | |||
| 149 | |||
| 150 | trace_cpu_idle_rcuidle(next_state, dev->cpu); | ||
| 149 | 151 | ||
| 150 | if (cpuidle_state_is_coupled(dev, drv, next_state)) | 152 | if (cpuidle_state_is_coupled(dev, drv, next_state)) |
| 151 | entered_state = cpuidle_enter_state_coupled(dev, drv, | 153 | entered_state = cpuidle_enter_state_coupled(dev, drv, |
| @@ -153,11 +155,11 @@ int cpuidle_idle_call(void) | |||
| 153 | else | 155 | else |
| 154 | entered_state = cpuidle_enter_state(dev, drv, next_state); | 156 | entered_state = cpuidle_enter_state(dev, drv, next_state); |
| 155 | 157 | ||
| 158 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); | ||
| 159 | |||
| 156 | if (broadcast) | 160 | if (broadcast) |
| 157 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); | 161 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); |
| 158 | 162 | ||
| 159 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); | ||
| 160 | |||
| 161 | /* give the governor an opportunity to reflect on the outcome */ | 163 | /* give the governor an opportunity to reflect on the outcome */ |
| 162 | if (cpuidle_curr_governor->reflect) | 164 | if (cpuidle_curr_governor->reflect) |
| 163 | cpuidle_curr_governor->reflect(dev, entered_state); | 165 | cpuidle_curr_governor->reflect(dev, entered_state); |
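The reordering above is not only cosmetic: clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER) now returns a value and can veto deep idle for the CPU that owns the hrtimer-based broadcast device introduced later in this merge. Condensed from the hunk above (not the complete function):

    broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);

    /*
     * If this CPU currently drives the hrtimer broadcast, the notify
     * chain propagates -EBUSY from tick_broadcast_oneshot_control()
     * and the deep C-state is refused.
     */
    if (broadcast &&
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
            return -EBUSY;

    trace_cpu_idle_rcuidle(next_state, dev->cpu);
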
diff --git a/fs/timerfd.c b/fs/timerfd.c index 929312180dd0..0013142c0475 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c | |||
| @@ -317,6 +317,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) | |||
| 317 | (clockid != CLOCK_MONOTONIC && | 317 | (clockid != CLOCK_MONOTONIC && |
| 318 | clockid != CLOCK_REALTIME && | 318 | clockid != CLOCK_REALTIME && |
| 319 | clockid != CLOCK_REALTIME_ALARM && | 319 | clockid != CLOCK_REALTIME_ALARM && |
| 320 | clockid != CLOCK_BOOTTIME && | ||
| 320 | clockid != CLOCK_BOOTTIME_ALARM)) | 321 | clockid != CLOCK_BOOTTIME_ALARM)) |
| 321 | return -EINVAL; | 322 | return -EINVAL; |
| 322 | 323 | ||
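The timerfd hunk simply adds CLOCK_BOOTTIME to the accepted clockid list, so a timer can follow a clock that keeps advancing across system suspend without using CLOCK_BOOTTIME_ALARM (which additionally wakes the system and requires CAP_WAKE_ALARM). A small userspace sketch using only the standard timerfd API:

    #include <sys/timerfd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct itimerspec its = { .it_value = { .tv_sec = 5 } };
            uint64_t expirations;

            /* Accepted after this patch; previously rejected with EINVAL. */
            int fd = timerfd_create(CLOCK_BOOTTIME, 0);
            if (fd < 0) {
                    perror("timerfd_create");
                    return 1;
            }
            timerfd_settime(fd, 0, &its, NULL);   /* relative 5s, one-shot */
            /* Suspend time counts toward expiry, though it won't wake the system. */
            read(fd, &expirations, sizeof(expirations));
            printf("expired %llu time(s)\n", (unsigned long long)expirations);
            return 0;
    }
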
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 493aa021c7a9..2e4cb67f6e56 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h | |||
| @@ -62,6 +62,11 @@ enum clock_event_mode { | |||
| 62 | #define CLOCK_EVT_FEAT_DYNIRQ 0x000020 | 62 | #define CLOCK_EVT_FEAT_DYNIRQ 0x000020 |
| 63 | #define CLOCK_EVT_FEAT_PERCPU 0x000040 | 63 | #define CLOCK_EVT_FEAT_PERCPU 0x000040 |
| 64 | 64 | ||
| 65 | /* | ||
| 66 | * Clockevent device is based on a hrtimer for broadcast | ||
| 67 | */ | ||
| 68 | #define CLOCK_EVT_FEAT_HRTIMER 0x000080 | ||
| 69 | |||
| 65 | /** | 70 | /** |
| 66 | * struct clock_event_device - clock event device descriptor | 71 | * struct clock_event_device - clock event device descriptor |
| 67 | * @event_handler: Assigned by the framework to be called by the low | 72 | * @event_handler: Assigned by the framework to be called by the low |
| @@ -83,6 +88,7 @@ enum clock_event_mode { | |||
| 83 | * @name: ptr to clock event name | 88 | * @name: ptr to clock event name |
| 84 | * @rating: variable to rate clock event devices | 89 | * @rating: variable to rate clock event devices |
| 85 | * @irq: IRQ number (only for non CPU local devices) | 90 | * @irq: IRQ number (only for non CPU local devices) |
| 91 | * @bound_on: Bound on CPU | ||
| 86 | * @cpumask: cpumask to indicate for which CPUs this device works | 92 | * @cpumask: cpumask to indicate for which CPUs this device works |
| 87 | * @list: list head for the management code | 93 | * @list: list head for the management code |
| 88 | * @owner: module reference | 94 | * @owner: module reference |
| @@ -113,6 +119,7 @@ struct clock_event_device { | |||
| 113 | const char *name; | 119 | const char *name; |
| 114 | int rating; | 120 | int rating; |
| 115 | int irq; | 121 | int irq; |
| 122 | int bound_on; | ||
| 116 | const struct cpumask *cpumask; | 123 | const struct cpumask *cpumask; |
| 117 | struct list_head list; | 124 | struct list_head list; |
| 118 | struct module *owner; | 125 | struct module *owner; |
| @@ -180,15 +187,17 @@ extern int tick_receive_broadcast(void); | |||
| 180 | #endif | 187 | #endif |
| 181 | 188 | ||
| 182 | #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) | 189 | #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) |
| 190 | extern void tick_setup_hrtimer_broadcast(void); | ||
| 183 | extern int tick_check_broadcast_expired(void); | 191 | extern int tick_check_broadcast_expired(void); |
| 184 | #else | 192 | #else |
| 185 | static inline int tick_check_broadcast_expired(void) { return 0; } | 193 | static inline int tick_check_broadcast_expired(void) { return 0; } |
| 194 | static inline void tick_setup_hrtimer_broadcast(void) {}; | ||
| 186 | #endif | 195 | #endif |
| 187 | 196 | ||
| 188 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 197 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
| 189 | extern void clockevents_notify(unsigned long reason, void *arg); | 198 | extern int clockevents_notify(unsigned long reason, void *arg); |
| 190 | #else | 199 | #else |
| 191 | static inline void clockevents_notify(unsigned long reason, void *arg) {} | 200 | static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; } |
| 192 | #endif | 201 | #endif |
| 193 | 202 | ||
| 194 | #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */ | 203 | #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */ |
| @@ -196,8 +205,9 @@ static inline void clockevents_notify(unsigned long reason, void *arg) {} | |||
| 196 | static inline void clockevents_suspend(void) {} | 205 | static inline void clockevents_suspend(void) {} |
| 197 | static inline void clockevents_resume(void) {} | 206 | static inline void clockevents_resume(void) {} |
| 198 | 207 | ||
| 199 | static inline void clockevents_notify(unsigned long reason, void *arg) {} | 208 | static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; } |
| 200 | static inline int tick_check_broadcast_expired(void) { return 0; } | 209 | static inline int tick_check_broadcast_expired(void) { return 0; } |
| 210 | static inline void tick_setup_hrtimer_broadcast(void) {}; | ||
| 201 | 211 | ||
| 202 | #endif | 212 | #endif |
| 203 | 213 | ||
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index d19a5c2d2270..e7a8d3fa91d5 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
| @@ -96,12 +96,12 @@ enum hrtimer_restart { | |||
| 96 | * @function: timer expiry callback function | 96 | * @function: timer expiry callback function |
| 97 | * @base: pointer to the timer base (per cpu and per clock) | 97 | * @base: pointer to the timer base (per cpu and per clock) |
| 98 | * @state: state information (See bit values above) | 98 | * @state: state information (See bit values above) |
| 99 | * @start_pid: timer statistics field to store the pid of the task which | ||
| 100 | * started the timer | ||
| 99 | * @start_site: timer statistics field to store the site where the timer | 101 | * @start_site: timer statistics field to store the site where the timer |
| 100 | * was started | 102 | * was started |
| 101 | * @start_comm: timer statistics field to store the name of the process which | 103 | * @start_comm: timer statistics field to store the name of the process which |
| 102 | * started the timer | 104 | * started the timer |
| 103 | * @start_pid: timer statistics field to store the pid of the task which | ||
| 104 | * started the timer | ||
| 105 | * | 105 | * |
| 106 | * The hrtimer structure must be initialized by hrtimer_init() | 106 | * The hrtimer structure must be initialized by hrtimer_init() |
| 107 | */ | 107 | */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index c399ed826648..7cb07fd26680 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -294,10 +294,14 @@ extern int runqueue_is_locked(int cpu); | |||
| 294 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) | 294 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) |
| 295 | extern void nohz_balance_enter_idle(int cpu); | 295 | extern void nohz_balance_enter_idle(int cpu); |
| 296 | extern void set_cpu_sd_state_idle(void); | 296 | extern void set_cpu_sd_state_idle(void); |
| 297 | extern int get_nohz_timer_target(void); | 297 | extern int get_nohz_timer_target(int pinned); |
| 298 | #else | 298 | #else |
| 299 | static inline void nohz_balance_enter_idle(int cpu) { } | 299 | static inline void nohz_balance_enter_idle(int cpu) { } |
| 300 | static inline void set_cpu_sd_state_idle(void) { } | 300 | static inline void set_cpu_sd_state_idle(void) { } |
| 301 | static inline int get_nohz_timer_target(int pinned) | ||
| 302 | { | ||
| 303 | return smp_processor_id(); | ||
| 304 | } | ||
| 301 | #endif | 305 | #endif |
| 302 | 306 | ||
| 303 | /* | 307 | /* |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 532994651684..1b22c42e9c2d 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -180,6 +180,7 @@ struct execute_work { | |||
| 180 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | 180 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
| 181 | extern void __init_work(struct work_struct *work, int onstack); | 181 | extern void __init_work(struct work_struct *work, int onstack); |
| 182 | extern void destroy_work_on_stack(struct work_struct *work); | 182 | extern void destroy_work_on_stack(struct work_struct *work); |
| 183 | extern void destroy_delayed_work_on_stack(struct delayed_work *work); | ||
| 183 | static inline unsigned int work_static(struct work_struct *work) | 184 | static inline unsigned int work_static(struct work_struct *work) |
| 184 | { | 185 | { |
| 185 | return *work_data_bits(work) & WORK_STRUCT_STATIC; | 186 | return *work_data_bits(work) & WORK_STRUCT_STATIC; |
| @@ -187,6 +188,7 @@ static inline unsigned int work_static(struct work_struct *work) | |||
| 187 | #else | 188 | #else |
| 188 | static inline void __init_work(struct work_struct *work, int onstack) { } | 189 | static inline void __init_work(struct work_struct *work, int onstack) { } |
| 189 | static inline void destroy_work_on_stack(struct work_struct *work) { } | 190 | static inline void destroy_work_on_stack(struct work_struct *work) { } |
| 191 | static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { } | ||
| 190 | static inline unsigned int work_static(struct work_struct *work) { return 0; } | 192 | static inline unsigned int work_static(struct work_struct *work) { return 0; } |
| 191 | #endif | 193 | #endif |
| 192 | 194 | ||
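destroy_delayed_work_on_stack() completes the debugobjects story for on-stack delayed work (the x86 HPET fix earlier in this series is the first user): a delayed_work embeds a timer, and both objects must be destroyed before the stack frame disappears. A minimal usage sketch; example_* names are hypothetical:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    static void example_fn(struct work_struct *work)        /* hypothetical */
    {
            pr_info("on-stack delayed work ran\n");
    }

    static void example_run_on_stack(void)
    {
            struct delayed_work dwork;

            INIT_DELAYED_WORK_ONSTACK(&dwork, example_fn);
            schedule_delayed_work(&dwork, msecs_to_jiffies(10));
            flush_delayed_work(&dwork);
            /* Pairs with INIT_DELAYED_WORK_ONSTACK(): destroys the work and its timer. */
            destroy_delayed_work_on_stack(&dwork);
    }
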
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 09094361dce5..d55092ceee29 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -168,19 +168,6 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, | |||
| 168 | } | 168 | } |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | |||
| 172 | /* | ||
| 173 | * Get the preferred target CPU for NOHZ | ||
| 174 | */ | ||
| 175 | static int hrtimer_get_target(int this_cpu, int pinned) | ||
| 176 | { | ||
| 177 | #ifdef CONFIG_NO_HZ_COMMON | ||
| 178 | if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) | ||
| 179 | return get_nohz_timer_target(); | ||
| 180 | #endif | ||
| 181 | return this_cpu; | ||
| 182 | } | ||
| 183 | |||
| 184 | /* | 171 | /* |
| 185 | * With HIGHRES=y we do not migrate the timer when it is expiring | 172 | * With HIGHRES=y we do not migrate the timer when it is expiring |
| 186 | * before the next event on the target cpu because we cannot reprogram | 173 | * before the next event on the target cpu because we cannot reprogram |
| @@ -214,7 +201,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, | |||
| 214 | struct hrtimer_clock_base *new_base; | 201 | struct hrtimer_clock_base *new_base; |
| 215 | struct hrtimer_cpu_base *new_cpu_base; | 202 | struct hrtimer_cpu_base *new_cpu_base; |
| 216 | int this_cpu = smp_processor_id(); | 203 | int this_cpu = smp_processor_id(); |
| 217 | int cpu = hrtimer_get_target(this_cpu, pinned); | 204 | int cpu = get_nohz_timer_target(pinned); |
| 218 | int basenum = base->index; | 205 | int basenum = base->index; |
| 219 | 206 | ||
| 220 | again: | 207 | again: |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d11a1768357d..3c4d096544ce 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -555,12 +555,15 @@ void resched_cpu(int cpu) | |||
| 555 | * selecting an idle cpu will add more delays to the timers than intended | 555 | * selecting an idle cpu will add more delays to the timers than intended |
| 556 | * (as that cpu's timer base may not be uptodate wrt jiffies etc). | 556 | * (as that cpu's timer base may not be uptodate wrt jiffies etc). |
| 557 | */ | 557 | */ |
| 558 | int get_nohz_timer_target(void) | 558 | int get_nohz_timer_target(int pinned) |
| 559 | { | 559 | { |
| 560 | int cpu = smp_processor_id(); | 560 | int cpu = smp_processor_id(); |
| 561 | int i; | 561 | int i; |
| 562 | struct sched_domain *sd; | 562 | struct sched_domain *sd; |
| 563 | 563 | ||
| 564 | if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu)) | ||
| 565 | return cpu; | ||
| 566 | |||
| 564 | rcu_read_lock(); | 567 | rcu_read_lock(); |
| 565 | for_each_domain(cpu, sd) { | 568 | for_each_domain(cpu, sd) { |
| 566 | for_each_cpu(i, sched_domain_span(sd)) { | 569 | for_each_cpu(i, sched_domain_span(sd)) { |
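With the pinned check and the timer-migration sysctl folded into get_nohz_timer_target() itself, every caller shrinks to a single call. Before/after at a timer-placement call site, taken from the __mod_timer() and switch_hrtimer_base() hunks later in this merge:

    /* Before: each caller open-coded the migration policy. */
    cpu = smp_processor_id();
    #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
            if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
                    cpu = get_nohz_timer_target();
    #endif

    /* After: the policy lives in one place in the scheduler. */
    cpu = get_nohz_timer_target(pinned);
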
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index 3ce6e8c5f3fc..f448513a45ed 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig | |||
| @@ -124,7 +124,7 @@ config NO_HZ_FULL | |||
| 124 | endchoice | 124 | endchoice |
| 125 | 125 | ||
| 126 | config NO_HZ_FULL_ALL | 126 | config NO_HZ_FULL_ALL |
| 127 | bool "Full dynticks system on all CPUs by default" | 127 | bool "Full dynticks system on all CPUs by default (except CPU 0)" |
| 128 | depends on NO_HZ_FULL | 128 | depends on NO_HZ_FULL |
| 129 | help | 129 | help |
| 130 | If the user doesn't pass the nohz_full boot option to | 130 | If the user doesn't pass the nohz_full boot option to |
diff --git a/kernel/time/Makefile b/kernel/time/Makefile index 9250130646f5..57a413fd0ebf 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile | |||
| @@ -3,7 +3,10 @@ obj-y += timeconv.o posix-clock.o alarmtimer.o | |||
| 3 | 3 | ||
| 4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o | 4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o |
| 5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o | 5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o |
| 6 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o | 6 | ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y) |
| 7 | obj-y += tick-broadcast.o | ||
| 8 | obj-$(CONFIG_TICK_ONESHOT) += tick-broadcast-hrtimer.o | ||
| 9 | endif | ||
| 7 | obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o | 10 | obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o |
| 8 | obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o | 11 | obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o |
| 9 | obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o | 12 | obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o |
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 086ad6043bcb..ad362c260ef4 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
| @@ -439,6 +439,19 @@ void clockevents_config_and_register(struct clock_event_device *dev, | |||
| 439 | } | 439 | } |
| 440 | EXPORT_SYMBOL_GPL(clockevents_config_and_register); | 440 | EXPORT_SYMBOL_GPL(clockevents_config_and_register); |
| 441 | 441 | ||
| 442 | int __clockevents_update_freq(struct clock_event_device *dev, u32 freq) | ||
| 443 | { | ||
| 444 | clockevents_config(dev, freq); | ||
| 445 | |||
| 446 | if (dev->mode == CLOCK_EVT_MODE_ONESHOT) | ||
| 447 | return clockevents_program_event(dev, dev->next_event, false); | ||
| 448 | |||
| 449 | if (dev->mode == CLOCK_EVT_MODE_PERIODIC) | ||
| 450 | dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev); | ||
| 451 | |||
| 452 | return 0; | ||
| 453 | } | ||
| 454 | |||
| 442 | /** | 455 | /** |
| 443 | * clockevents_update_freq - Update frequency and reprogram a clock event device. | 456 | * clockevents_update_freq - Update frequency and reprogram a clock event device. |
| 444 | * @dev: device to modify | 457 | * @dev: device to modify |
| @@ -446,17 +459,22 @@ EXPORT_SYMBOL_GPL(clockevents_config_and_register); | |||
| 446 | * | 459 | * |
| 447 | * Reconfigure and reprogram a clock event device in oneshot | 460 | * Reconfigure and reprogram a clock event device in oneshot |
| 448 | * mode. Must be called on the cpu for which the device delivers per | 461 | * mode. Must be called on the cpu for which the device delivers per |
| 449 | * cpu timer events with interrupts disabled! Returns 0 on success, | 462 | * cpu timer events. If called for the broadcast device the core takes |
| 450 | * -ETIME when the event is in the past. | 463 | * care of serialization. |
| 464 | * | ||
| 465 | * Returns 0 on success, -ETIME when the event is in the past. | ||
| 451 | */ | 466 | */ |
| 452 | int clockevents_update_freq(struct clock_event_device *dev, u32 freq) | 467 | int clockevents_update_freq(struct clock_event_device *dev, u32 freq) |
| 453 | { | 468 | { |
| 454 | clockevents_config(dev, freq); | 469 | unsigned long flags; |
| 455 | 470 | int ret; | |
| 456 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) | ||
| 457 | return 0; | ||
| 458 | 471 | ||
| 459 | return clockevents_program_event(dev, dev->next_event, false); | 472 | local_irq_save(flags); |
| 473 | ret = tick_broadcast_update_freq(dev, freq); | ||
| 474 | if (ret == -ENODEV) | ||
| 475 | ret = __clockevents_update_freq(dev, freq); | ||
| 476 | local_irq_restore(flags); | ||
| 477 | return ret; | ||
| 460 | } | 478 | } |
| 461 | 479 | ||
| 462 | /* | 480 | /* |
| @@ -524,12 +542,13 @@ void clockevents_resume(void) | |||
| 524 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 542 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
| 525 | /** | 543 | /** |
| 526 | * clockevents_notify - notification about relevant events | 544 | * clockevents_notify - notification about relevant events |
| 545 | * Returns 0 on success, any other value on error | ||
| 527 | */ | 546 | */ |
| 528 | void clockevents_notify(unsigned long reason, void *arg) | 547 | int clockevents_notify(unsigned long reason, void *arg) |
| 529 | { | 548 | { |
| 530 | struct clock_event_device *dev, *tmp; | 549 | struct clock_event_device *dev, *tmp; |
| 531 | unsigned long flags; | 550 | unsigned long flags; |
| 532 | int cpu; | 551 | int cpu, ret = 0; |
| 533 | 552 | ||
| 534 | raw_spin_lock_irqsave(&clockevents_lock, flags); | 553 | raw_spin_lock_irqsave(&clockevents_lock, flags); |
| 535 | 554 | ||
| @@ -542,7 +561,7 @@ void clockevents_notify(unsigned long reason, void *arg) | |||
| 542 | 561 | ||
| 543 | case CLOCK_EVT_NOTIFY_BROADCAST_ENTER: | 562 | case CLOCK_EVT_NOTIFY_BROADCAST_ENTER: |
| 544 | case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: | 563 | case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: |
| 545 | tick_broadcast_oneshot_control(reason); | 564 | ret = tick_broadcast_oneshot_control(reason); |
| 546 | break; | 565 | break; |
| 547 | 566 | ||
| 548 | case CLOCK_EVT_NOTIFY_CPU_DYING: | 567 | case CLOCK_EVT_NOTIFY_CPU_DYING: |
| @@ -585,6 +604,7 @@ void clockevents_notify(unsigned long reason, void *arg) | |||
| 585 | break; | 604 | break; |
| 586 | } | 605 | } |
| 587 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); | 606 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); |
| 607 | return ret; | ||
| 588 | } | 608 | } |
| 589 | EXPORT_SYMBOL_GPL(clockevents_notify); | 609 | EXPORT_SYMBOL_GPL(clockevents_notify); |
| 590 | 610 | ||
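Two things change for clockevents_update_freq(): it now disables interrupts itself, so drivers may call it from contexts that run with IRQs enabled (the cadence_ttc clock-notifier path in this series), and it also handles periodic mode and the broadcast device via tick_broadcast_update_freq(). A hedged sketch of a driver-side caller; the notifier wiring and example_* names are assumptions, not taken from this merge:

    static struct clock_event_device example_clockevent;   /* hypothetical, registered elsewhere */

    static int example_rate_change_cb(struct notifier_block *nb,
                                      unsigned long event, void *data)
    {
            struct clk_notifier_data *ndata = data;

            if (event == POST_RATE_CHANGE)
                    /* Recomputes mult/shift and reprograms any pending event. */
                    clockevents_update_freq(&example_clockevent, ndata->new_rate);

            return NOTIFY_OK;
    }
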
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index af8d1d4f3d55..419a52cecd20 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
| @@ -514,12 +514,13 @@ static void sync_cmos_clock(struct work_struct *work) | |||
| 514 | next.tv_sec++; | 514 | next.tv_sec++; |
| 515 | next.tv_nsec -= NSEC_PER_SEC; | 515 | next.tv_nsec -= NSEC_PER_SEC; |
| 516 | } | 516 | } |
| 517 | schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next)); | 517 | queue_delayed_work(system_power_efficient_wq, |
| 518 | &sync_cmos_work, timespec_to_jiffies(&next)); | ||
| 518 | } | 519 | } |
| 519 | 520 | ||
| 520 | void ntp_notify_cmos_timer(void) | 521 | void ntp_notify_cmos_timer(void) |
| 521 | { | 522 | { |
| 522 | schedule_delayed_work(&sync_cmos_work, 0); | 523 | queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); |
| 523 | } | 524 | } |
| 524 | 525 | ||
| 525 | #else | 526 | #else |
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c new file mode 100644 index 000000000000..eb682d5c697c --- /dev/null +++ b/kernel/time/tick-broadcast-hrtimer.c | |||
| @@ -0,0 +1,106 @@ | |||
| 1 | /* | ||
| 2 | * linux/kernel/time/tick-broadcast-hrtimer.c | ||
| 3 | * This file emulates a local clock event device | ||
| 4 | * via a pseudo clock device. | ||
| 5 | */ | ||
| 6 | #include <linux/cpu.h> | ||
| 7 | #include <linux/err.h> | ||
| 8 | #include <linux/hrtimer.h> | ||
| 9 | #include <linux/interrupt.h> | ||
| 10 | #include <linux/percpu.h> | ||
| 11 | #include <linux/profile.h> | ||
| 12 | #include <linux/clockchips.h> | ||
| 13 | #include <linux/sched.h> | ||
| 14 | #include <linux/smp.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | |||
| 17 | #include "tick-internal.h" | ||
| 18 | |||
| 19 | static struct hrtimer bctimer; | ||
| 20 | |||
| 21 | static void bc_set_mode(enum clock_event_mode mode, | ||
| 22 | struct clock_event_device *bc) | ||
| 23 | { | ||
| 24 | switch (mode) { | ||
| 25 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
| 26 | /* | ||
| 27 | * Note, we cannot cancel the timer here as we might | ||
| 28 | * run into the following live lock scenario: | ||
| 29 | * | ||
| 30 | * cpu 0 cpu1 | ||
| 31 | * lock(broadcast_lock); | ||
| 32 | * hrtimer_interrupt() | ||
| 33 | * bc_handler() | ||
| 34 | * tick_handle_oneshot_broadcast(); | ||
| 35 | * lock(broadcast_lock); | ||
| 36 | * hrtimer_cancel() | ||
| 37 | * wait_for_callback() | ||
| 38 | */ | ||
| 39 | hrtimer_try_to_cancel(&bctimer); | ||
| 40 | break; | ||
| 41 | default: | ||
| 42 | break; | ||
| 43 | } | ||
| 44 | } | ||
| 45 | |||
| 46 | /* | ||
| 47 | * This is called from the guts of the broadcast code when the cpu | ||
| 48 | * which is about to enter idle has the earliest broadcast timer event. | ||
| 49 | */ | ||
| 50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | ||
| 51 | { | ||
| 52 | /* | ||
| 53 | * We try to cancel the timer first. If the callback is on | ||
| 54 | * flight on some other cpu then we let it handle it. If we | ||
| 55 | * were able to cancel the timer nothing can rearm it as we | ||
| 56 | * own broadcast_lock. | ||
| 57 | * | ||
| 58 | * However we can also be called from the event handler of | ||
| 59 | * ce_broadcast_hrtimer itself when it expires. We cannot | ||
| 60 | * restart the timer because we are in the callback, but we | ||
| 61 | * can set the expiry time and let the callback return | ||
| 62 | * HRTIMER_RESTART. | ||
| 63 | */ | ||
| 64 | if (hrtimer_try_to_cancel(&bctimer) >= 0) { | ||
| 65 | hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); | ||
| 66 | /* Bind the "device" to the cpu */ | ||
| 67 | bc->bound_on = smp_processor_id(); | ||
| 68 | } else if (bc->bound_on == smp_processor_id()) { | ||
| 69 | hrtimer_set_expires(&bctimer, expires); | ||
| 70 | } | ||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | |||
| 74 | static struct clock_event_device ce_broadcast_hrtimer = { | ||
| 75 | .set_mode = bc_set_mode, | ||
| 76 | .set_next_ktime = bc_set_next, | ||
| 77 | .features = CLOCK_EVT_FEAT_ONESHOT | | ||
| 78 | CLOCK_EVT_FEAT_KTIME | | ||
| 79 | CLOCK_EVT_FEAT_HRTIMER, | ||
| 80 | .rating = 0, | ||
| 81 | .bound_on = -1, | ||
| 82 | .min_delta_ns = 1, | ||
| 83 | .max_delta_ns = KTIME_MAX, | ||
| 84 | .min_delta_ticks = 1, | ||
| 85 | .max_delta_ticks = ULONG_MAX, | ||
| 86 | .mult = 1, | ||
| 87 | .shift = 0, | ||
| 88 | .cpumask = cpu_all_mask, | ||
| 89 | }; | ||
| 90 | |||
| 91 | static enum hrtimer_restart bc_handler(struct hrtimer *t) | ||
| 92 | { | ||
| 93 | ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer); | ||
| 94 | |||
| 95 | if (ce_broadcast_hrtimer.next_event.tv64 == KTIME_MAX) | ||
| 96 | return HRTIMER_NORESTART; | ||
| 97 | |||
| 98 | return HRTIMER_RESTART; | ||
| 99 | } | ||
| 100 | |||
| 101 | void tick_setup_hrtimer_broadcast(void) | ||
| 102 | { | ||
| 103 | hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
| 104 | bctimer.function = bc_handler; | ||
| 105 | clockevents_register_device(&ce_broadcast_hrtimer); | ||
| 106 | } | ||
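The pseudo device registers with rating 0 and CLOCK_EVT_FEAT_HRTIMER, so any real broadcast-capable hardware still takes precedence; it only fills in on systems whose per-cpu timers carry CLOCK_EVT_FEAT_C3STOP but which lack an always-on global timer. A platform opts in by calling tick_setup_hrtimer_broadcast() from its time init; a sketch with a hypothetical platform:

    /* Sketch: the platform names are hypothetical, the helper is real. */
    void __init example_platform_time_init(void)
    {
            /* Per-cpu clockevents first; they stop in deep idle (C3STOP). */
            example_register_local_timers();        /* hypothetical */

            /*
             * No always-on broadcast timer on this SoC: fall back to the
             * hrtimer-emulated broadcast device added in this merge.
             */
            tick_setup_hrtimer_broadcast();
    }
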
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 98977a57ac72..64c5990fd500 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -120,6 +120,19 @@ int tick_is_broadcast_device(struct clock_event_device *dev) | |||
| 120 | return (dev && tick_broadcast_device.evtdev == dev); | 120 | return (dev && tick_broadcast_device.evtdev == dev); |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq) | ||
| 124 | { | ||
| 125 | int ret = -ENODEV; | ||
| 126 | |||
| 127 | if (tick_is_broadcast_device(dev)) { | ||
| 128 | raw_spin_lock(&tick_broadcast_lock); | ||
| 129 | ret = __clockevents_update_freq(dev, freq); | ||
| 130 | raw_spin_unlock(&tick_broadcast_lock); | ||
| 131 | } | ||
| 132 | return ret; | ||
| 133 | } | ||
| 134 | |||
| 135 | |||
| 123 | static void err_broadcast(const struct cpumask *mask) | 136 | static void err_broadcast(const struct cpumask *mask) |
| 124 | { | 137 | { |
| 125 | pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n"); | 138 | pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n"); |
| @@ -272,12 +285,8 @@ static void tick_do_broadcast(struct cpumask *mask) | |||
| 272 | */ | 285 | */ |
| 273 | static void tick_do_periodic_broadcast(void) | 286 | static void tick_do_periodic_broadcast(void) |
| 274 | { | 287 | { |
| 275 | raw_spin_lock(&tick_broadcast_lock); | ||
| 276 | |||
| 277 | cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask); | 288 | cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask); |
| 278 | tick_do_broadcast(tmpmask); | 289 | tick_do_broadcast(tmpmask); |
| 279 | |||
| 280 | raw_spin_unlock(&tick_broadcast_lock); | ||
| 281 | } | 290 | } |
| 282 | 291 | ||
| 283 | /* | 292 | /* |
| @@ -287,13 +296,15 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
| 287 | { | 296 | { |
| 288 | ktime_t next; | 297 | ktime_t next; |
| 289 | 298 | ||
| 299 | raw_spin_lock(&tick_broadcast_lock); | ||
| 300 | |||
| 290 | tick_do_periodic_broadcast(); | 301 | tick_do_periodic_broadcast(); |
| 291 | 302 | ||
| 292 | /* | 303 | /* |
| 293 | * The device is in periodic mode. No reprogramming necessary: | 304 | * The device is in periodic mode. No reprogramming necessary: |
| 294 | */ | 305 | */ |
| 295 | if (dev->mode == CLOCK_EVT_MODE_PERIODIC) | 306 | if (dev->mode == CLOCK_EVT_MODE_PERIODIC) |
| 296 | return; | 307 | goto unlock; |
| 297 | 308 | ||
| 298 | /* | 309 | /* |
| 299 | * Setup the next period for devices, which do not have | 310 | * Setup the next period for devices, which do not have |
| @@ -306,9 +317,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
| 306 | next = ktime_add(next, tick_period); | 317 | next = ktime_add(next, tick_period); |
| 307 | 318 | ||
| 308 | if (!clockevents_program_event(dev, next, false)) | 319 | if (!clockevents_program_event(dev, next, false)) |
| 309 | return; | 320 | goto unlock; |
| 310 | tick_do_periodic_broadcast(); | 321 | tick_do_periodic_broadcast(); |
| 311 | } | 322 | } |
| 323 | unlock: | ||
| 324 | raw_spin_unlock(&tick_broadcast_lock); | ||
| 312 | } | 325 | } |
| 313 | 326 | ||
| 314 | /* | 327 | /* |
| @@ -630,24 +643,61 @@ again: | |||
| 630 | raw_spin_unlock(&tick_broadcast_lock); | 643 | raw_spin_unlock(&tick_broadcast_lock); |
| 631 | } | 644 | } |
| 632 | 645 | ||
| 646 | static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu) | ||
| 647 | { | ||
| 648 | if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER)) | ||
| 649 | return 0; | ||
| 650 | if (bc->next_event.tv64 == KTIME_MAX) | ||
| 651 | return 0; | ||
| 652 | return bc->bound_on == cpu ? -EBUSY : 0; | ||
| 653 | } | ||
| 654 | |||
| 655 | static void broadcast_shutdown_local(struct clock_event_device *bc, | ||
| 656 | struct clock_event_device *dev) | ||
| 657 | { | ||
| 658 | /* | ||
| 659 | * For hrtimer based broadcasting we cannot shutdown the cpu | ||
| 660 | * local device if our own event is the first one to expire or | ||
| 661 | * if we own the broadcast timer. | ||
| 662 | */ | ||
| 663 | if (bc->features & CLOCK_EVT_FEAT_HRTIMER) { | ||
| 664 | if (broadcast_needs_cpu(bc, smp_processor_id())) | ||
| 665 | return; | ||
| 666 | if (dev->next_event.tv64 < bc->next_event.tv64) | ||
| 667 | return; | ||
| 668 | } | ||
| 669 | clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); | ||
| 670 | } | ||
| 671 | |||
| 672 | static void broadcast_move_bc(int deadcpu) | ||
| 673 | { | ||
| 674 | struct clock_event_device *bc = tick_broadcast_device.evtdev; | ||
| 675 | |||
| 676 | if (!bc || !broadcast_needs_cpu(bc, deadcpu)) | ||
| 677 | return; | ||
| 678 | /* This moves the broadcast assignment to this cpu */ | ||
| 679 | clockevents_program_event(bc, bc->next_event, 1); | ||
| 680 | } | ||
| 681 | |||
| 633 | /* | 682 | /* |
| 634 | * Powerstate information: The system enters/leaves a state, where | 683 | * Powerstate information: The system enters/leaves a state, where |
| 635 | * affected devices might stop | 684 | * affected devices might stop |
| 685 | * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups. | ||
| 636 | */ | 686 | */ |
| 637 | void tick_broadcast_oneshot_control(unsigned long reason) | 687 | int tick_broadcast_oneshot_control(unsigned long reason) |
| 638 | { | 688 | { |
| 639 | struct clock_event_device *bc, *dev; | 689 | struct clock_event_device *bc, *dev; |
| 640 | struct tick_device *td; | 690 | struct tick_device *td; |
| 641 | unsigned long flags; | 691 | unsigned long flags; |
| 642 | ktime_t now; | 692 | ktime_t now; |
| 643 | int cpu; | 693 | int cpu, ret = 0; |
| 644 | 694 | ||
| 645 | /* | 695 | /* |
| 646 | * Periodic mode does not care about the enter/exit of power | 696 | * Periodic mode does not care about the enter/exit of power |
| 647 | * states | 697 | * states |
| 648 | */ | 698 | */ |
| 649 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) | 699 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) |
| 650 | return; | 700 | return 0; |
| 651 | 701 | ||
| 652 | /* | 702 | /* |
| 653 | * We are called with preemtion disabled from the depth of the | 703 | * We are called with preemtion disabled from the depth of the |
| @@ -658,7 +708,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 658 | dev = td->evtdev; | 708 | dev = td->evtdev; |
| 659 | 709 | ||
| 660 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) | 710 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) |
| 661 | return; | 711 | return 0; |
| 662 | 712 | ||
| 663 | bc = tick_broadcast_device.evtdev; | 713 | bc = tick_broadcast_device.evtdev; |
| 664 | 714 | ||
| @@ -666,7 +716,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 666 | if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { | 716 | if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { |
| 667 | if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { | 717 | if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { |
| 668 | WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); | 718 | WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); |
| 669 | clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); | 719 | broadcast_shutdown_local(bc, dev); |
| 670 | /* | 720 | /* |
| 671 | * We only reprogram the broadcast timer if we | 721 | * We only reprogram the broadcast timer if we |
| 672 | * did not mark ourself in the force mask and | 722 | * did not mark ourself in the force mask and |
| @@ -679,6 +729,16 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 679 | dev->next_event.tv64 < bc->next_event.tv64) | 729 | dev->next_event.tv64 < bc->next_event.tv64) |
| 680 | tick_broadcast_set_event(bc, cpu, dev->next_event, 1); | 730 | tick_broadcast_set_event(bc, cpu, dev->next_event, 1); |
| 681 | } | 731 | } |
| 732 | /* | ||
| 733 | * If the current CPU owns the hrtimer broadcast | ||
| 734 | * mechanism, it cannot go deep idle and we remove the | ||
| 735 | * CPU from the broadcast mask. We don't have to go | ||
| 736 | * through the EXIT path as the local timer is not | ||
| 737 | * shutdown. | ||
| 738 | */ | ||
| 739 | ret = broadcast_needs_cpu(bc, cpu); | ||
| 740 | if (ret) | ||
| 741 | cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); | ||
| 682 | } else { | 742 | } else { |
| 683 | if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { | 743 | if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { |
| 684 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); | 744 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); |
| @@ -746,6 +806,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 746 | } | 806 | } |
| 747 | out: | 807 | out: |
| 748 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 808 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 809 | return ret; | ||
| 749 | } | 810 | } |
| 750 | 811 | ||
| 751 | /* | 812 | /* |
| @@ -852,6 +913,8 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
| 852 | cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); | 913 | cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); |
| 853 | cpumask_clear_cpu(cpu, tick_broadcast_force_mask); | 914 | cpumask_clear_cpu(cpu, tick_broadcast_force_mask); |
| 854 | 915 | ||
| 916 | broadcast_move_bc(cpu); | ||
| 917 | |||
| 855 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 918 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 856 | } | 919 | } |
| 857 | 920 | ||
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 20b2fe37d105..015661279b68 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -98,18 +98,19 @@ static void tick_periodic(int cpu) | |||
| 98 | void tick_handle_periodic(struct clock_event_device *dev) | 98 | void tick_handle_periodic(struct clock_event_device *dev) |
| 99 | { | 99 | { |
| 100 | int cpu = smp_processor_id(); | 100 | int cpu = smp_processor_id(); |
| 101 | ktime_t next; | 101 | ktime_t next = dev->next_event; |
| 102 | 102 | ||
| 103 | tick_periodic(cpu); | 103 | tick_periodic(cpu); |
| 104 | 104 | ||
| 105 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) | 105 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) |
| 106 | return; | 106 | return; |
| 107 | /* | ||
| 108 | * Setup the next period for devices, which do not have | ||
| 109 | * periodic mode: | ||
| 110 | */ | ||
| 111 | next = ktime_add(dev->next_event, tick_period); | ||
| 112 | for (;;) { | 107 | for (;;) { |
| 108 | /* | ||
| 109 | * Setup the next period for devices, which do not have | ||
| 110 | * periodic mode: | ||
| 111 | */ | ||
| 112 | next = ktime_add(next, tick_period); | ||
| 113 | |||
| 113 | if (!clockevents_program_event(dev, next, false)) | 114 | if (!clockevents_program_event(dev, next, false)) |
| 114 | return; | 115 | return; |
| 115 | /* | 116 | /* |
| @@ -118,12 +119,11 @@ void tick_handle_periodic(struct clock_event_device *dev) | |||
| 118 | * to be sure we're using a real hardware clocksource. | 119 | * to be sure we're using a real hardware clocksource. |
| 119 | * Otherwise we could get trapped in an infinite | 120 | * Otherwise we could get trapped in an infinite |
| 120 | * loop, as the tick_periodic() increments jiffies, | 121 | * loop, as the tick_periodic() increments jiffies, |
| 121 | * when then will increment time, posibly causing | 122 | * which then will increment time, possibly causing |
| 122 | * the loop to trigger again and again. | 123 | * the loop to trigger again and again. |
| 123 | */ | 124 | */ |
| 124 | if (timekeeping_valid_for_hres()) | 125 | if (timekeeping_valid_for_hres()) |
| 125 | tick_periodic(cpu); | 126 | tick_periodic(cpu); |
| 126 | next = ktime_add(next, tick_period); | ||
| 127 | } | 127 | } |
| 128 | } | 128 | } |
| 129 | 129 | ||
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 8329669b51ec..7ab92b19965a 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
| @@ -46,7 +46,7 @@ extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); | |||
| 46 | extern void tick_resume_oneshot(void); | 46 | extern void tick_resume_oneshot(void); |
| 47 | # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | 47 | # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
| 48 | extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc); | 48 | extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc); |
| 49 | extern void tick_broadcast_oneshot_control(unsigned long reason); | 49 | extern int tick_broadcast_oneshot_control(unsigned long reason); |
| 50 | extern void tick_broadcast_switch_to_oneshot(void); | 50 | extern void tick_broadcast_switch_to_oneshot(void); |
| 51 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); | 51 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); |
| 52 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); | 52 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); |
| @@ -58,7 +58,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
| 58 | { | 58 | { |
| 59 | BUG(); | 59 | BUG(); |
| 60 | } | 60 | } |
| 61 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } | 61 | static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; } |
| 62 | static inline void tick_broadcast_switch_to_oneshot(void) { } | 62 | static inline void tick_broadcast_switch_to_oneshot(void) { } |
| 63 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 63 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
| 64 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | 64 | static inline int tick_broadcast_oneshot_active(void) { return 0; } |
| @@ -87,7 +87,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
| 87 | { | 87 | { |
| 88 | BUG(); | 88 | BUG(); |
| 89 | } | 89 | } |
| 90 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } | 90 | static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; } |
| 91 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 91 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
| 92 | static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | 92 | static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) |
| 93 | { | 93 | { |
| @@ -111,6 +111,7 @@ extern int tick_resume_broadcast(void); | |||
| 111 | extern void tick_broadcast_init(void); | 111 | extern void tick_broadcast_init(void); |
| 112 | extern void | 112 | extern void |
| 113 | tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); | 113 | tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); |
| 114 | int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq); | ||
| 114 | 115 | ||
| 115 | #else /* !BROADCAST */ | 116 | #else /* !BROADCAST */ |
| 116 | 117 | ||
| @@ -133,6 +134,8 @@ static inline void tick_shutdown_broadcast(unsigned int *cpup) { } | |||
| 133 | static inline void tick_suspend_broadcast(void) { } | 134 | static inline void tick_suspend_broadcast(void) { } |
| 134 | static inline int tick_resume_broadcast(void) { return 0; } | 135 | static inline int tick_resume_broadcast(void) { return 0; } |
| 135 | static inline void tick_broadcast_init(void) { } | 136 | static inline void tick_broadcast_init(void) { } |
| 137 | static inline int tick_broadcast_update_freq(struct clock_event_device *dev, | ||
| 138 | u32 freq) { return -ENODEV; } | ||
| 136 | 139 | ||
| 137 | /* | 140 | /* |
| 138 | * Set the periodic handler in non broadcast mode | 141 | * Set the periodic handler in non broadcast mode |
| @@ -152,6 +155,8 @@ static inline int tick_device_is_functional(struct clock_event_device *dev) | |||
| 152 | return !(dev->features & CLOCK_EVT_FEAT_DUMMY); | 155 | return !(dev->features & CLOCK_EVT_FEAT_DUMMY); |
| 153 | } | 156 | } |
| 154 | 157 | ||
| 158 | int __clockevents_update_freq(struct clock_event_device *dev, u32 freq); | ||
| 159 | |||
| 155 | #endif | 160 | #endif |
| 156 | 161 | ||
| 157 | extern void do_timer(unsigned long ticks); | 162 | extern void do_timer(unsigned long ticks); |
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c index 802433a4f5eb..4d54f97558df 100644 --- a/kernel/time/timekeeping_debug.c +++ b/kernel/time/timekeeping_debug.c | |||
| @@ -21,6 +21,8 @@ | |||
| 21 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
| 22 | #include <linux/time.h> | 22 | #include <linux/time.h> |
| 23 | 23 | ||
| 24 | #include "timekeeping_internal.h" | ||
| 25 | |||
| 24 | static unsigned int sleep_time_bin[32] = {0}; | 26 | static unsigned int sleep_time_bin[32] = {0}; |
| 25 | 27 | ||
| 26 | static int tk_debug_show_sleep_time(struct seq_file *s, void *data) | 28 | static int tk_debug_show_sleep_time(struct seq_file *s, void *data) |
diff --git a/kernel/timer.c b/kernel/timer.c index d78de047599b..87bd529879c2 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -81,6 +81,7 @@ struct tvec_base { | |||
| 81 | unsigned long timer_jiffies; | 81 | unsigned long timer_jiffies; |
| 82 | unsigned long next_timer; | 82 | unsigned long next_timer; |
| 83 | unsigned long active_timers; | 83 | unsigned long active_timers; |
| 84 | unsigned long all_timers; | ||
| 84 | struct tvec_root tv1; | 85 | struct tvec_root tv1; |
| 85 | struct tvec tv2; | 86 | struct tvec tv2; |
| 86 | struct tvec tv3; | 87 | struct tvec tv3; |
| @@ -337,6 +338,20 @@ void set_timer_slack(struct timer_list *timer, int slack_hz) | |||
| 337 | } | 338 | } |
| 338 | EXPORT_SYMBOL_GPL(set_timer_slack); | 339 | EXPORT_SYMBOL_GPL(set_timer_slack); |
| 339 | 340 | ||
| 341 | /* | ||
| 342 | * If the list is empty, catch up ->timer_jiffies to the current time. | ||
| 343 | * The caller must hold the tvec_base lock. Returns true if the list | ||
| 344 | * was empty and therefore ->timer_jiffies was updated. | ||
| 345 | */ | ||
| 346 | static bool catchup_timer_jiffies(struct tvec_base *base) | ||
| 347 | { | ||
| 348 | if (!base->all_timers) { | ||
| 349 | base->timer_jiffies = jiffies; | ||
| 350 | return true; | ||
| 351 | } | ||
| 352 | return false; | ||
| 353 | } | ||
| 354 | |||
| 340 | static void | 355 | static void |
| 341 | __internal_add_timer(struct tvec_base *base, struct timer_list *timer) | 356 | __internal_add_timer(struct tvec_base *base, struct timer_list *timer) |
| 342 | { | 357 | { |
| @@ -383,15 +398,17 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer) | |||
| 383 | 398 | ||
| 384 | static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) | 399 | static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) |
| 385 | { | 400 | { |
| 401 | (void)catchup_timer_jiffies(base); | ||
| 386 | __internal_add_timer(base, timer); | 402 | __internal_add_timer(base, timer); |
| 387 | /* | 403 | /* |
| 388 | * Update base->active_timers and base->next_timer | 404 | * Update base->active_timers and base->next_timer |
| 389 | */ | 405 | */ |
| 390 | if (!tbase_get_deferrable(timer->base)) { | 406 | if (!tbase_get_deferrable(timer->base)) { |
| 391 | if (time_before(timer->expires, base->next_timer)) | 407 | if (!base->active_timers++ || |
| 408 | time_before(timer->expires, base->next_timer)) | ||
| 392 | base->next_timer = timer->expires; | 409 | base->next_timer = timer->expires; |
| 393 | base->active_timers++; | ||
| 394 | } | 410 | } |
| 411 | base->all_timers++; | ||
| 395 | } | 412 | } |
| 396 | 413 | ||
| 397 | #ifdef CONFIG_TIMER_STATS | 414 | #ifdef CONFIG_TIMER_STATS |
| @@ -671,6 +688,8 @@ detach_expired_timer(struct timer_list *timer, struct tvec_base *base) | |||
| 671 | detach_timer(timer, true); | 688 | detach_timer(timer, true); |
| 672 | if (!tbase_get_deferrable(timer->base)) | 689 | if (!tbase_get_deferrable(timer->base)) |
| 673 | base->active_timers--; | 690 | base->active_timers--; |
| 691 | base->all_timers--; | ||
| 692 | (void)catchup_timer_jiffies(base); | ||
| 674 | } | 693 | } |
| 675 | 694 | ||
| 676 | static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, | 695 | static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, |
| @@ -685,6 +704,8 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, | |||
| 685 | if (timer->expires == base->next_timer) | 704 | if (timer->expires == base->next_timer) |
| 686 | base->next_timer = base->timer_jiffies; | 705 | base->next_timer = base->timer_jiffies; |
| 687 | } | 706 | } |
| 707 | base->all_timers--; | ||
| 708 | (void)catchup_timer_jiffies(base); | ||
| 688 | return 1; | 709 | return 1; |
| 689 | } | 710 | } |
| 690 | 711 | ||
| @@ -739,12 +760,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, | |||
| 739 | 760 | ||
| 740 | debug_activate(timer, expires); | 761 | debug_activate(timer, expires); |
| 741 | 762 | ||
| 742 | cpu = smp_processor_id(); | 763 | cpu = get_nohz_timer_target(pinned); |
| 743 | |||
| 744 | #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) | ||
| 745 | if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) | ||
| 746 | cpu = get_nohz_timer_target(); | ||
| 747 | #endif | ||
| 748 | new_base = per_cpu(tvec_bases, cpu); | 764 | new_base = per_cpu(tvec_bases, cpu); |
| 749 | 765 | ||
| 750 | if (base != new_base) { | 766 | if (base != new_base) { |
| @@ -939,8 +955,15 @@ void add_timer_on(struct timer_list *timer, int cpu) | |||
| 939 | * with the timer by holding the timer base lock. This also | 955 | * with the timer by holding the timer base lock. This also |
| 940 | * makes sure that a CPU on the way to stop its tick can not | 956 | * makes sure that a CPU on the way to stop its tick can not |
| 941 | * evaluate the timer wheel. | 957 | * evaluate the timer wheel. |
| 958 | * | ||
| 959 | * Spare the IPI for deferrable timers on idle targets though. | ||
| 960 | * The next busy ticks will take care of it. Except full dynticks | ||
| 961 | * require special care against races with idle_cpu(), lets deal | ||
| 962 | * with that later. | ||
| 942 | */ | 963 | */ |
| 943 | wake_up_nohz_cpu(cpu); | 964 | if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu)) |
| 965 | wake_up_nohz_cpu(cpu); | ||
| 966 | |||
| 944 | spin_unlock_irqrestore(&base->lock, flags); | 967 | spin_unlock_irqrestore(&base->lock, flags); |
| 945 | } | 968 | } |
| 946 | EXPORT_SYMBOL_GPL(add_timer_on); | 969 | EXPORT_SYMBOL_GPL(add_timer_on); |
| @@ -1146,6 +1169,10 @@ static inline void __run_timers(struct tvec_base *base) | |||
| 1146 | struct timer_list *timer; | 1169 | struct timer_list *timer; |
| 1147 | 1170 | ||
| 1148 | spin_lock_irq(&base->lock); | 1171 | spin_lock_irq(&base->lock); |
| 1172 | if (catchup_timer_jiffies(base)) { | ||
| 1173 | spin_unlock_irq(&base->lock); | ||
| 1174 | return; | ||
| 1175 | } | ||
| 1149 | while (time_after_eq(jiffies, base->timer_jiffies)) { | 1176 | while (time_after_eq(jiffies, base->timer_jiffies)) { |
| 1150 | struct list_head work_list; | 1177 | struct list_head work_list; |
| 1151 | struct list_head *head = &work_list; | 1178 | struct list_head *head = &work_list; |
| @@ -1160,7 +1187,7 @@ static inline void __run_timers(struct tvec_base *base) | |||
| 1160 | !cascade(base, &base->tv4, INDEX(2))) | 1187 | !cascade(base, &base->tv4, INDEX(2))) |
| 1161 | cascade(base, &base->tv5, INDEX(3)); | 1188 | cascade(base, &base->tv5, INDEX(3)); |
| 1162 | ++base->timer_jiffies; | 1189 | ++base->timer_jiffies; |
| 1163 | list_replace_init(base->tv1.vec + index, &work_list); | 1190 | list_replace_init(base->tv1.vec + index, head); |
| 1164 | while (!list_empty(head)) { | 1191 | while (!list_empty(head)) { |
| 1165 | void (*fn)(unsigned long); | 1192 | void (*fn)(unsigned long); |
| 1166 | unsigned long data; | 1193 | unsigned long data; |
| @@ -1523,9 +1550,8 @@ static int init_timers_cpu(int cpu) | |||
| 1523 | if (!base) | 1550 | if (!base) |
| 1524 | return -ENOMEM; | 1551 | return -ENOMEM; |
| 1525 | 1552 | ||
| 1526 | /* Make sure that tvec_base is 2 byte aligned */ | 1553 | /* Make sure tvec_base has TIMER_FLAG_MASK bits free */ |
| 1527 | if (tbase_get_deferrable(base)) { | 1554 | if (WARN_ON(base != tbase_get_base(base))) { |
| 1528 | WARN_ON(1); | ||
| 1529 | kfree(base); | 1555 | kfree(base); |
| 1530 | return -ENOMEM; | 1556 | return -ENOMEM; |
| 1531 | } | 1557 | } |
| @@ -1559,6 +1585,7 @@ static int init_timers_cpu(int cpu) | |||
| 1559 | base->timer_jiffies = jiffies; | 1585 | base->timer_jiffies = jiffies; |
| 1560 | base->next_timer = base->timer_jiffies; | 1586 | base->next_timer = base->timer_jiffies; |
| 1561 | base->active_timers = 0; | 1587 | base->active_timers = 0; |
| 1588 | base->all_timers = 0; | ||
| 1562 | return 0; | 1589 | return 0; |
| 1563 | } | 1590 | } |
| 1564 | 1591 | ||
| @@ -1648,9 +1675,9 @@ void __init init_timers(void) | |||
| 1648 | 1675 | ||
| 1649 | err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, | 1676 | err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
| 1650 | (void *)(long)smp_processor_id()); | 1677 | (void *)(long)smp_processor_id()); |
| 1651 | init_timer_stats(); | ||
| 1652 | |||
| 1653 | BUG_ON(err != NOTIFY_OK); | 1678 | BUG_ON(err != NOTIFY_OK); |
| 1679 | |||
| 1680 | init_timer_stats(); | ||
| 1654 | register_cpu_notifier(&timers_nb); | 1681 | register_cpu_notifier(&timers_nb); |
| 1655 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); | 1682 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); |
| 1656 | } | 1683 | } |
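The new all_timers count (active plus deferrable) gives the wheel a cheap emptiness test: catchup_timer_jiffies() snaps timer_jiffies straight to the current jiffies whenever the base is empty, so __run_timers() no longer cascades through a long run of empty buckets after an extended NO_HZ idle period. Condensed from the hunks above:

    /* The empty-wheel fast path (caller holds base->lock). */
    static bool catchup_timer_jiffies(struct tvec_base *base)
    {
            if (!base->all_timers) {
                    base->timer_jiffies = jiffies;  /* skip the empty buckets */
                    return true;
            }
            return false;
    }

    /* ... and __run_timers() now returns immediately when nothing is queued: */
    spin_lock_irq(&base->lock);
    if (catchup_timer_jiffies(base)) {
            spin_unlock_irq(&base->lock);
            return;
    }
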
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 3fa5b8f3aae3..0ee63af30bd1 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -516,6 +516,13 @@ void destroy_work_on_stack(struct work_struct *work) | |||
| 516 | } | 516 | } |
| 517 | EXPORT_SYMBOL_GPL(destroy_work_on_stack); | 517 | EXPORT_SYMBOL_GPL(destroy_work_on_stack); |
| 518 | 518 | ||
| 519 | void destroy_delayed_work_on_stack(struct delayed_work *work) | ||
| 520 | { | ||
| 521 | destroy_timer_on_stack(&work->timer); | ||
| 522 | debug_object_free(&work->work, &work_debug_descr); | ||
| 523 | } | ||
| 524 | EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack); | ||
| 525 | |||
| 519 | #else | 526 | #else |
| 520 | static inline void debug_work_activate(struct work_struct *work) { } | 527 | static inline void debug_work_activate(struct work_struct *work) { } |
| 521 | static inline void debug_work_deactivate(struct work_struct *work) { } | 528 | static inline void debug_work_deactivate(struct work_struct *work) { } |
