47 files changed, 562 insertions, 567 deletions
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 4f8e33952b88..b57c0c1cdac6 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -152,6 +152,15 @@ no non-lazy callbacks ("." is printed otherwise, as shown above) and
 "D" indicates that dyntick-idle processing is enabled ("." is printed
 otherwise, for example, if disabled via the "nohz=" kernel boot parameter).
 
+If the relevant grace-period kthread has been unable to run prior to
+the stall warning, the following additional line is printed:
+
+	rcu_preempt kthread starved for 2023 jiffies!
+
+Starving the grace-period kthreads of CPU time can of course result in
+RCU CPU stall warnings even when all CPUs and tasks have passed through
+the required quiescent states.
+
 
 Multiple Warnings From One Stall
 
@@ -187,6 +196,11 @@ o	For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
 	behavior, you might need to replace some of the cond_resched()
 	calls with calls to cond_resched_rcu_qs().
 
+o	Anything that prevents RCU's grace-period kthreads from running.
+	This can result in the "All QSes seen" console-log message.
+	This message will include information on when the kthread last
+	ran and how often it should be expected to run.
+
 o	A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
 	happen to preempt a low-priority task in the middle of an RCU
 	read-side critical section. This is especially damaging if
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index b63b9bb3bc0c..08651da15448 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -56,14 +56,14 @@ rcuboost:
 
 The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
 
-0!c=30455 g=30456 pq=1 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
-1!c=30719 g=30720 pq=1 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
-2!c=30150 g=30151 pq=1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
-3 c=31249 g=31250 pq=1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
-4!c=29502 g=29503 pq=1 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
-5 c=31201 g=31202 pq=1 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
-6!c=30253 g=30254 pq=1 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
-7 c=31178 g=31178 pq=1 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
+0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
+1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
+2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
+3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
+4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
+5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
+6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
+7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
 
 This file has one line per CPU, or eight for this 8-CPU system.
 The fields are as follows:
@@ -188,14 +188,14 @@ o	"ca" is the number of RCU callbacks that have been adopted by this
 Kernels compiled with CONFIG_RCU_BOOST=y display the following from
 /debug/rcu/rcu_preempt/rcudata:
 
-0!c=12865 g=12866 pq=1 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
-1 c=14407 g=14408 pq=1 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
-2 c=14407 g=14408 pq=1 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
-3 c=14407 g=14408 pq=1 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
-4 c=14405 g=14406 pq=1 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
-5!c=14168 g=14169 pq=1 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
-6 c=14404 g=14405 pq=1 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
-7 c=14407 g=14408 pq=1 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
+0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
+1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
+2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
+3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
+4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
+5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
+6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
+7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
 
 This is similar to the output discussed above, but contains the following
 additional fields:
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 466bd299b1a8..3afee5f40f4f 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -23,6 +23,7 @@ config KVM
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_ARM_HOST
+	select SRCU
 	depends on ARM_VIRT_EXT && ARM_LPAE
 	---help---
 	  Support hosting virtualized guest machines. You will also
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 8ba85e9ea388..b334084d3675 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
 	select KVM_ARM_HOST
 	select KVM_ARM_VGIC
 	select KVM_ARM_TIMER
+	select SRCU
 	---help---
 	  Support hosting virtualized guest machines.
 
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 30e334e823bd..2ae12825529f 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select KVM_MMIO
+	select SRCU
 	---help---
 	  Support for hosting Guest kernels.
 	  Currently supported on MIPS32 processors.
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index f5769f19ae25..11850f310fb4 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select HAVE_KVM_EVENTFD
+	select SRCU
 
 config KVM_BOOK3S_HANDLER
 	bool
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 646db9c467d1..5fce52cf0e57 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -28,6 +28,7 @@ config KVM
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQFD
 	select HAVE_KVM_IRQ_ROUTING
+	select SRCU
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
index 2298cb1daff7..1e968f7550dc 100644
--- a/arch/tile/kvm/Kconfig
+++ b/arch/tile/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
 	depends on HAVE_KVM && MODULES
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select SRCU
 	---help---
 	  Support hosting paravirtualized guest machines.
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba397bde7948..661269953c1a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -138,6 +138,7 @@ config X86
 	select HAVE_ACPI_APEI_NMI if ACPI
 	select ACPI_LEGACY_TABLES_LOOKUP if ACPI
 	select X86_FEATURE_NAMES if PROC_FS
+	select SRCU
 
 config INSTRUCTION_DECODER
 	def_bool y
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index f9d16ff56c6b..7dc7ba577ecd 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -40,6 +40,7 @@ config KVM
 	select HAVE_KVM_MSI
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_VFIO
+	select SRCU
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions. You will need a fairly recent
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 3f44f292d066..91f86131bb7a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -13,6 +13,7 @@ config COMMON_CLK
 	bool
 	select HAVE_CLK_PREPARE
 	select CLKDEV_LOOKUP
+	select SRCU
 	---help---
 	  The common clock framework is a single definition of struct
 	  clk, useful across many platforms, as well as an
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 29b2ef5a68b9..a171fef2c2b6 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -2,6 +2,7 @@ menu "CPU Frequency scaling"
 
 config CPU_FREQ
 	bool "CPU Frequency scaling"
+	select SRCU
 	help
 	  CPU Frequency scaling allows you to change the clock speed of
 	  CPUs on the fly. This is a nice method to save power, because
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index faf4e70c42e0..3891f6781298 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,5 +1,6 @@
 menuconfig PM_DEVFREQ
 	bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
+	select SRCU
 	help
 	  A device may have a list of frequencies and voltages available.
 	  devfreq, a generic DVFS framework can be registered for a device
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 5bdedf6df153..c355a226a024 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -5,6 +5,7 @@
 menuconfig MD
 	bool "Multiple devices driver support (RAID and LVM)"
 	depends on BLOCK
+	select SRCU
 	help
 	  Support multiple physical spindles through a single logical device.
 	  Required for RAID and logical volume management.
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d6607ee9c855..84673ebcf428 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -197,6 +197,7 @@ config NETCONSOLE_DYNAMIC
 
 config NETPOLL
 	def_bool NETCONSOLE
+	select SRCU
 
 config NET_POLL_CONTROLLER
 	def_bool NETPOLL
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index a66768ebc8d1..80e9c18ea64f 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -8,6 +8,7 @@ config BTRFS_FS
 	select LZO_DECOMPRESS
 	select RAID6_PQ
 	select XOR_BLOCKS
+	select SRCU
 
 	help
 	  Btrfs is a general purpose copy-on-write filesystem with extents,
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 22c629eedd82..2a24249b30af 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -1,5 +1,6 @@
 config FSNOTIFY
 	def_bool n
+	select SRCU
 
 source "fs/notify/dnotify/Kconfig"
 source "fs/notify/inotify/Kconfig"
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index c51df1dd237e..4a09975aac90 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -5,6 +5,7 @@
 config QUOTA
 	bool "Quota support"
 	select QUOTACTL
+	select SRCU
 	help
 	  If you say Y here, you will be able to set per user limits for disk
 	  usage (also called disk quotas). Currently, it works for the
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a1c81f80978e..49811cdddaa5 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -385,7 +385,7 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 
 /* Is this type a native word size -- useful for atomic operations */
 #ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
 /* Compile time object size, -1 for unknown */
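The widened __native_word() check matters because generic helpers such as smp_load_acquire() and smp_store_release() gate their compile-time size checks on it, and one- and two-byte quantities are legitimate targets for those helpers. A minimal stand-alone sketch of the effect (the demo struct and field names are illustrative, not from this patch):

#include <stddef.h>

/* Same shape as the patched macro above. */
#define __native_word(t) \
	(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
	 sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))

struct demo {
	unsigned short seq;	/* 2-byte field: rejected by the old macro */
};

/* Compiles only with the widened definition. */
_Static_assert(__native_word(((struct demo *)0)->seq),
	       "acquire/release helpers require a native word size");
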
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 529bc946f450..a18b16f1dc0e 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @member: the name of the hlist_node within the struct.
  */
 #define hlist_for_each_entry_continue_rcu(pos, member)			\
-	for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
-			typeof(*(pos)), member);			\
+	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+			&(pos)->member)), typeof(*(pos)), member);	\
 	     pos;							\
-	     pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
-			typeof(*(pos)), member))
+	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+			&(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
@@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @member: the name of the hlist_node within the struct.
  */
 #define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
-	for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
-			typeof(*(pos)), member);			\
+	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
+			&(pos)->member)), typeof(*(pos)), member);	\
 	     pos;							\
-	     pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
-			typeof(*(pos)), member))
+	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
+			&(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
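A usage sketch for the continue-style iterator (types and function are illustrative; the caller is assumed to already hold rcu_read_lock() and to pass a non-NULL starting position):

#include <linux/rculist.h>

struct demo_entry {
	int key;
	struct hlist_node node;
};

/* Resume an RCU-protected walk just after 'pos', returning the next match. */
static struct demo_entry *demo_find_next(struct demo_entry *pos, int key)
{
	hlist_for_each_entry_continue_rcu(pos, node) {
		if (pos->key == key)
			return pos;
	}
	return NULL;
}
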
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ed4f5939a452..78097491cd99 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -331,12 +331,13 @@ static inline void rcu_init_nohz(void)
 extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch(t) \
 	do { \
+		rcu_all_qs(); \
 		if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
 			ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
 	} while (0)
 #else /* #ifdef CONFIG_TASKS_RCU */
 #define TASKS_RCU(x) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 /**
@@ -582,11 +583,11 @@ static inline void rcu_preempt_sleep_check(void)
 })
 #define __rcu_dereference_check(p, c, space) \
 ({ \
-	typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+	/* Dependency order vs. p above. */ \
+	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
 	rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
 	rcu_dereference_sparse(p, space); \
-	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
-	((typeof(*p) __force __kernel *)(_________p1)); \
+	((typeof(*p) __force __kernel *)(________p1)); \
 })
 #define __rcu_dereference_protected(p, c, space) \
 ({ \
@@ -603,10 +604,10 @@ static inline void rcu_preempt_sleep_check(void)
 })
 #define __rcu_dereference_index_check(p, c) \
 ({ \
-	typeof(p) _________p1 = ACCESS_ONCE(p); \
+	/* Dependency order vs. p above. */ \
+	typeof(p) _________p1 = lockless_dereference(p); \
 	rcu_lockdep_assert(c, \
 			   "suspicious rcu_dereference_index_check() usage"); \
-	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
 	(_________p1); \
 })
 
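Both macros now delegate the load-plus-dependency-barrier sequence to lockless_dereference(); a sketch of that helper's shape in this era's include/linux/compiler.h:

#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = ACCESS_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})

The semantics of __rcu_dereference_check() and __rcu_dereference_index_check() are thus unchanged; the barrier simply moves inside the helper.
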
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 0e5366200154..937edaeb150d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu)
 }
 
 /*
- * Return the number of grace periods.
+ * Return the number of grace periods started.
  */
-static inline long rcu_batches_completed(void)
+static inline unsigned long rcu_batches_started(void)
 {
 	return 0;
 }
 
 /*
- * Return the number of bottom-half grace periods.
+ * Return the number of bottom-half grace periods started.
  */
-static inline long rcu_batches_completed_bh(void)
+static inline unsigned long rcu_batches_started_bh(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of sched grace periods started.
+ */
+static inline unsigned long rcu_batches_started_sched(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of bottom-half grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_bh(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of sched grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_sched(void)
 {
 	return 0;
 }
@@ -154,7 +186,10 @@ static inline bool rcu_is_watching(void)
 	return true;
 }
 
-
 #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
+static inline void rcu_all_qs(void)
+{
+}
+
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 52953790dcca..d2e583a6aaca 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -81,9 +81,12 @@ void cond_synchronize_rcu(unsigned long oldstate);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
-long rcu_batches_completed(void);
-long rcu_batches_completed_bh(void);
-long rcu_batches_completed_sched(void);
+unsigned long rcu_batches_started(void);
+unsigned long rcu_batches_started_bh(void);
+unsigned long rcu_batches_started_sched(void);
+unsigned long rcu_batches_completed(void);
+unsigned long rcu_batches_completed_bh(void);
+unsigned long rcu_batches_completed_sched(void);
 void show_rcu_gp_kthreads(void);
 
 void rcu_force_quiescent_state(void);
@@ -97,4 +100,6 @@ extern int rcu_scheduler_active __read_mostly;
 
 bool rcu_is_watching(void);
 
+void rcu_all_qs(void);
+
 #endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a2783cb5d275..9cfd9623fb03 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -45,7 +45,7 @@ struct rcu_batch {
 #define RCU_BATCH_INIT(name) { NULL, &(name.head) }
 
 struct srcu_struct {
-	unsigned completed;
+	unsigned long completed;
 	struct srcu_struct_array __percpu *per_cpu_ref;
 	spinlock_t queue_lock; /* protect ->batch_queue, ->running */
 	bool running;
@@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work);
  * define and init a srcu struct at build time.
  * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
  */
-#define DEFINE_SRCU(name) \
+#define __DEFINE_SRCU(name, is_static) \
 	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-	struct srcu_struct name = __SRCU_STRUCT_INIT(name);
-
-#define DEFINE_STATIC_SRCU(name) \
-	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-	static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+	is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name)	__DEFINE_SRCU(name, static)
 
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
 void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
 void synchronize_srcu(struct srcu_struct *sp);
 void synchronize_srcu_expedited(struct srcu_struct *sp);
-long srcu_batches_completed(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
 void srcu_barrier(struct srcu_struct *sp);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
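A sketch of how the build-time initializer pairs with the read-side API (the demo_* names are illustrative, not from this patch):

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(demo_srcu);	/* file-scope domain; no init_srcu_struct() */

static void demo_reader(void)
{
	int idx;

	idx = srcu_read_lock(&demo_srcu);
	/* Read-side section: sleeping is legal under SRCU. */
	srcu_read_unlock(&demo_srcu, idx);
}

static void demo_updater(void)
{
	/* Unlink the object from the data structure, then: */
	synchronize_srcu(&demo_srcu);	/* wait for pre-existing readers */
}
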
diff --git a/init/Kconfig b/init/Kconfig
index 9afb971497f4..1354ac09b516 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -470,7 +470,6 @@ choice
 config TREE_RCU
 	bool "Tree-based hierarchical RCU"
 	depends on !PREEMPT && SMP
-	select IRQ_WORK
 	help
 	  This option selects the RCU implementation that is
 	  designed for very large SMP system with hundreds or
@@ -480,7 +479,6 @@ config TREE_RCU
 config PREEMPT_RCU
 	bool "Preemptible tree-based hierarchical RCU"
 	depends on PREEMPT
-	select IRQ_WORK
 	help
 	  This option selects the RCU implementation that is
 	  designed for very large SMP systems with hundreds or
@@ -501,9 +499,17 @@ config TINY_RCU
 
 endchoice
 
+config SRCU
+	bool
+	help
+	  This option selects the sleepable version of RCU. This version
+	  permits arbitrary sleeping or blocking within RCU read-side critical
+	  sections.
+
 config TASKS_RCU
 	bool "Task_based RCU implementation using voluntary context switch"
 	default n
+	select SRCU
 	help
 	  This option enables a task-based RCU implementation that uses
 	  only voluntary context switch (not preemption!), idle, and
@@ -668,9 +674,10 @@ config RCU_BOOST
 
 config RCU_KTHREAD_PRIO
 	int "Real-time priority to use for RCU worker threads"
-	range 1 99
-	depends on RCU_BOOST
-	default 1
+	range 1 99 if RCU_BOOST
+	range 0 99 if !RCU_BOOST
+	default 1 if RCU_BOOST
+	default 0 if !RCU_BOOST
 	help
 	  This option specifies the SCHED_FIFO priority value that will be
 	  assigned to the rcuc/n and rcub/n threads and is also the value
@@ -1595,6 +1602,7 @@ config PERF_EVENTS
 	depends on HAVE_PERF_EVENTS
 	select ANON_INODES
 	select IRQ_WORK
+	select SRCU
 	help
 	  Enable kernel support for various performance events provided
 	  by software and hardware.
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5d220234b3ca..1972b161c61e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -58,22 +58,23 @@ static int cpu_hotplug_disabled;
 
 static struct {
 	struct task_struct *active_writer;
-	struct mutex lock; /* Synchronizes accesses to refcount, */
+	/* wait queue to wake up the active_writer */
+	wait_queue_head_t wq;
+	/* verifies that no writer will get active while readers are active */
+	struct mutex lock;
 	/*
 	 * Also blocks the new readers during
 	 * an ongoing cpu hotplug operation.
 	 */
-	int refcount;
-	/* And allows lockless put_online_cpus(). */
-	atomic_t puts_pending;
+	atomic_t refcount;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
 } cpu_hotplug = {
 	.active_writer = NULL,
+	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
 	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-	.refcount = 0,
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	.dep_map = {.name = "cpu_hotplug.lock" },
 #endif
@@ -86,15 +87,6 @@ static struct {
 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
 
-static void apply_puts_pending(int max)
-{
-	int delta;
-
-	if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
-		delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
-		cpu_hotplug.refcount -= delta;
-	}
-}
 
 void get_online_cpus(void)
 {
@@ -103,8 +95,7 @@ void get_online_cpus(void)
 		return;
 	cpuhp_lock_acquire_read();
 	mutex_lock(&cpu_hotplug.lock);
-	apply_puts_pending(65536);
-	cpu_hotplug.refcount++;
+	atomic_inc(&cpu_hotplug.refcount);
 	mutex_unlock(&cpu_hotplug.lock);
 }
 EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -116,8 +107,7 @@ bool try_get_online_cpus(void)
 	if (!mutex_trylock(&cpu_hotplug.lock))
 		return false;
 	cpuhp_lock_acquire_tryread();
-	apply_puts_pending(65536);
-	cpu_hotplug.refcount++;
+	atomic_inc(&cpu_hotplug.refcount);
 	mutex_unlock(&cpu_hotplug.lock);
 	return true;
 }
@@ -125,20 +115,18 @@ EXPORT_SYMBOL_GPL(try_get_online_cpus);
 
 void put_online_cpus(void)
 {
+	int refcount;
+
 	if (cpu_hotplug.active_writer == current)
 		return;
-	if (!mutex_trylock(&cpu_hotplug.lock)) {
-		atomic_inc(&cpu_hotplug.puts_pending);
-		cpuhp_lock_release();
-		return;
-	}
 
-	if (WARN_ON(!cpu_hotplug.refcount))
-		cpu_hotplug.refcount++; /* try to fix things up */
+	refcount = atomic_dec_return(&cpu_hotplug.refcount);
+	if (WARN_ON(refcount < 0)) /* try to fix things up */
+		atomic_inc(&cpu_hotplug.refcount);
+
+	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
+		wake_up(&cpu_hotplug.wq);
 
-	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
-		wake_up_process(cpu_hotplug.active_writer);
-	mutex_unlock(&cpu_hotplug.lock);
 	cpuhp_lock_release();
 
 }
@@ -168,18 +156,20 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
  */
 void cpu_hotplug_begin(void)
 {
-	cpu_hotplug.active_writer = current;
+	DEFINE_WAIT(wait);
 
+	cpu_hotplug.active_writer = current;
 	cpuhp_lock_acquire();
+
 	for (;;) {
 		mutex_lock(&cpu_hotplug.lock);
-		apply_puts_pending(1);
-		if (likely(!cpu_hotplug.refcount))
+		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
+		if (likely(!atomic_read(&cpu_hotplug.refcount)))
 			break;
-		__set_current_state(TASK_UNINTERRUPTIBLE);
 		mutex_unlock(&cpu_hotplug.lock);
 		schedule();
 	}
+	finish_wait(&cpu_hotplug.wq, &wait);
 }
 
 void cpu_hotplug_done(void)
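The new writer-side loop is the classic prepare_to_wait()/finish_wait() pattern: the writer publishes itself on the waitqueue before testing the reader count, so a reader's lockless atomic_dec_return()/wake_up() in put_online_cpus() cannot be missed. A stand-alone sketch of the same pattern (illustrative helper, not kernel code):

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Returns with 'lock' held once *refcount has drained to zero. */
static void demo_wait_for_readers(wait_queue_head_t *wq, atomic_t *refcount,
				  struct mutex *lock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		mutex_lock(lock);
		/* Enqueue before testing, closing the wake-up race. */
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(refcount)))
			break;
		mutex_unlock(lock);
		schedule();
	}
	finish_wait(wq, &wait);
}
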
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4803da6eab62..ae9fc7cc360e 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -402,6 +402,7 @@ int raw_notifier_call_chain(struct raw_notifier_head *nh,
 }
 EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
 
+#ifdef CONFIG_SRCU
 /*
  *	SRCU notifier chain routines.    Registration and unregistration
  *	use a mutex, and call_chain is synchronized by SRCU (no locks).
@@ -528,6 +529,8 @@ void srcu_init_notifier_head(struct srcu_notifier_head *nh)
 }
 EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
 
+#endif /* CONFIG_SRCU */
+
 static ATOMIC_NOTIFIER_HEAD(die_chain);
 
 int notrace notify_die(enum die_val val, const char *str,
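With the chain routines now conditional, any SRCU-notifier user must make sure CONFIG_SRCU gets selected. A sketch of typical use of this API (demo_* names are illustrative, not from this patch):

#include <linux/notifier.h>

static SRCU_NOTIFIER_HEAD(demo_chain);

static int demo_callback(struct notifier_block *nb, unsigned long val, void *v)
{
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_callback,
};

static void demo_use(void)
{
	srcu_notifier_chain_register(&demo_chain, &demo_nb);
	srcu_notifier_call_chain(&demo_chain, 0, NULL);	/* may block */
	srcu_notifier_chain_unregister(&demo_chain, &demo_nb);
}
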
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 48b28d387c7f..7e01f78f0417 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -251,6 +251,7 @@ config APM_EMULATION
 
 config PM_OPP
 	bool
+	select SRCU
 	---help---
 	  SOCs have a standard set of tuples consisting of frequency and
 	  voltage pairs that the device will support per voltage domain. This
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index e6fae503d1bc..50a808424b06 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -1,4 +1,5 @@
-obj-y += update.o srcu.o
+obj-y += update.o
+obj-$(CONFIG_SRCU) += srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_PREEMPT_RCU) += tree.o
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 07bb02eda844..80adef7d4c3d 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -137,4 +137,10 @@ int rcu_jiffies_till_stall_check(void);
 
 void rcu_early_boot_tests(void);
 
+/*
+ * This function really isn't for public consumption, but RCU is special in
+ * that context switches can allow the state machine to make progress.
+ */
+extern void resched_cpu(int cpu);
+
 #endif /* __LINUX_RCU_H */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 4d559baf06e0..30d42aa55d83 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -244,7 +244,8 @@ struct rcu_torture_ops {
 	int (*readlock)(void);
 	void (*read_delay)(struct torture_random_state *rrsp);
 	void (*readunlock)(int idx);
-	int (*completed)(void);
+	unsigned long (*started)(void);
+	unsigned long (*completed)(void);
 	void (*deferred_free)(struct rcu_torture *p);
 	void (*sync)(void);
 	void (*exp_sync)(void);
@@ -296,11 +297,6 @@ static void rcu_torture_read_unlock(int idx) __releases(RCU)
 	rcu_read_unlock();
 }
 
-static int rcu_torture_completed(void)
-{
-	return rcu_batches_completed();
-}
-
 /*
  * Update callback in the pipe. This should be invoked after a grace period.
  */
@@ -356,7 +352,7 @@ rcu_torture_cb(struct rcu_head *p)
 		cur_ops->deferred_free(rp);
 }
 
-static int rcu_no_completed(void)
+static unsigned long rcu_no_completed(void)
 {
 	return 0;
 }
@@ -377,7 +373,8 @@ static struct rcu_torture_ops rcu_ops = {
 	.readlock	= rcu_torture_read_lock,
 	.read_delay	= rcu_read_delay,
 	.readunlock	= rcu_torture_read_unlock,
-	.completed	= rcu_torture_completed,
+	.started	= rcu_batches_started,
+	.completed	= rcu_batches_completed,
 	.deferred_free	= rcu_torture_deferred_free,
 	.sync		= synchronize_rcu,
 	.exp_sync	= synchronize_rcu_expedited,
@@ -407,11 +404,6 @@ static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
 	rcu_read_unlock_bh();
 }
 
-static int rcu_bh_torture_completed(void)
-{
-	return rcu_batches_completed_bh();
-}
-
 static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
 {
 	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
@@ -423,7 +415,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
 	.readlock	= rcu_bh_torture_read_lock,
 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 	.readunlock	= rcu_bh_torture_read_unlock,
-	.completed	= rcu_bh_torture_completed,
+	.started	= rcu_batches_started_bh,
+	.completed	= rcu_batches_completed_bh,
 	.deferred_free	= rcu_bh_torture_deferred_free,
 	.sync		= synchronize_rcu_bh,
 	.exp_sync	= synchronize_rcu_bh_expedited,
@@ -466,6 +459,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
 	.readlock	= rcu_torture_read_lock,
 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 	.readunlock	= rcu_torture_read_unlock,
+	.started	= rcu_no_completed,
 	.completed	= rcu_no_completed,
 	.deferred_free	= rcu_busted_torture_deferred_free,
 	.sync		= synchronize_rcu_busted,
@@ -510,7 +504,7 @@ static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
 	srcu_read_unlock(&srcu_ctl, idx);
 }
 
-static int srcu_torture_completed(void)
+static unsigned long srcu_torture_completed(void)
 {
 	return srcu_batches_completed(&srcu_ctl);
 }
@@ -564,6 +558,7 @@ static struct rcu_torture_ops srcu_ops = {
 	.readlock	= srcu_torture_read_lock,
 	.read_delay	= srcu_read_delay,
 	.readunlock	= srcu_torture_read_unlock,
+	.started	= NULL,
 	.completed	= srcu_torture_completed,
 	.deferred_free	= srcu_torture_deferred_free,
 	.sync		= srcu_torture_synchronize,
@@ -600,7 +595,8 @@ static struct rcu_torture_ops sched_ops = {
 	.readlock	= sched_torture_read_lock,
 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 	.readunlock	= sched_torture_read_unlock,
-	.completed	= rcu_no_completed,
+	.started	= rcu_batches_started_sched,
+	.completed	= rcu_batches_completed_sched,
 	.deferred_free	= rcu_sched_torture_deferred_free,
 	.sync		= synchronize_sched,
 	.exp_sync	= synchronize_sched_expedited,
@@ -638,6 +634,7 @@ static struct rcu_torture_ops tasks_ops = {
 	.readlock	= tasks_torture_read_lock,
 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 	.readunlock	= tasks_torture_read_unlock,
+	.started	= rcu_no_completed,
 	.completed	= rcu_no_completed,
 	.deferred_free	= rcu_tasks_torture_deferred_free,
 	.sync		= synchronize_rcu_tasks,
@@ -1015,8 +1012,8 @@ static void rcutorture_trace_dump(void)
 static void rcu_torture_timer(unsigned long unused)
 {
 	int idx;
-	int completed;
-	int completed_end;
+	unsigned long started;
+	unsigned long completed;
 	static DEFINE_TORTURE_RANDOM(rand);
 	static DEFINE_SPINLOCK(rand_lock);
 	struct rcu_torture *p;
@@ -1024,7 +1021,10 @@ static void rcu_torture_timer(unsigned long unused)
 	unsigned long long ts;
 
 	idx = cur_ops->readlock();
-	completed = cur_ops->completed();
+	if (cur_ops->started)
+		started = cur_ops->started();
+	else
+		started = cur_ops->completed();
 	ts = rcu_trace_clock_local();
 	p = rcu_dereference_check(rcu_torture_current,
 				  rcu_read_lock_bh_held() ||
@@ -1047,14 +1047,16 @@ static void rcu_torture_timer(unsigned long unused)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	completed_end = cur_ops->completed();
+	completed = cur_ops->completed();
 	if (pipe_count > 1) {
 		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
-					  completed, completed_end);
+					  started, completed);
 		rcutorture_trace_dump();
 	}
 	__this_cpu_inc(rcu_torture_count[pipe_count]);
-	completed = completed_end - completed;
+	completed = completed - started;
+	if (cur_ops->started)
+		completed++;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
@@ -1073,8 +1075,8 @@ static void rcu_torture_timer(unsigned long unused)
 static int
 rcu_torture_reader(void *arg)
 {
-	int completed;
-	int completed_end;
+	unsigned long started;
+	unsigned long completed;
 	int idx;
 	DEFINE_TORTURE_RANDOM(rand);
 	struct rcu_torture *p;
@@ -1093,7 +1095,10 @@ rcu_torture_reader(void *arg)
 			mod_timer(&t, jiffies + 1);
 		}
 		idx = cur_ops->readlock();
-		completed = cur_ops->completed();
+		if (cur_ops->started)
+			started = cur_ops->started();
+		else
+			started = cur_ops->completed();
 		ts = rcu_trace_clock_local();
 		p = rcu_dereference_check(rcu_torture_current,
 					  rcu_read_lock_bh_held() ||
@@ -1114,14 +1119,16 @@ rcu_torture_reader(void *arg)
 			/* Should not happen, but... */
 			pipe_count = RCU_TORTURE_PIPE_LEN;
 		}
-		completed_end = cur_ops->completed();
+		completed = cur_ops->completed();
 		if (pipe_count > 1) {
 			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
-						  ts, completed, completed_end);
+						  ts, started, completed);
 			rcutorture_trace_dump();
 		}
 		__this_cpu_inc(rcu_torture_count[pipe_count]);
-		completed = completed_end - completed;
+		completed = completed - started;
+		if (cur_ops->started)
+			completed++;
 		if (completed > RCU_TORTURE_PIPE_LEN) {
 			/* Should not happen, but... */
 			completed = RCU_TORTURE_PIPE_LEN;
@@ -1420,6 +1427,9 @@ static int rcu_torture_barrier(void *arg)
 		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
 			n_rcu_torture_barrier_error++;
+			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
+			       atomic_read(&barrier_cbs_invoked),
+			       n_barrier_cbs);
 			WARN_ON_ONCE(1);
 		}
 		n_barrier_successes++;
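The off-by-one in the grace-period arithmetic above can be read as a small helper (a sketch, not part of the patch): when a flavor supplies ->started(), the readlock-time snapshot counts started grace periods, so a reader that observed N completions past its snapshot may actually have spanned N + 1 grace periods.

static unsigned long demo_gps_spanned(unsigned long started)
{
	unsigned long completed = cur_ops->completed();

	completed = completed - started;	/* unsigned: wrap-safe */
	if (cur_ops->started)
		completed++;			/* snapshot was of GP starts */
	return completed;
}
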
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index e037f3eb2f7b..445bf8ffe3fb 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(srcu_barrier);
  * Report the number of batches, correlated with, but not necessarily
  * precisely the same as, the number of grace periods that have elapsed.
  */
-long srcu_batches_completed(struct srcu_struct *sp)
+unsigned long srcu_batches_completed(struct srcu_struct *sp)
 {
 	return sp->completed;
 }
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 0db5649f8817..cc9ceca7bde1 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head,
 		       void (*func)(struct rcu_head *rcu),
 		       struct rcu_ctrlblk *rcp);
 
-static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-
 #include "tiny_plugin.h"
 
-/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
-static void rcu_idle_enter_common(long long newval)
-{
-	if (newval) {
-		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
-					    rcu_dynticks_nesting, newval));
-		rcu_dynticks_nesting = newval;
-		return;
-	}
-	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
-				    rcu_dynticks_nesting, newval));
-	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
-					    rcu_dynticks_nesting, newval));
-		ftrace_dump(DUMP_ALL);
-		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-			  current->pid, current->comm,
-			  idle->pid, idle->comm); /* must be idle task! */
-	}
-	rcu_sched_qs(); /* implies rcu_bh_inc() */
-	barrier();
-	rcu_dynticks_nesting = newval;
-}
-
 /*
  * Enter idle, which is an extended quiescent state if we have fully
- * entered that mode (i.e., if the new value of dynticks_nesting is zero).
+ * entered that mode.
  */
 void rcu_idle_enter(void)
 {
-	unsigned long flags;
-	long long newval;
-
-	local_irq_save(flags);
-	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
-	    DYNTICK_TASK_NEST_VALUE)
-		newval = 0;
-	else
-		newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
-	rcu_idle_enter_common(newval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
@@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
  */
 void rcu_irq_exit(void)
 {
-	unsigned long flags;
-	long long newval;
-
-	local_irq_save(flags);
-	newval = rcu_dynticks_nesting - 1;
-	WARN_ON_ONCE(newval < 0);
-	rcu_idle_enter_common(newval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_exit);
 
-/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
-static void rcu_idle_exit_common(long long oldval)
-{
-	if (oldval) {
-		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
-					    oldval, rcu_dynticks_nesting));
-		return;
-	}
-	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
-	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
-					    oldval, rcu_dynticks_nesting));
-		ftrace_dump(DUMP_ALL);
-		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-			  current->pid, current->comm,
-			  idle->pid, idle->comm); /* must be idle task! */
-	}
-}
-
 /*
  * Exit idle, so that we are no longer in an extended quiescent state.
  */
 void rcu_idle_exit(void)
 {
-	unsigned long flags;
-	long long oldval;
-
-	local_irq_save(flags);
-	oldval = rcu_dynticks_nesting;
-	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
-	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
-		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-	else
-		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
153 | rcu_idle_exit_common(oldval); | ||
154 | local_irq_restore(flags); | ||
155 | } | 74 | } |
156 | EXPORT_SYMBOL_GPL(rcu_idle_exit); | 75 | EXPORT_SYMBOL_GPL(rcu_idle_exit); |
157 | 76 | ||
@@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit); | |||
160 | */ | 79 | */ |
161 | void rcu_irq_enter(void) | 80 | void rcu_irq_enter(void) |
162 | { | 81 | { |
163 | unsigned long flags; | ||
164 | long long oldval; | ||
165 | |||
166 | local_irq_save(flags); | ||
167 | oldval = rcu_dynticks_nesting; | ||
168 | rcu_dynticks_nesting++; | ||
169 | WARN_ON_ONCE(rcu_dynticks_nesting == 0); | ||
170 | rcu_idle_exit_common(oldval); | ||
171 | local_irq_restore(flags); | ||
172 | } | 82 | } |
173 | EXPORT_SYMBOL_GPL(rcu_irq_enter); | 83 | EXPORT_SYMBOL_GPL(rcu_irq_enter); |
174 | 84 | ||
@@ -179,23 +89,13 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter); | |||
179 | */ | 89 | */ |
180 | bool notrace __rcu_is_watching(void) | 90 | bool notrace __rcu_is_watching(void) |
181 | { | 91 | { |
182 | return rcu_dynticks_nesting; | 92 | return true; |
183 | } | 93 | } |
184 | EXPORT_SYMBOL(__rcu_is_watching); | 94 | EXPORT_SYMBOL(__rcu_is_watching); |
185 | 95 | ||
186 | #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ | 96 | #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ |
187 | 97 | ||
188 | /* | 98 | /* |
189 | * Test whether the current CPU was interrupted from idle. Nested | ||
190 | * interrupts don't count, we must be running at the first interrupt | ||
191 | * level. | ||
192 | */ | ||
193 | static int rcu_is_cpu_rrupt_from_idle(void) | ||
194 | { | ||
195 | return rcu_dynticks_nesting <= 1; | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * Helper function for rcu_sched_qs() and rcu_bh_qs(). | 99 | * Helper function for rcu_sched_qs() and rcu_bh_qs(). |
200 | * Also irqs are disabled to avoid confusion due to interrupt handlers | 100 | * Also irqs are disabled to avoid confusion due to interrupt handlers |
201 | * invoking call_rcu(). | 101 | * invoking call_rcu(). |
@@ -250,7 +150,7 @@ void rcu_bh_qs(void) | |||
250 | void rcu_check_callbacks(int user) | 150 | void rcu_check_callbacks(int user) |
251 | { | 151 | { |
252 | RCU_TRACE(check_cpu_stalls()); | 152 | RCU_TRACE(check_cpu_stalls()); |
253 | if (user || rcu_is_cpu_rrupt_from_idle()) | 153 | if (user) |
254 | rcu_sched_qs(); | 154 | rcu_sched_qs(); |
255 | else if (!in_softirq()) | 155 | else if (!in_softirq()) |
256 | rcu_bh_qs(); | 156 | rcu_bh_qs(); |
@@ -357,6 +257,11 @@ static void __call_rcu(struct rcu_head *head, | |||
357 | rcp->curtail = &head->next; | 257 | rcp->curtail = &head->next; |
358 | RCU_TRACE(rcp->qlen++); | 258 | RCU_TRACE(rcp->qlen++); |
359 | local_irq_restore(flags); | 259 | local_irq_restore(flags); |
260 | |||
261 | if (unlikely(is_idle_task(current))) { | ||
262 | /* force scheduling for rcu_sched_qs() */ | ||
263 | resched_cpu(0); | ||
264 | } | ||
360 | } | 265 | } |
361 | 266 | ||
362 | /* | 267 | /* |
@@ -383,6 +288,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh); | |||
383 | void __init rcu_init(void) | 288 | void __init rcu_init(void) |
384 | { | 289 | { |
385 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 290 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
291 | RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk)); | ||
292 | RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk)); | ||
386 | 293 | ||
387 | rcu_early_boot_tests(); | 294 | rcu_early_boot_tests(); |
388 | } | 295 | } |
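The resched_cpu(0) kick in __call_rcu() is what lets all of the dyntick bookkeeping above go away: Tiny RCU is !SMP-only, so CPU 0 is the only CPU, and forcing it through the scheduler supplies the quiescent state that rcu_check_callbacks() can no longer infer from idle. A hedged sketch of the caller pattern this covers (the struct and function names are illustrative):

	struct foo {
		struct rcu_head rh;
		int data;
	};

	static void foo_reclaim(struct rcu_head *rhp)
	{
		kfree(container_of(rhp, struct foo, rh));
	}

	/* Safe even from the idle task: __call_rcu() now kicks CPU 0. */
	static void foo_retire(struct foo *fp)
	{
		call_rcu(&fp->rh, foo_reclaim);
	}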
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h index 858c56569127..f94e209a10d6 100644 --- a/kernel/rcu/tiny_plugin.h +++ b/kernel/rcu/tiny_plugin.h | |||
@@ -145,17 +145,16 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp) | |||
145 | rcp->ticks_this_gp++; | 145 | rcp->ticks_this_gp++; |
146 | j = jiffies; | 146 | j = jiffies; |
147 | js = ACCESS_ONCE(rcp->jiffies_stall); | 147 | js = ACCESS_ONCE(rcp->jiffies_stall); |
148 | if (*rcp->curtail && ULONG_CMP_GE(j, js)) { | 148 | if (rcp->rcucblist && ULONG_CMP_GE(j, js)) { |
149 | pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n", | 149 | pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n", |
150 | rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting, | 150 | rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE, |
151 | jiffies - rcp->gp_start, rcp->qlen); | 151 | jiffies - rcp->gp_start, rcp->qlen); |
152 | dump_stack(); | 152 | dump_stack(); |
153 | } | ||
154 | if (*rcp->curtail && ULONG_CMP_GE(j, js)) | ||
155 | ACCESS_ONCE(rcp->jiffies_stall) = jiffies + | 153 | ACCESS_ONCE(rcp->jiffies_stall) = jiffies + |
156 | 3 * rcu_jiffies_till_stall_check() + 3; | 154 | 3 * rcu_jiffies_till_stall_check() + 3; |
157 | else if (ULONG_CMP_GE(j, js)) | 155 | } else if (ULONG_CMP_GE(j, js)) { |
158 | ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check(); | 156 | ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check(); |
157 | } | ||
159 | } | 158 | } |
160 | 159 | ||
161 | static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp) | 160 | static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp) |
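Both branches of the restructured stall check rely on wrap-safe jiffies comparison. For reference, the comparison macros reduce to half-range tests on unsigned subtraction (this restates the definitions in include/linux/rcupdate.h):

	/* Wrap-safe "a >= b" and "a < b" for free-running unsigned counters. */
	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))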
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 4c106fcc0d54..48d640ca1a05 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -156,6 +156,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | |||
156 | static void invoke_rcu_core(void); | 156 | static void invoke_rcu_core(void); |
157 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); | 157 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); |
158 | 158 | ||
159 | /* rcuc/rcub kthread realtime priority */ | ||
160 | static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; | ||
161 | module_param(kthread_prio, int, 0644); | ||
162 | |||
159 | /* | 163 | /* |
160 | * Track the rcutorture test sequence number and the update version | 164 | * Track the rcutorture test sequence number and the update version |
161 | * number within a given test. The rcutorture_testseq is incremented | 165 | * number within a given test. The rcutorture_testseq is incremented |
@@ -215,6 +219,9 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | |||
215 | #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ | 219 | #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ |
216 | }; | 220 | }; |
217 | 221 | ||
222 | DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr); | ||
223 | EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr); | ||
224 | |||
218 | /* | 225 | /* |
219 | * Let the RCU core know that this CPU has gone through the scheduler, | 226 | * Let the RCU core know that this CPU has gone through the scheduler, |
220 | * which is a quiescent state. This is called when the need for a | 227 | * which is a quiescent state. This is called when the need for a |
@@ -284,6 +291,22 @@ void rcu_note_context_switch(void) | |||
284 | } | 291 | } |
285 | EXPORT_SYMBOL_GPL(rcu_note_context_switch); | 292 | EXPORT_SYMBOL_GPL(rcu_note_context_switch); |
286 | 293 | ||
294 | /* | ||
295 | * Register a quiescent state for all RCU flavors. If there is an | ||
296 | * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight | ||
297 | * dyntick-idle quiescent state visible to other CPUs (but only for those | ||
298 | * RCU flavors in desperate need of a quiescent state, which will normally | ||
299 | * be none of them). Either way, do a lightweight quiescent state for | ||
300 | * all RCU flavors. | ||
301 | */ | ||
302 | void rcu_all_qs(void) | ||
303 | { | ||
304 | if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) | ||
305 | rcu_momentary_dyntick_idle(); | ||
306 | this_cpu_inc(rcu_qs_ctr); | ||
307 | } | ||
308 | EXPORT_SYMBOL_GPL(rcu_all_qs); | ||
309 | |||
287 | static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ | 310 | static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ |
288 | static long qhimark = 10000; /* If this many pending, ignore blimit. */ | 311 | static long qhimark = 10000; /* If this many pending, ignore blimit. */ |
289 | static long qlowmark = 100; /* Once only this many pending, use blimit. */ | 312 | static long qlowmark = 100; /* Once only this many pending, use blimit. */ |
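rcu_all_qs() is deliberately cheap: one per-CPU increment in the common case, with the heavyweight rcu_momentary_dyntick_idle() reserved for flavors that have explicitly asked for help. A hedged usage sketch of the kind of loop this is aimed at; process_one_item() and struct item are placeholders, and the claim that cond_resched_rcu_qs() reaches rcu_all_qs() is a reading of this series rather than a guarantee:

	/* Hypothetical long-running kernel loop that cooperates with RCU. */
	static void process_all_items(struct item **items, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			process_one_item(items[i]);	/* placeholder */
			cond_resched_rcu_qs();	/* reports a QS to all flavors */
		}
	}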
@@ -315,18 +338,54 @@ static void force_quiescent_state(struct rcu_state *rsp); | |||
315 | static int rcu_pending(void); | 338 | static int rcu_pending(void); |
316 | 339 | ||
317 | /* | 340 | /* |
318 | * Return the number of RCU-sched batches processed thus far for debug & stats. | 341 | * Return the number of RCU batches started thus far for debug & stats. |
342 | */ | ||
343 | unsigned long rcu_batches_started(void) | ||
344 | { | ||
345 | return rcu_state_p->gpnum; | ||
346 | } | ||
347 | EXPORT_SYMBOL_GPL(rcu_batches_started); | ||
348 | |||
349 | /* | ||
350 | * Return the number of RCU-sched batches started thus far for debug & stats. | ||
319 | */ | 351 | */ |
320 | long rcu_batches_completed_sched(void) | 352 | unsigned long rcu_batches_started_sched(void) |
353 | { | ||
354 | return rcu_sched_state.gpnum; | ||
355 | } | ||
356 | EXPORT_SYMBOL_GPL(rcu_batches_started_sched); | ||
357 | |||
358 | /* | ||
359 | * Return the number of RCU BH batches started thus far for debug & stats. | ||
360 | */ | ||
361 | unsigned long rcu_batches_started_bh(void) | ||
362 | { | ||
363 | return rcu_bh_state.gpnum; | ||
364 | } | ||
365 | EXPORT_SYMBOL_GPL(rcu_batches_started_bh); | ||
366 | |||
367 | /* | ||
368 | * Return the number of RCU batches completed thus far for debug & stats. | ||
369 | */ | ||
370 | unsigned long rcu_batches_completed(void) | ||
371 | { | ||
372 | return rcu_state_p->completed; | ||
373 | } | ||
374 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | ||
375 | |||
376 | /* | ||
377 | * Return the number of RCU-sched batches completed thus far for debug & stats. | ||
378 | */ | ||
379 | unsigned long rcu_batches_completed_sched(void) | ||
321 | { | 380 | { |
322 | return rcu_sched_state.completed; | 381 | return rcu_sched_state.completed; |
323 | } | 382 | } |
324 | EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); | 383 | EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); |
325 | 384 | ||
326 | /* | 385 | /* |
327 | * Return the number of RCU BH batches processed thus far for debug & stats. | 386 | * Return the number of RCU BH batches completed thus far for debug & stats. |
328 | */ | 387 | */ |
329 | long rcu_batches_completed_bh(void) | 388 | unsigned long rcu_batches_completed_bh(void) |
330 | { | 389 | { |
331 | return rcu_bh_state.completed; | 390 | return rcu_bh_state.completed; |
332 | } | 391 | } |
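All of these accessors now return unsigned long for the same reason as the SRCU counter: callers snapshot and subtract, and only modular unsigned arithmetic keeps the delta meaningful across counter wrap. A hedged caller sketch (function name illustrative):

	/* Report grace periods completed since a caller-held snapshot. */
	static void report_gps_since(unsigned long snap)
	{
		pr_info("%lu RCU grace periods completed since snapshot\n",
			rcu_batches_completed() - snap);	/* wrap-safe */
	}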
@@ -930,17 +989,14 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp, | |||
930 | trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); | 989 | trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); |
931 | return 1; | 990 | return 1; |
932 | } else { | 991 | } else { |
992 | if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4, | ||
993 | rdp->mynode->gpnum)) | ||
994 | ACCESS_ONCE(rdp->gpwrap) = true; | ||
933 | return 0; | 995 | return 0; |
934 | } | 996 | } |
935 | } | 997 | } |
936 | 998 | ||
937 | /* | 999 | /* |
938 | * This function really isn't for public consumption, but RCU is special in | ||
939 | * that context switches can allow the state machine to make progress. | ||
940 | */ | ||
941 | extern void resched_cpu(int cpu); | ||
942 | |||
943 | /* | ||
944 | * Return true if the specified CPU has passed through a quiescent | 1000 | * state by virtue of being in or having passed through a dynticks |
945 | * state by virtue of being in or having passed through a dynticks | 1001 | * idle state since the last call to dyntick_save_progress_counter() |
946 | * idle state since the last call to dyntick_save_progress_counter() | 1002 | * idle state since the last call to dyntick_save_progress_counter() |
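The new ->gpwrap flag guards against a CPU sleeping across a grace-period-counter wrap and then trusting stale equality tests. A hedged restatement of the quarter-range heuristic used above (helper name illustrative):

	/*
	 * If this CPU's ->gpnum snapshot trails the rcu_node's current
	 * grace-period number by more than a quarter of the counter
	 * space, equality checks against the snapshot are suspect.
	 */
	static bool gpnum_possibly_wrapped(unsigned long snap, unsigned long cur)
	{
		return ULONG_CMP_LT(snap + ULONG_MAX / 4, cur);
	}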
@@ -1043,6 +1099,22 @@ static void record_gp_stall_check_time(struct rcu_state *rsp) | |||
1043 | j1 = rcu_jiffies_till_stall_check(); | 1099 | j1 = rcu_jiffies_till_stall_check(); |
1044 | ACCESS_ONCE(rsp->jiffies_stall) = j + j1; | 1100 | ACCESS_ONCE(rsp->jiffies_stall) = j + j1; |
1045 | rsp->jiffies_resched = j + j1 / 2; | 1101 | rsp->jiffies_resched = j + j1 / 2; |
1102 | rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs); | ||
1103 | } | ||
1104 | |||
1105 | /* | ||
1106 | * Complain about starvation of grace-period kthread. | ||
1107 | */ | ||
1108 | static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp) | ||
1109 | { | ||
1110 | unsigned long gpa; | ||
1111 | unsigned long j; | ||
1112 | |||
1113 | j = jiffies; | ||
1114 | gpa = ACCESS_ONCE(rsp->gp_activity); | ||
1115 | if (j - gpa > 2 * HZ) | ||
1116 | pr_err("%s kthread starved for %ld jiffies!\n", | ||
1117 | rsp->name, j - gpa); | ||
1046 | } | 1118 | } |
1047 | 1119 | ||
1048 | /* | 1120 | /* |
@@ -1065,11 +1137,13 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp) | |||
1065 | } | 1137 | } |
1066 | } | 1138 | } |
1067 | 1139 | ||
1068 | static void print_other_cpu_stall(struct rcu_state *rsp) | 1140 | static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum) |
1069 | { | 1141 | { |
1070 | int cpu; | 1142 | int cpu; |
1071 | long delta; | 1143 | long delta; |
1072 | unsigned long flags; | 1144 | unsigned long flags; |
1145 | unsigned long gpa; | ||
1146 | unsigned long j; | ||
1073 | int ndetected = 0; | 1147 | int ndetected = 0; |
1074 | struct rcu_node *rnp = rcu_get_root(rsp); | 1148 | struct rcu_node *rnp = rcu_get_root(rsp); |
1075 | long totqlen = 0; | 1149 | long totqlen = 0; |
@@ -1107,30 +1181,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
1107 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1181 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1108 | } | 1182 | } |
1109 | 1183 | ||
1110 | /* | ||
1111 | * Now rat on any tasks that got kicked up to the root rcu_node | ||
1112 | * due to CPU offlining. | ||
1113 | */ | ||
1114 | rnp = rcu_get_root(rsp); | ||
1115 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1116 | ndetected += rcu_print_task_stall(rnp); | ||
1117 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1118 | |||
1119 | print_cpu_stall_info_end(); | 1184 | print_cpu_stall_info_end(); |
1120 | for_each_possible_cpu(cpu) | 1185 | for_each_possible_cpu(cpu) |
1121 | totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; | 1186 | totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; |
1122 | pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n", | 1187 | pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n", |
1123 | smp_processor_id(), (long)(jiffies - rsp->gp_start), | 1188 | smp_processor_id(), (long)(jiffies - rsp->gp_start), |
1124 | (long)rsp->gpnum, (long)rsp->completed, totqlen); | 1189 | (long)rsp->gpnum, (long)rsp->completed, totqlen); |
1125 | if (ndetected == 0) | 1190 | if (ndetected) { |
1126 | pr_err("INFO: Stall ended before state dump start\n"); | ||
1127 | else | ||
1128 | rcu_dump_cpu_stacks(rsp); | 1191 | rcu_dump_cpu_stacks(rsp); |
1192 | } else { | ||
1193 | if (ACCESS_ONCE(rsp->gpnum) != gpnum || | ||
1194 | ACCESS_ONCE(rsp->completed) == gpnum) { | ||
1195 | pr_err("INFO: Stall ended before state dump start\n"); | ||
1196 | } else { | ||
1197 | j = jiffies; | ||
1198 | gpa = ACCESS_ONCE(rsp->gp_activity); | ||
1199 | pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n", | ||
1200 | rsp->name, j - gpa, j, gpa, | ||
1201 | jiffies_till_next_fqs); | ||
1202 | /* In this case, the current CPU might be at fault. */ | ||
1203 | sched_show_task(current); | ||
1204 | } | ||
1205 | } | ||
1129 | 1206 | ||
1130 | /* Complain about tasks blocking the grace period. */ | 1207 | /* Complain about tasks blocking the grace period. */ |
1131 | |||
1132 | rcu_print_detail_task_stall(rsp); | 1208 | rcu_print_detail_task_stall(rsp); |
1133 | 1209 | ||
1210 | rcu_check_gp_kthread_starvation(rsp); | ||
1211 | |||
1134 | force_quiescent_state(rsp); /* Kick them all. */ | 1212 | force_quiescent_state(rsp); /* Kick them all. */ |
1135 | } | 1213 | } |
1136 | 1214 | ||
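Passing gpnum into print_other_cpu_stall() lets the warning path distinguish a stall that quietly ended from one whose grace-period kthread is stuck. The new test reduces to the following (hedged restatement, helper name illustrative):

	/*
	 * The stall already ended if a newer GP began (->gpnum moved on)
	 * or the suspect GP completed (->completed caught up with it).
	 */
	static bool stall_already_over(struct rcu_state *rsp, unsigned long gpnum)
	{
		return ACCESS_ONCE(rsp->gpnum) != gpnum ||
		       ACCESS_ONCE(rsp->completed) == gpnum;
	}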
@@ -1155,6 +1233,9 @@ static void print_cpu_stall(struct rcu_state *rsp) | |||
1155 | pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n", | 1233 | pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n", |
1156 | jiffies - rsp->gp_start, | 1234 | jiffies - rsp->gp_start, |
1157 | (long)rsp->gpnum, (long)rsp->completed, totqlen); | 1235 | (long)rsp->gpnum, (long)rsp->completed, totqlen); |
1236 | |||
1237 | rcu_check_gp_kthread_starvation(rsp); | ||
1238 | |||
1158 | rcu_dump_cpu_stacks(rsp); | 1239 | rcu_dump_cpu_stacks(rsp); |
1159 | 1240 | ||
1160 | raw_spin_lock_irqsave(&rnp->lock, flags); | 1241 | raw_spin_lock_irqsave(&rnp->lock, flags); |
@@ -1225,7 +1306,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1225 | ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) { | 1306 | ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) { |
1226 | 1307 | ||
1227 | /* They had a few time units to dump stack, so complain. */ | 1308 | /* They had a few time units to dump stack, so complain. */ |
1228 | print_other_cpu_stall(rsp); | 1309 | print_other_cpu_stall(rsp, gpnum); |
1229 | } | 1310 | } |
1230 | } | 1311 | } |
1231 | 1312 | ||
@@ -1562,7 +1643,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, | |||
1562 | bool ret; | 1643 | bool ret; |
1563 | 1644 | ||
1564 | /* Handle the ends of any preceding grace periods first. */ | 1645 | /* Handle the ends of any preceding grace periods first. */ |
1565 | if (rdp->completed == rnp->completed) { | 1646 | if (rdp->completed == rnp->completed && |
1647 | !unlikely(ACCESS_ONCE(rdp->gpwrap))) { | ||
1566 | 1648 | ||
1567 | /* No grace period end, so just accelerate recent callbacks. */ | 1649 | /* No grace period end, so just accelerate recent callbacks. */ |
1568 | ret = rcu_accelerate_cbs(rsp, rnp, rdp); | 1650 | ret = rcu_accelerate_cbs(rsp, rnp, rdp); |
@@ -1577,7 +1659,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, | |||
1577 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); | 1659 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); |
1578 | } | 1660 | } |
1579 | 1661 | ||
1580 | if (rdp->gpnum != rnp->gpnum) { | 1662 | if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) { |
1581 | /* | 1663 | /* |
1582 | * If the current grace period is waiting for this CPU, | 1664 | * If the current grace period is waiting for this CPU, |
1583 | * set up to detect a quiescent state, otherwise don't | 1665 | * set up to detect a quiescent state, otherwise don't |
@@ -1586,8 +1668,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, | |||
1586 | rdp->gpnum = rnp->gpnum; | 1668 | rdp->gpnum = rnp->gpnum; |
1587 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); | 1669 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); |
1588 | rdp->passed_quiesce = 0; | 1670 | rdp->passed_quiesce = 0; |
1671 | rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); | ||
1589 | rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); | 1672 | rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); |
1590 | zero_cpu_stall_ticks(rdp); | 1673 | zero_cpu_stall_ticks(rdp); |
1674 | ACCESS_ONCE(rdp->gpwrap) = false; | ||
1591 | } | 1675 | } |
1592 | return ret; | 1676 | return ret; |
1593 | } | 1677 | } |
@@ -1601,7 +1685,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1601 | local_irq_save(flags); | 1685 | local_irq_save(flags); |
1602 | rnp = rdp->mynode; | 1686 | rnp = rdp->mynode; |
1603 | if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) && | 1687 | if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) && |
1604 | rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */ | 1688 | rdp->completed == ACCESS_ONCE(rnp->completed) && |
1689 | !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */ | ||
1605 | !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */ | 1690 | !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */ |
1606 | local_irq_restore(flags); | 1691 | local_irq_restore(flags); |
1607 | return; | 1692 | return; |
@@ -1621,6 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
1621 | struct rcu_data *rdp; | 1706 | struct rcu_data *rdp; |
1622 | struct rcu_node *rnp = rcu_get_root(rsp); | 1707 | struct rcu_node *rnp = rcu_get_root(rsp); |
1623 | 1708 | ||
1709 | ACCESS_ONCE(rsp->gp_activity) = jiffies; | ||
1624 | rcu_bind_gp_kthread(); | 1710 | rcu_bind_gp_kthread(); |
1625 | raw_spin_lock_irq(&rnp->lock); | 1711 | raw_spin_lock_irq(&rnp->lock); |
1626 | smp_mb__after_unlock_lock(); | 1712 | smp_mb__after_unlock_lock(); |
@@ -1681,6 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
1681 | rnp->grphi, rnp->qsmask); | 1767 | rnp->grphi, rnp->qsmask); |
1682 | raw_spin_unlock_irq(&rnp->lock); | 1768 | raw_spin_unlock_irq(&rnp->lock); |
1683 | cond_resched_rcu_qs(); | 1769 | cond_resched_rcu_qs(); |
1770 | ACCESS_ONCE(rsp->gp_activity) = jiffies; | ||
1684 | } | 1771 | } |
1685 | 1772 | ||
1686 | mutex_unlock(&rsp->onoff_mutex); | 1773 | mutex_unlock(&rsp->onoff_mutex); |
@@ -1697,6 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) | |||
1697 | unsigned long maxj; | 1784 | unsigned long maxj; |
1698 | struct rcu_node *rnp = rcu_get_root(rsp); | 1785 | struct rcu_node *rnp = rcu_get_root(rsp); |
1699 | 1786 | ||
1787 | ACCESS_ONCE(rsp->gp_activity) = jiffies; | ||
1700 | rsp->n_force_qs++; | 1788 | rsp->n_force_qs++; |
1701 | if (fqs_state == RCU_SAVE_DYNTICK) { | 1789 | if (fqs_state == RCU_SAVE_DYNTICK) { |
1702 | /* Collect dyntick-idle snapshots. */ | 1790 | /* Collect dyntick-idle snapshots. */ |
@@ -1735,6 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) | |||
1735 | struct rcu_data *rdp; | 1823 | struct rcu_data *rdp; |
1736 | struct rcu_node *rnp = rcu_get_root(rsp); | 1824 | struct rcu_node *rnp = rcu_get_root(rsp); |
1737 | 1825 | ||
1826 | ACCESS_ONCE(rsp->gp_activity) = jiffies; | ||
1738 | raw_spin_lock_irq(&rnp->lock); | 1827 | raw_spin_lock_irq(&rnp->lock); |
1739 | smp_mb__after_unlock_lock(); | 1828 | smp_mb__after_unlock_lock(); |
1740 | gp_duration = jiffies - rsp->gp_start; | 1829 | gp_duration = jiffies - rsp->gp_start; |
@@ -1771,6 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) | |||
1771 | nocb += rcu_future_gp_cleanup(rsp, rnp); | 1860 | nocb += rcu_future_gp_cleanup(rsp, rnp); |
1772 | raw_spin_unlock_irq(&rnp->lock); | 1861 | raw_spin_unlock_irq(&rnp->lock); |
1773 | cond_resched_rcu_qs(); | 1862 | cond_resched_rcu_qs(); |
1863 | ACCESS_ONCE(rsp->gp_activity) = jiffies; | ||
1774 | } | 1864 | } |
1775 | rnp = rcu_get_root(rsp); | 1865 | rnp = rcu_get_root(rsp); |
1776 | raw_spin_lock_irq(&rnp->lock); | 1866 | raw_spin_lock_irq(&rnp->lock); |
@@ -1820,6 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg) | |||
1820 | if (rcu_gp_init(rsp)) | 1910 | if (rcu_gp_init(rsp)) |
1821 | break; | 1911 | break; |
1822 | cond_resched_rcu_qs(); | 1912 | cond_resched_rcu_qs(); |
1913 | ACCESS_ONCE(rsp->gp_activity) = jiffies; | ||
1823 | WARN_ON(signal_pending(current)); | 1914 | WARN_ON(signal_pending(current)); |
1824 | trace_rcu_grace_period(rsp->name, | 1915 | trace_rcu_grace_period(rsp->name, |
1825 | ACCESS_ONCE(rsp->gpnum), | 1916 | ACCESS_ONCE(rsp->gpnum), |
@@ -1863,9 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg) | |||
1863 | ACCESS_ONCE(rsp->gpnum), | 1954 | ACCESS_ONCE(rsp->gpnum), |
1864 | TPS("fqsend")); | 1955 | TPS("fqsend")); |
1865 | cond_resched_rcu_qs(); | 1956 | cond_resched_rcu_qs(); |
1957 | ACCESS_ONCE(rsp->gp_activity) = jiffies; | ||
1866 | } else { | 1958 | } else { |
1867 | /* Deal with stray signal. */ | 1959 | /* Deal with stray signal. */ |
1868 | cond_resched_rcu_qs(); | 1960 | cond_resched_rcu_qs(); |
1961 | ACCESS_ONCE(rsp->gp_activity) = jiffies; | ||
1869 | WARN_ON(signal_pending(current)); | 1962 | WARN_ON(signal_pending(current)); |
1870 | trace_rcu_grace_period(rsp->name, | 1963 | trace_rcu_grace_period(rsp->name, |
1871 | ACCESS_ONCE(rsp->gpnum), | 1964 | ACCESS_ONCE(rsp->gpnum), |
@@ -2042,8 +2135,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) | |||
2042 | rnp = rdp->mynode; | 2135 | rnp = rdp->mynode; |
2043 | raw_spin_lock_irqsave(&rnp->lock, flags); | 2136 | raw_spin_lock_irqsave(&rnp->lock, flags); |
2044 | smp_mb__after_unlock_lock(); | 2137 | smp_mb__after_unlock_lock(); |
2045 | if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum || | 2138 | if ((rdp->passed_quiesce == 0 && |
2046 | rnp->completed == rnp->gpnum) { | 2139 | rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) || |
2140 | rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum || | ||
2141 | rdp->gpwrap) { | ||
2047 | 2142 | ||
2048 | /* | 2143 | /* |
2049 | * The grace period in which this quiescent state was | 2144 | * The grace period in which this quiescent state was |
@@ -2052,6 +2147,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) | |||
2052 | * within the current grace period. | 2147 | * within the current grace period. |
2053 | */ | 2148 | */ |
2054 | rdp->passed_quiesce = 0; /* need qs for new gp. */ | 2149 | rdp->passed_quiesce = 0; /* need qs for new gp. */ |
2150 | rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); | ||
2055 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 2151 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
2056 | return; | 2152 | return; |
2057 | } | 2153 | } |
@@ -2096,7 +2192,8 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) | |||
2096 | * Was there a quiescent state since the beginning of the grace | 2192 | * Was there a quiescent state since the beginning of the grace |
2097 | * period? If no, then exit and wait for the next call. | 2193 | * period? If no, then exit and wait for the next call. |
2098 | */ | 2194 | */ |
2099 | if (!rdp->passed_quiesce) | 2195 | if (!rdp->passed_quiesce && |
2196 | rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) | ||
2100 | return; | 2197 | return; |
2101 | 2198 | ||
2102 | /* | 2199 | /* |
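The rcu_qs_ctr_snap comparisons threaded through this file all encode one lightweight test; a hedged restatement (helper name illustrative):

	/*
	 * A CPU has passed a lightweight quiescent state if its per-CPU
	 * rcu_qs_ctr moved since the grace period snapshotted it; only
	 * rcu_all_qs() advances that counter.
	 */
	static bool lightweight_qs_seen(struct rcu_data *rdp)
	{
		return rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr);
	}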
@@ -2227,6 +2324,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) | |||
2227 | } | 2324 | } |
2228 | 2325 | ||
2229 | /* | 2326 | /* |
2327 | * All CPUs for the specified rcu_node structure have gone offline, | ||
2328 | * and all tasks that were preempted within an RCU read-side critical | ||
2329 | * section while running on one of those CPUs have since exited their RCU | ||
2330 | * read-side critical section. Some other CPU is reporting this fact with | ||
2331 | * the specified rcu_node structure's ->lock held and interrupts disabled. | ||
2332 | * This function therefore goes up the tree of rcu_node structures, | ||
2333 | * clearing the corresponding bits in the ->qsmaskinit fields. Note that | ||
2334 | * the leaf rcu_node structure's ->qsmaskinit field has already been | ||
2335 | * updated. | ||
2336 | * | ||
2337 | * This function does check that the specified rcu_node structure has | ||
2338 | * all CPUs offline and no blocked tasks, so it is OK to invoke it | ||
2339 | * prematurely. That said, invoking it after the fact will cost you | ||
2340 | * a needless lock acquisition. So once it has done its work, don't | ||
2341 | * invoke it again. | ||
2342 | */ | ||
2343 | static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) | ||
2344 | { | ||
2345 | long mask; | ||
2346 | struct rcu_node *rnp = rnp_leaf; | ||
2347 | |||
2348 | if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp)) | ||
2349 | return; | ||
2350 | for (;;) { | ||
2351 | mask = rnp->grpmask; | ||
2352 | rnp = rnp->parent; | ||
2353 | if (!rnp) | ||
2354 | break; | ||
2355 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
2356 | smp_mb__after_unlock_lock(); /* GP memory ordering. */ | ||
2357 | rnp->qsmaskinit &= ~mask; | ||
2358 | if (rnp->qsmaskinit) { | ||
2359 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
2360 | return; | ||
2361 | } | ||
2362 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
2363 | } | ||
2364 | } | ||
2365 | |||
2366 | /* | ||
2230 | * The CPU has been completely removed, and some other CPU is reporting | 2367 | * The CPU has been completely removed, and some other CPU is reporting |
2231 | * this fact from process context. Do the remainder of the cleanup, | 2368 | * this fact from process context. Do the remainder of the cleanup, |
2232 | * including orphaning the outgoing CPU's RCU callbacks, and also | 2369 | * including orphaning the outgoing CPU's RCU callbacks, and also |
@@ -2236,8 +2373,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) | |||
2236 | static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) | 2373 | static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) |
2237 | { | 2374 | { |
2238 | unsigned long flags; | 2375 | unsigned long flags; |
2239 | unsigned long mask; | ||
2240 | int need_report = 0; | ||
2241 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); | 2376 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
2242 | struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ | 2377 | struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ |
2243 | 2378 | ||
@@ -2251,40 +2386,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) | |||
2251 | /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */ | 2386 | /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */ |
2252 | rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); | 2387 | rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); |
2253 | rcu_adopt_orphan_cbs(rsp, flags); | 2388 | rcu_adopt_orphan_cbs(rsp, flags); |
2389 | raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags); | ||
2254 | 2390 | ||
2255 | /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ | 2391 | /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ |
2256 | mask = rdp->grpmask; /* rnp->grplo is constant. */ | 2392 | raw_spin_lock_irqsave(&rnp->lock, flags); |
2257 | do { | 2393 | smp_mb__after_unlock_lock(); /* Enforce GP memory-order guarantee. */ |
2258 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ | 2394 | rnp->qsmaskinit &= ~rdp->grpmask; |
2259 | smp_mb__after_unlock_lock(); | 2395 | if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp)) |
2260 | rnp->qsmaskinit &= ~mask; | 2396 | rcu_cleanup_dead_rnp(rnp); |
2261 | if (rnp->qsmaskinit != 0) { | 2397 | rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */ |
2262 | if (rnp != rdp->mynode) | ||
2263 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
2264 | break; | ||
2265 | } | ||
2266 | if (rnp == rdp->mynode) | ||
2267 | need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); | ||
2268 | else | ||
2269 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
2270 | mask = rnp->grpmask; | ||
2271 | rnp = rnp->parent; | ||
2272 | } while (rnp != NULL); | ||
2273 | |||
2274 | /* | ||
2275 | * We still hold the leaf rcu_node structure lock here, and | ||
2276 | * irqs are still disabled. The reason for this subterfuge is | ||
2277 | * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock | ||
2278 | * held leads to deadlock. | ||
2279 | */ | ||
2280 | raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */ | ||
2281 | rnp = rdp->mynode; | ||
2282 | if (need_report & RCU_OFL_TASKS_NORM_GP) | ||
2283 | rcu_report_unblock_qs_rnp(rnp, flags); | ||
2284 | else | ||
2285 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
2286 | if (need_report & RCU_OFL_TASKS_EXP_GP) | ||
2287 | rcu_report_exp_rnp(rsp, rnp, true); | ||
2288 | WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, | 2398 | WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, |
2289 | "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", | 2399 | "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", |
2290 | cpu, rdp->qlen, rdp->nxtlist); | 2400 | cpu, rdp->qlen, rdp->nxtlist); |
@@ -2300,6 +2410,10 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) | |||
2300 | { | 2410 | { |
2301 | } | 2411 | } |
2302 | 2412 | ||
2413 | static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) | ||
2414 | { | ||
2415 | } | ||
2416 | |||
2303 | static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) | 2417 | static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) |
2304 | { | 2418 | { |
2305 | } | 2419 | } |
@@ -2496,12 +2610,6 @@ static void force_qs_rnp(struct rcu_state *rsp, | |||
2496 | } | 2610 | } |
2497 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 2611 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
2498 | } | 2612 | } |
2499 | rnp = rcu_get_root(rsp); | ||
2500 | if (rnp->qsmask == 0) { | ||
2501 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
2502 | smp_mb__after_unlock_lock(); | ||
2503 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | ||
2504 | } | ||
2505 | } | 2613 | } |
2506 | 2614 | ||
2507 | /* | 2615 | /* |
@@ -2601,7 +2709,7 @@ static void rcu_process_callbacks(struct softirq_action *unused) | |||
2601 | * Schedule RCU callback invocation. If the specified type of RCU | 2709 | * Schedule RCU callback invocation. If the specified type of RCU |
2602 | * does not support RCU priority boosting, just do a direct call, | 2710 | * does not support RCU priority boosting, just do a direct call, |
2603 | * otherwise wake up the per-CPU kernel kthread. Note that because we | 2711 | * otherwise wake up the per-CPU kernel kthread. Note that because we |
2604 | * are running on the current CPU with interrupts disabled, the | 2712 | * are running on the current CPU with softirqs disabled, the |
2605 | * rcu_cpu_kthread_task cannot disappear out from under us. | 2713 | * rcu_cpu_kthread_task cannot disappear out from under us. |
2606 | */ | 2714 | */ |
2607 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | 2715 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) |
@@ -3141,9 +3249,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
3141 | 3249 | ||
3142 | /* Is the RCU core waiting for a quiescent state from this CPU? */ | 3250 | /* Is the RCU core waiting for a quiescent state from this CPU? */ |
3143 | if (rcu_scheduler_fully_active && | 3251 | if (rcu_scheduler_fully_active && |
3144 | rdp->qs_pending && !rdp->passed_quiesce) { | 3252 | rdp->qs_pending && !rdp->passed_quiesce && |
3253 | rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) { | ||
3145 | rdp->n_rp_qs_pending++; | 3254 | rdp->n_rp_qs_pending++; |
3146 | } else if (rdp->qs_pending && rdp->passed_quiesce) { | 3255 | } else if (rdp->qs_pending && |
3256 | (rdp->passed_quiesce || | ||
3257 | rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) { | ||
3147 | rdp->n_rp_report_qs++; | 3258 | rdp->n_rp_report_qs++; |
3148 | return 1; | 3259 | return 1; |
3149 | } | 3260 | } |
@@ -3167,7 +3278,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
3167 | } | 3278 | } |
3168 | 3279 | ||
3169 | /* Has a new RCU grace period started? */ | 3280 | /* Has a new RCU grace period started? */ |
3170 | if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ | 3281 | if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum || |
3282 | unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */ | ||
3171 | rdp->n_rp_gp_started++; | 3283 | rdp->n_rp_gp_started++; |
3172 | return 1; | 3284 | return 1; |
3173 | } | 3285 | } |
@@ -3350,6 +3462,7 @@ static void _rcu_barrier(struct rcu_state *rsp) | |||
3350 | } else { | 3462 | } else { |
3351 | _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, | 3463 | _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, |
3352 | rsp->n_barrier_done); | 3464 | rsp->n_barrier_done); |
3465 | smp_mb__before_atomic(); | ||
3353 | atomic_inc(&rsp->barrier_cpu_count); | 3466 | atomic_inc(&rsp->barrier_cpu_count); |
3354 | __call_rcu(&rdp->barrier_head, | 3467 | __call_rcu(&rdp->barrier_head, |
3355 | rcu_barrier_callback, rsp, cpu, 0); | 3468 | rcu_barrier_callback, rsp, cpu, 0); |
@@ -3417,9 +3530,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
3417 | /* Set up local state, ensuring consistent view of global state. */ | 3530 | /* Set up local state, ensuring consistent view of global state. */ |
3418 | raw_spin_lock_irqsave(&rnp->lock, flags); | 3531 | raw_spin_lock_irqsave(&rnp->lock, flags); |
3419 | rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); | 3532 | rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); |
3420 | init_callback_list(rdp); | ||
3421 | rdp->qlen_lazy = 0; | ||
3422 | ACCESS_ONCE(rdp->qlen) = 0; | ||
3423 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); | 3533 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); |
3424 | WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); | 3534 | WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); |
3425 | WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); | 3535 | WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); |
@@ -3476,6 +3586,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
3476 | rdp->gpnum = rnp->completed; | 3586 | rdp->gpnum = rnp->completed; |
3477 | rdp->completed = rnp->completed; | 3587 | rdp->completed = rnp->completed; |
3478 | rdp->passed_quiesce = 0; | 3588 | rdp->passed_quiesce = 0; |
3589 | rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); | ||
3479 | rdp->qs_pending = 0; | 3590 | rdp->qs_pending = 0; |
3480 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); | 3591 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); |
3481 | } | 3592 | } |
@@ -3567,17 +3678,35 @@ static int rcu_pm_notify(struct notifier_block *self, | |||
3567 | static int __init rcu_spawn_gp_kthread(void) | 3678 | static int __init rcu_spawn_gp_kthread(void) |
3568 | { | 3679 | { |
3569 | unsigned long flags; | 3680 | unsigned long flags; |
3681 | int kthread_prio_in = kthread_prio; | ||
3570 | struct rcu_node *rnp; | 3682 | struct rcu_node *rnp; |
3571 | struct rcu_state *rsp; | 3683 | struct rcu_state *rsp; |
3684 | struct sched_param sp; | ||
3572 | struct task_struct *t; | 3685 | struct task_struct *t; |
3573 | 3686 | ||
3687 | /* Force priority into range. */ | ||
3688 | if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1) | ||
3689 | kthread_prio = 1; | ||
3690 | else if (kthread_prio < 0) | ||
3691 | kthread_prio = 0; | ||
3692 | else if (kthread_prio > 99) | ||
3693 | kthread_prio = 99; | ||
3694 | if (kthread_prio != kthread_prio_in) | ||
3695 | pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n", | ||
3696 | kthread_prio, kthread_prio_in); | ||
3697 | |||
3574 | rcu_scheduler_fully_active = 1; | 3698 | rcu_scheduler_fully_active = 1; |
3575 | for_each_rcu_flavor(rsp) { | 3699 | for_each_rcu_flavor(rsp) { |
3576 | t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name); | 3700 | t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name); |
3577 | BUG_ON(IS_ERR(t)); | 3701 | BUG_ON(IS_ERR(t)); |
3578 | rnp = rcu_get_root(rsp); | 3702 | rnp = rcu_get_root(rsp); |
3579 | raw_spin_lock_irqsave(&rnp->lock, flags); | 3703 | raw_spin_lock_irqsave(&rnp->lock, flags); |
3580 | rsp->gp_kthread = t; | 3704 | rsp->gp_kthread = t; |
3705 | if (kthread_prio) { | ||
3706 | sp.sched_priority = kthread_prio; | ||
3707 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
3708 | } | ||
3709 | wake_up_process(t); | ||
3581 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 3710 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
3582 | } | 3711 | } |
3583 | rcu_spawn_nocb_kthreads(); | 3712 | rcu_spawn_nocb_kthreads(); |
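With the priority now clamped and applied before the kthread is awakened, the parameter can also be set on the kernel command line; an illustrative setting (the parameter name follows from the module_param() added to tree.c, the value is arbitrary):

	rcutree.kthread_prio=50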
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 8e7b1843896e..119de399eb2f 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/threads.h> | 27 | #include <linux/threads.h> |
28 | #include <linux/cpumask.h> | 28 | #include <linux/cpumask.h> |
29 | #include <linux/seqlock.h> | 29 | #include <linux/seqlock.h> |
30 | #include <linux/irq_work.h> | ||
31 | 30 | ||
32 | /* | 31 | /* |
33 | * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and | 32 | * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and |
@@ -172,11 +171,6 @@ struct rcu_node { | |||
172 | /* queued on this rcu_node structure that */ | 171 | /* queued on this rcu_node structure that */ |
173 | /* are blocking the current grace period, */ | 172 | /* are blocking the current grace period, */ |
174 | /* there can be no such task. */ | 173 | /* there can be no such task. */ |
175 | struct completion boost_completion; | ||
176 | /* Used to ensure that the rt_mutex used */ | ||
177 | /* to carry out the boosting is fully */ | ||
178 | /* released with no future boostee accesses */ | ||
179 | /* before that rt_mutex is re-initialized. */ | ||
180 | struct rt_mutex boost_mtx; | 174 | struct rt_mutex boost_mtx; |
181 | /* Used only for the priority-boosting */ | 175 | /* Used only for the priority-boosting */ |
182 | /* side effect, not as a lock. */ | 176 | /* side effect, not as a lock. */ |
@@ -257,9 +251,12 @@ struct rcu_data { | |||
257 | /* in order to detect GP end. */ | 251 | /* in order to detect GP end. */ |
258 | unsigned long gpnum; /* Highest gp number that this CPU */ | 252 | unsigned long gpnum; /* Highest gp number that this CPU */ |
259 | /* is aware of having started. */ | 253 | /* is aware of having started. */ |
254 | unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */ | ||
255 | /* for rcu_all_qs() invocations. */ | ||
260 | bool passed_quiesce; /* User-mode/idle loop etc. */ | 256 | bool passed_quiesce; /* User-mode/idle loop etc. */ |
261 | bool qs_pending; /* Core waits for quiesc state. */ | 257 | bool qs_pending; /* Core waits for quiesc state. */ |
262 | bool beenonline; /* CPU online at least once. */ | 258 | bool beenonline; /* CPU online at least once. */ |
259 | bool gpwrap; /* Possible gpnum/completed wrap. */ | ||
263 | struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ | 260 | struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ |
264 | unsigned long grpmask; /* Mask to apply to leaf qsmask. */ | 261 | unsigned long grpmask; /* Mask to apply to leaf qsmask. */ |
265 | #ifdef CONFIG_RCU_CPU_STALL_INFO | 262 | #ifdef CONFIG_RCU_CPU_STALL_INFO |
@@ -340,14 +337,10 @@ struct rcu_data { | |||
340 | #ifdef CONFIG_RCU_NOCB_CPU | 337 | #ifdef CONFIG_RCU_NOCB_CPU |
341 | struct rcu_head *nocb_head; /* CBs waiting for kthread. */ | 338 | struct rcu_head *nocb_head; /* CBs waiting for kthread. */ |
342 | struct rcu_head **nocb_tail; | 339 | struct rcu_head **nocb_tail; |
343 | atomic_long_t nocb_q_count; /* # CBs waiting for kthread */ | 340 | atomic_long_t nocb_q_count; /* # CBs waiting for nocb */ |
344 | atomic_long_t nocb_q_count_lazy; /* (approximate). */ | 341 | atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */ |
345 | struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ | 342 | struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ |
346 | struct rcu_head **nocb_follower_tail; | 343 | struct rcu_head **nocb_follower_tail; |
347 | atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */ | ||
348 | atomic_long_t nocb_follower_count_lazy; /* (approximate). */ | ||
349 | int nocb_p_count; /* # CBs being invoked by kthread */ | ||
350 | int nocb_p_count_lazy; /* (approximate). */ | ||
351 | wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ | 344 | wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ |
352 | struct task_struct *nocb_kthread; | 345 | struct task_struct *nocb_kthread; |
353 | int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ | 346 | int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ |
@@ -356,8 +349,6 @@ struct rcu_data { | |||
356 | struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp; | 349 | struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp; |
357 | /* CBs waiting for GP. */ | 350 | /* CBs waiting for GP. */ |
358 | struct rcu_head **nocb_gp_tail; | 351 | struct rcu_head **nocb_gp_tail; |
359 | long nocb_gp_count; | ||
360 | long nocb_gp_count_lazy; | ||
361 | bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */ | 352 | bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */ |
362 | struct rcu_data *nocb_next_follower; | 353 | struct rcu_data *nocb_next_follower; |
363 | /* Next follower in wakeup chain. */ | 354 | /* Next follower in wakeup chain. */ |
@@ -488,10 +479,14 @@ struct rcu_state { | |||
488 | /* due to no GP active. */ | 479 | /* due to no GP active. */ |
489 | unsigned long gp_start; /* Time at which GP started, */ | 480 | unsigned long gp_start; /* Time at which GP started, */ |
490 | /* but in jiffies. */ | 481 | /* but in jiffies. */ |
482 | unsigned long gp_activity; /* Time of last GP kthread */ | ||
483 | /* activity in jiffies. */ | ||
491 | unsigned long jiffies_stall; /* Time at which to check */ | 484 | unsigned long jiffies_stall; /* Time at which to check */ |
492 | /* for CPU stalls. */ | 485 | /* for CPU stalls. */ |
493 | unsigned long jiffies_resched; /* Time at which to resched */ | 486 | unsigned long jiffies_resched; /* Time at which to resched */ |
494 | /* a reluctant CPU. */ | 487 | /* a reluctant CPU. */ |
488 | unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */ | ||
489 | /* GP start. */ | ||
495 | unsigned long gp_max; /* Maximum GP duration in */ | 490 | unsigned long gp_max; /* Maximum GP duration in */ |
496 | /* jiffies. */ | 491 | /* jiffies. */ |
497 | const char *name; /* Name of structure. */ | 492 | const char *name; /* Name of structure. */ |
@@ -514,13 +509,6 @@ extern struct list_head rcu_struct_flavors; | |||
514 | #define for_each_rcu_flavor(rsp) \ | 509 | #define for_each_rcu_flavor(rsp) \ |
515 | list_for_each_entry((rsp), &rcu_struct_flavors, flavors) | 510 | list_for_each_entry((rsp), &rcu_struct_flavors, flavors) |
516 | 511 | ||
517 | /* Return values for rcu_preempt_offline_tasks(). */ | ||
518 | |||
519 | #define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */ | ||
520 | /* GP were moved to root. */ | ||
521 | #define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */ | ||
522 | /* GP were moved to root. */ | ||
523 | |||
524 | /* | 512 | /* |
525 | * RCU implementation internal declarations: | 513 | * RCU implementation internal declarations: |
526 | */ | 514 | */ |
@@ -546,27 +534,16 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work); | |||
546 | 534 | ||
547 | /* Forward declarations for rcutree_plugin.h */ | 535 | /* Forward declarations for rcutree_plugin.h */ |
548 | static void rcu_bootup_announce(void); | 536 | static void rcu_bootup_announce(void); |
549 | long rcu_batches_completed(void); | ||
550 | static void rcu_preempt_note_context_switch(void); | 537 | static void rcu_preempt_note_context_switch(void); |
551 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); | 538 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); |
552 | #ifdef CONFIG_HOTPLUG_CPU | 539 | #ifdef CONFIG_HOTPLUG_CPU |
553 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, | 540 | static bool rcu_preempt_has_tasks(struct rcu_node *rnp); |
554 | unsigned long flags); | ||
555 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 541 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
556 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); | 542 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); |
557 | static int rcu_print_task_stall(struct rcu_node *rnp); | 543 | static int rcu_print_task_stall(struct rcu_node *rnp); |
558 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); | 544 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); |
559 | #ifdef CONFIG_HOTPLUG_CPU | ||
560 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | ||
561 | struct rcu_node *rnp, | ||
562 | struct rcu_data *rdp); | ||
563 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
564 | static void rcu_preempt_check_callbacks(void); | 545 | static void rcu_preempt_check_callbacks(void); |
565 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); | 546 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); |
566 | #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) | ||
567 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, | ||
568 | bool wake); | ||
569 | #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */ | ||
570 | static void __init __rcu_init_preempt(void); | 547 | static void __init __rcu_init_preempt(void); |
571 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); | 548 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); |
572 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); | 549 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); |
@@ -622,24 +599,15 @@ static void rcu_dynticks_task_exit(void); | |||
622 | #endif /* #ifndef RCU_TREE_NONCORE */ | 599 | #endif /* #ifndef RCU_TREE_NONCORE */ |
623 | 600 | ||
624 | #ifdef CONFIG_RCU_TRACE | 601 | #ifdef CONFIG_RCU_TRACE |
625 | #ifdef CONFIG_RCU_NOCB_CPU | 602 | /* Read out queue lengths for tracing. */ |
626 | /* Sum up queue lengths for tracing. */ | ||
627 | static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll) | 603 | static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll) |
628 | { | 604 | { |
629 | *ql = atomic_long_read(&rdp->nocb_q_count) + | 605 | #ifdef CONFIG_RCU_NOCB_CPU |
630 | rdp->nocb_p_count + | 606 | *ql = atomic_long_read(&rdp->nocb_q_count); |
631 | atomic_long_read(&rdp->nocb_follower_count) + | 607 | *qll = atomic_long_read(&rdp->nocb_q_count_lazy); |
632 | rdp->nocb_p_count + rdp->nocb_gp_count; | ||
633 | *qll = atomic_long_read(&rdp->nocb_q_count_lazy) + | ||
634 | rdp->nocb_p_count_lazy + | ||
635 | atomic_long_read(&rdp->nocb_follower_count_lazy) + | ||
636 | rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy; | ||
637 | } | ||
638 | #else /* #ifdef CONFIG_RCU_NOCB_CPU */ | 608 | #else /* #ifdef CONFIG_RCU_NOCB_CPU */ |
639 | static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll) | ||
640 | { | ||
641 | *ql = 0; | 609 | *ql = 0; |
642 | *qll = 0; | 610 | *qll = 0; |
643 | } | ||
644 | #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ | 611 | #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ |
612 | } | ||
645 | #endif /* #ifdef CONFIG_RCU_TRACE */ | 613 | #endif /* #ifdef CONFIG_RCU_TRACE */ |
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 3ec85cb5d544..2e850a51bb8f 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h | |||
@@ -34,10 +34,6 @@ | |||
34 | 34 | ||
35 | #include "../locking/rtmutex_common.h" | 35 | #include "../locking/rtmutex_common.h" |
36 | 36 | ||
37 | /* rcuc/rcub kthread realtime priority */ | ||
38 | static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; | ||
39 | module_param(kthread_prio, int, 0644); | ||
40 | |||
41 | /* | 37 | /* |
42 | * Control variables for per-CPU and per-rcu_node kthreads. These | 38 | * Control variables for per-CPU and per-rcu_node kthreads. These |
43 | * handle all flavors of RCU. | 39 | * handle all flavors of RCU. |
@@ -103,6 +99,8 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); | |||
103 | static struct rcu_state *rcu_state_p = &rcu_preempt_state; | 99 | static struct rcu_state *rcu_state_p = &rcu_preempt_state; |
104 | 100 | ||
105 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); | 101 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); |
102 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, | ||
103 | bool wake); | ||
106 | 104 | ||
107 | /* | 105 | /* |
108 | * Tell them what RCU they are running. | 106 | * Tell them what RCU they are running. |
@@ -114,25 +112,6 @@ static void __init rcu_bootup_announce(void) | |||
114 | } | 112 | } |
115 | 113 | ||
116 | /* | 114 | /* |
117 | * Return the number of RCU-preempt batches processed thus far | ||
118 | * for debug and statistics. | ||
119 | */ | ||
120 | static long rcu_batches_completed_preempt(void) | ||
121 | { | ||
122 | return rcu_preempt_state.completed; | ||
123 | } | ||
124 | EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt); | ||
125 | |||
126 | /* | ||
127 | * Return the number of RCU batches processed thus far for debug & stats. | ||
128 | */ | ||
129 | long rcu_batches_completed(void) | ||
130 | { | ||
131 | return rcu_batches_completed_preempt(); | ||
132 | } | ||
133 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | ||
134 | |||
135 | /* | ||
136 | * Record a preemptible-RCU quiescent state for the specified CPU. Note | 115 | * Record a preemptible-RCU quiescent state for the specified CPU. Note |
137 | * that this just means that the task currently running on the CPU is | 116 | * that this just means that the task currently running on the CPU is |
138 | * not in a quiescent state. There might be any number of tasks blocked | 117 | * not in a quiescent state. There might be any number of tasks blocked |
@@ -307,15 +286,25 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t, | |||
307 | } | 286 | } |
308 | 287 | ||
309 | /* | 288 | /* |
289 | * Return true if the specified rcu_node structure has tasks that were | ||
290 | * preempted within an RCU read-side critical section. | ||
291 | */ | ||
292 | static bool rcu_preempt_has_tasks(struct rcu_node *rnp) | ||
293 | { | ||
294 | return !list_empty(&rnp->blkd_tasks); | ||
295 | } | ||
296 | |||
297 | /* | ||
310 | * Handle special cases during rcu_read_unlock(), such as needing to | 298 | * Handle special cases during rcu_read_unlock(), such as needing to |
311 | * notify RCU core processing or task having blocked during the RCU | 299 | * notify RCU core processing or task having blocked during the RCU |
312 | * read-side critical section. | 300 | * read-side critical section. |
313 | */ | 301 | */ |
314 | void rcu_read_unlock_special(struct task_struct *t) | 302 | void rcu_read_unlock_special(struct task_struct *t) |
315 | { | 303 | { |
316 | int empty; | 304 | bool empty; |
317 | int empty_exp; | 305 | bool empty_exp; |
318 | int empty_exp_now; | 306 | bool empty_norm; |
307 | bool empty_exp_now; | ||
319 | unsigned long flags; | 308 | unsigned long flags; |
320 | struct list_head *np; | 309 | struct list_head *np; |
321 | #ifdef CONFIG_RCU_BOOST | 310 | #ifdef CONFIG_RCU_BOOST |
@@ -367,7 +356,8 @@ void rcu_read_unlock_special(struct task_struct *t) | |||
367 | break; | 356 | break; |
368 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 357 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
369 | } | 358 | } |
370 | empty = !rcu_preempt_blocked_readers_cgp(rnp); | 359 | empty = !rcu_preempt_has_tasks(rnp); |
360 | empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); | ||
371 | empty_exp = !rcu_preempted_readers_exp(rnp); | 361 | empty_exp = !rcu_preempted_readers_exp(rnp); |
372 | smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ | 362 | smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ |
373 | np = rcu_next_node_entry(t, rnp); | 363 | np = rcu_next_node_entry(t, rnp); |
@@ -387,13 +377,21 @@ void rcu_read_unlock_special(struct task_struct *t) | |||
387 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 377 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
388 | 378 | ||
389 | /* | 379 | /* |
380 | * If this was the last task on the list, go see if we | ||
381 | * need to propagate ->qsmaskinit bit clearing up the | ||
382 | * rcu_node tree. | ||
383 | */ | ||
384 | if (!empty && !rcu_preempt_has_tasks(rnp)) | ||
385 | rcu_cleanup_dead_rnp(rnp); | ||
386 | |||
387 | /* | ||
390 | * If this was the last task on the current list, and if | 388 | * If this was the last task on the current list, and if |
391 | * we aren't waiting on any CPUs, report the quiescent state. | 389 | * we aren't waiting on any CPUs, report the quiescent state. |
392 | * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, | 390 | * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, |
393 | * so we must take a snapshot of the expedited state. | 391 | * so we must take a snapshot of the expedited state. |
394 | */ | 392 | */ |
395 | empty_exp_now = !rcu_preempted_readers_exp(rnp); | 393 | empty_exp_now = !rcu_preempted_readers_exp(rnp); |
396 | if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { | 394 | if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { |
397 | trace_rcu_quiescent_state_report(TPS("preempt_rcu"), | 395 | trace_rcu_quiescent_state_report(TPS("preempt_rcu"), |
398 | rnp->gpnum, | 396 | rnp->gpnum, |
399 | 0, rnp->qsmask, | 397 | 0, rnp->qsmask, |
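
The hunk above adds a call to rcu_cleanup_dead_rnp() for the case where rcu_read_unlock_special() removes the last blocked task from an rcu_node whose CPUs are all offline: the node's bit must then be cleared from its parent's ->qsmaskinit, and the clearing propagated upward. A simplified userspace model of that propagation, using toy versions of the rcu_node fields (the kernel's rcu_cleanup_dead_rnp() must additionally handle locking and races):

	#include <stdio.h>
	#include <stddef.h>

	/* Toy rcu_node: just enough state to show ->qsmaskinit clearing. */
	struct node {
		struct node *parent;
		unsigned long qsmaskinit; /* bits for children still in use */
		unsigned long grpmask;    /* this node's bit in its parent */
	};

	static void cleanup_dead_rnp(struct node *rnp)
	{
		/* The leaf has no online CPUs and no blocked tasks: clear
		 * its bit upward until an ancestor still has other users. */
		while (rnp->parent) {
			struct node *p = rnp->parent;

			p->qsmaskinit &= ~rnp->grpmask;
			if (p->qsmaskinit)
				break;
			rnp = p;
		}
	}

	int main(void)
	{
		struct node root = { NULL, 0x3, 0 };
		struct node leaf0 = { &root, 0, 0x1 };

		cleanup_dead_rnp(&leaf0);
		printf("root qsmaskinit=%#lx\n", root.qsmaskinit); /* 0x2 */
		return 0;
	}
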
@@ -408,10 +406,8 @@ void rcu_read_unlock_special(struct task_struct *t) | |||
408 | 406 | ||
409 | #ifdef CONFIG_RCU_BOOST | 407 | #ifdef CONFIG_RCU_BOOST |
410 | /* Unboost if we were boosted. */ | 408 | /* Unboost if we were boosted. */ |
411 | if (drop_boost_mutex) { | 409 | if (drop_boost_mutex) |
412 | rt_mutex_unlock(&rnp->boost_mtx); | 410 | rt_mutex_unlock(&rnp->boost_mtx); |
413 | complete(&rnp->boost_completion); | ||
414 | } | ||
415 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 411 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
416 | 412 | ||
417 | /* | 413 | /* |
@@ -519,99 +515,13 @@ static int rcu_print_task_stall(struct rcu_node *rnp) | |||
519 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | 515 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) |
520 | { | 516 | { |
521 | WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); | 517 | WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); |
522 | if (!list_empty(&rnp->blkd_tasks)) | 518 | if (rcu_preempt_has_tasks(rnp)) |
523 | rnp->gp_tasks = rnp->blkd_tasks.next; | 519 | rnp->gp_tasks = rnp->blkd_tasks.next; |
524 | WARN_ON_ONCE(rnp->qsmask); | 520 | WARN_ON_ONCE(rnp->qsmask); |
525 | } | 521 | } |
526 | 522 | ||
527 | #ifdef CONFIG_HOTPLUG_CPU | 523 | #ifdef CONFIG_HOTPLUG_CPU |
528 | 524 | ||
529 | /* | ||
530 | * Handle tasklist migration for case in which all CPUs covered by the | ||
531 | * specified rcu_node have gone offline. Move them up to the root | ||
532 | * rcu_node. The reason for not just moving them to the immediate | ||
533 | * parent is to remove the need for rcu_read_unlock_special() to | ||
534 | * make more than two attempts to acquire the target rcu_node's lock. | ||
535 | * Returns true if there were tasks blocking the current RCU grace | ||
536 | * period. | ||
537 | * | ||
538 | * Returns 1 if there was previously a task blocking the current grace | ||
539 | * period on the specified rcu_node structure. | ||
540 | * | ||
541 | * The caller must hold rnp->lock with irqs disabled. | ||
542 | */ | ||
543 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | ||
544 | struct rcu_node *rnp, | ||
545 | struct rcu_data *rdp) | ||
546 | { | ||
547 | struct list_head *lp; | ||
548 | struct list_head *lp_root; | ||
549 | int retval = 0; | ||
550 | struct rcu_node *rnp_root = rcu_get_root(rsp); | ||
551 | struct task_struct *t; | ||
552 | |||
553 | if (rnp == rnp_root) { | ||
554 | WARN_ONCE(1, "Last CPU thought to be offlined?"); | ||
555 | return 0; /* Shouldn't happen: at least one CPU online. */ | ||
556 | } | ||
557 | |||
558 | /* If we are on an internal node, complain bitterly. */ | ||
559 | WARN_ON_ONCE(rnp != rdp->mynode); | ||
560 | |||
561 | /* | ||
562 | * Move tasks up to root rcu_node. Don't try to get fancy for | ||
563 | * this corner-case operation -- just put this node's tasks | ||
564 | * at the head of the root node's list, and update the root node's | ||
565 | * ->gp_tasks and ->exp_tasks pointers to those of this node's, | ||
566 | * if non-NULL. This might result in waiting for more tasks than | ||
567 | * absolutely necessary, but this is a good performance/complexity | ||
568 | * tradeoff. | ||
569 | */ | ||
570 | if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0) | ||
571 | retval |= RCU_OFL_TASKS_NORM_GP; | ||
572 | if (rcu_preempted_readers_exp(rnp)) | ||
573 | retval |= RCU_OFL_TASKS_EXP_GP; | ||
574 | lp = &rnp->blkd_tasks; | ||
575 | lp_root = &rnp_root->blkd_tasks; | ||
576 | while (!list_empty(lp)) { | ||
577 | t = list_entry(lp->next, typeof(*t), rcu_node_entry); | ||
578 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ | ||
579 | smp_mb__after_unlock_lock(); | ||
580 | list_del(&t->rcu_node_entry); | ||
581 | t->rcu_blocked_node = rnp_root; | ||
582 | list_add(&t->rcu_node_entry, lp_root); | ||
583 | if (&t->rcu_node_entry == rnp->gp_tasks) | ||
584 | rnp_root->gp_tasks = rnp->gp_tasks; | ||
585 | if (&t->rcu_node_entry == rnp->exp_tasks) | ||
586 | rnp_root->exp_tasks = rnp->exp_tasks; | ||
587 | #ifdef CONFIG_RCU_BOOST | ||
588 | if (&t->rcu_node_entry == rnp->boost_tasks) | ||
589 | rnp_root->boost_tasks = rnp->boost_tasks; | ||
590 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
591 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ | ||
592 | } | ||
593 | |||
594 | rnp->gp_tasks = NULL; | ||
595 | rnp->exp_tasks = NULL; | ||
596 | #ifdef CONFIG_RCU_BOOST | ||
597 | rnp->boost_tasks = NULL; | ||
598 | /* | ||
599 | * In case root is being boosted and leaf was not. Make sure | ||
600 | * that we boost the tasks blocking the current grace period | ||
601 | * in this case. | ||
602 | */ | ||
603 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ | ||
604 | smp_mb__after_unlock_lock(); | ||
605 | if (rnp_root->boost_tasks != NULL && | ||
606 | rnp_root->boost_tasks != rnp_root->gp_tasks && | ||
607 | rnp_root->boost_tasks != rnp_root->exp_tasks) | ||
608 | rnp_root->boost_tasks = rnp_root->gp_tasks; | ||
609 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ | ||
610 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
611 | |||
612 | return retval; | ||
613 | } | ||
614 | |||
615 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 525 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
616 | 526 | ||
617 | /* | 527 | /* |
@@ -771,7 +681,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) | |||
771 | 681 | ||
772 | raw_spin_lock_irqsave(&rnp->lock, flags); | 682 | raw_spin_lock_irqsave(&rnp->lock, flags); |
773 | smp_mb__after_unlock_lock(); | 683 | smp_mb__after_unlock_lock(); |
774 | if (list_empty(&rnp->blkd_tasks)) { | 684 | if (!rcu_preempt_has_tasks(rnp)) { |
775 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 685 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
776 | } else { | 686 | } else { |
777 | rnp->exp_tasks = rnp->blkd_tasks.next; | 687 | rnp->exp_tasks = rnp->blkd_tasks.next; |
@@ -933,15 +843,6 @@ static void __init rcu_bootup_announce(void) | |||
933 | } | 843 | } |
934 | 844 | ||
935 | /* | 845 | /* |
936 | * Return the number of RCU batches processed thus far for debug & stats. | ||
937 | */ | ||
938 | long rcu_batches_completed(void) | ||
939 | { | ||
940 | return rcu_batches_completed_sched(); | ||
941 | } | ||
942 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | ||
943 | |||
944 | /* | ||
945 | * Because preemptible RCU does not exist, we never have to check for | 846 | * Because preemptible RCU does not exist, we never have to check for |
946 | * CPUs being in quiescent states. | 847 | * CPUs being in quiescent states. |
947 | */ | 848 | */ |
@@ -960,11 +861,12 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) | |||
960 | 861 | ||
961 | #ifdef CONFIG_HOTPLUG_CPU | 862 | #ifdef CONFIG_HOTPLUG_CPU |
962 | 863 | ||
963 | /* Because preemptible RCU does not exist, no quieting of tasks. */ | 864 | /* |
964 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) | 865 | * Because there is no preemptible RCU, there can be no readers blocked. |
965 | __releases(rnp->lock) | 866 | */ |
867 | static bool rcu_preempt_has_tasks(struct rcu_node *rnp) | ||
966 | { | 868 | { |
967 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 869 | return false; |
968 | } | 870 | } |
969 | 871 | ||
970 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 872 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
@@ -996,23 +898,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |||
996 | WARN_ON_ONCE(rnp->qsmask); | 898 | WARN_ON_ONCE(rnp->qsmask); |
997 | } | 899 | } |
998 | 900 | ||
999 | #ifdef CONFIG_HOTPLUG_CPU | ||
1000 | |||
1001 | /* | ||
1002 | * Because preemptible RCU does not exist, it never needs to migrate | ||
1003 | * tasks that were blocked within RCU read-side critical sections, and | ||
1004 | * such non-existent tasks cannot possibly have been blocking the current | ||
1005 | * grace period. | ||
1006 | */ | ||
1007 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | ||
1008 | struct rcu_node *rnp, | ||
1009 | struct rcu_data *rdp) | ||
1010 | { | ||
1011 | return 0; | ||
1012 | } | ||
1013 | |||
1014 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
1015 | |||
1016 | /* | 901 | /* |
1017 | * Because preemptible RCU does not exist, it never has any callbacks | 902 | * Because preemptible RCU does not exist, it never has any callbacks |
1018 | * to check. | 903 | * to check. |
@@ -1031,20 +916,6 @@ void synchronize_rcu_expedited(void) | |||
1031 | } | 916 | } |
1032 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | 917 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); |
1033 | 918 | ||
1034 | #ifdef CONFIG_HOTPLUG_CPU | ||
1035 | |||
1036 | /* | ||
1037 | * Because preemptible RCU does not exist, there is never any need to | ||
1038 | * report on tasks preempted in RCU read-side critical sections during | ||
1039 | * expedited RCU grace periods. | ||
1040 | */ | ||
1041 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, | ||
1042 | bool wake) | ||
1043 | { | ||
1044 | } | ||
1045 | |||
1046 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
1047 | |||
1048 | /* | 919 | /* |
1049 | * Because preemptible RCU does not exist, rcu_barrier() is just | 920 | * Because preemptible RCU does not exist, rcu_barrier() is just |
1050 | * another name for rcu_barrier_sched(). | 921 | * another name for rcu_barrier_sched(). |
@@ -1080,7 +951,7 @@ void exit_rcu(void) | |||
1080 | 951 | ||
1081 | static void rcu_initiate_boost_trace(struct rcu_node *rnp) | 952 | static void rcu_initiate_boost_trace(struct rcu_node *rnp) |
1082 | { | 953 | { |
1083 | if (list_empty(&rnp->blkd_tasks)) | 954 | if (!rcu_preempt_has_tasks(rnp)) |
1084 | rnp->n_balk_blkd_tasks++; | 955 | rnp->n_balk_blkd_tasks++; |
1085 | else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) | 956 | else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) |
1086 | rnp->n_balk_exp_gp_tasks++; | 957 | rnp->n_balk_exp_gp_tasks++; |
@@ -1127,7 +998,8 @@ static int rcu_boost(struct rcu_node *rnp) | |||
1127 | struct task_struct *t; | 998 | struct task_struct *t; |
1128 | struct list_head *tb; | 999 | struct list_head *tb; |
1129 | 1000 | ||
1130 | if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) | 1001 | if (ACCESS_ONCE(rnp->exp_tasks) == NULL && |
1002 | ACCESS_ONCE(rnp->boost_tasks) == NULL) | ||
1131 | return 0; /* Nothing left to boost. */ | 1003 | return 0; /* Nothing left to boost. */ |
1132 | 1004 | ||
1133 | raw_spin_lock_irqsave(&rnp->lock, flags); | 1005 | raw_spin_lock_irqsave(&rnp->lock, flags); |
@@ -1175,15 +1047,11 @@ static int rcu_boost(struct rcu_node *rnp) | |||
1175 | */ | 1047 | */ |
1176 | t = container_of(tb, struct task_struct, rcu_node_entry); | 1048 | t = container_of(tb, struct task_struct, rcu_node_entry); |
1177 | rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); | 1049 | rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); |
1178 | init_completion(&rnp->boost_completion); | ||
1179 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1050 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1180 | /* Lock only for side effect: boosts task t's priority. */ | 1051 | /* Lock only for side effect: boosts task t's priority. */ |
1181 | rt_mutex_lock(&rnp->boost_mtx); | 1052 | rt_mutex_lock(&rnp->boost_mtx); |
1182 | rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ | 1053 | rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ |
1183 | 1054 | ||
1184 | /* Wait for boostee to be done w/boost_mtx before reinitializing. */ | ||
1185 | wait_for_completion(&rnp->boost_completion); | ||
1186 | |||
1187 | return ACCESS_ONCE(rnp->exp_tasks) != NULL || | 1055 | return ACCESS_ONCE(rnp->exp_tasks) != NULL || |
1188 | ACCESS_ONCE(rnp->boost_tasks) != NULL; | 1056 | ACCESS_ONCE(rnp->boost_tasks) != NULL; |
1189 | } | 1057 | } |
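
Both boost-related deletions above remove the ->boost_completion handshake. It was redundant: rt_mutex_lock() in rcu_boost() cannot return until the boosted reader has called rt_mutex_unlock() in rcu_read_unlock_special(), so the mutex acquisition itself already waits for the boostee to be done with boost_mtx. A userspace model of that ordering argument with a plain pthread mutex (illustrative only; rt_mutex proxy locking has no direct userspace equivalent):

	#include <pthread.h>
	#include <semaphore.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t boost_mtx = PTHREAD_MUTEX_INITIALIZER;
	static sem_t locked;
	static int boostee_done;

	static void *boostee(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&boost_mtx);  /* stands in for the proxy grant */
		sem_post(&locked);
		usleep(100000);                  /* reader finishes its critical section */
		boostee_done = 1;
		pthread_mutex_unlock(&boost_mtx); /* rcu_read_unlock_special() */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		sem_init(&locked, 0, 0);
		pthread_create(&t, NULL, boostee, NULL);
		sem_wait(&locked);               /* boostee now holds boost_mtx */

		/* rcu_boost(): lock only for the side effect of waiting.
		 * This cannot return before the boostee's unlock, so no
		 * separate completion is needed. */
		pthread_mutex_lock(&boost_mtx);
		pthread_mutex_unlock(&boost_mtx);

		printf("boostee_done=%d\n", boostee_done); /* always 1 */
		pthread_join(t, NULL);
		return 0;
	}

The lock/unlock pair in main() corresponds to the "Lock only for side effect" comment in rcu_boost(): once the lock is acquired, the boostee is provably done with boost_mtx, which is exactly the guarantee the deleted wait_for_completion() was providing.
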
@@ -1416,12 +1284,8 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | |||
1416 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) | 1284 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) |
1417 | if ((mask & 0x1) && cpu != outgoingcpu) | 1285 | if ((mask & 0x1) && cpu != outgoingcpu) |
1418 | cpumask_set_cpu(cpu, cm); | 1286 | cpumask_set_cpu(cpu, cm); |
1419 | if (cpumask_weight(cm) == 0) { | 1287 | if (cpumask_weight(cm) == 0) |
1420 | cpumask_setall(cm); | 1288 | cpumask_setall(cm); |
1421 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) | ||
1422 | cpumask_clear_cpu(cpu, cm); | ||
1423 | WARN_ON_ONCE(cpumask_weight(cm) == 0); | ||
1424 | } | ||
1425 | set_cpus_allowed_ptr(t, cm); | 1289 | set_cpus_allowed_ptr(t, cm); |
1426 | free_cpumask_var(cm); | 1290 | free_cpumask_var(cm); |
1427 | } | 1291 | } |
@@ -1446,12 +1310,8 @@ static void __init rcu_spawn_boost_kthreads(void) | |||
1446 | for_each_possible_cpu(cpu) | 1310 | for_each_possible_cpu(cpu) |
1447 | per_cpu(rcu_cpu_has_work, cpu) = 0; | 1311 | per_cpu(rcu_cpu_has_work, cpu) = 0; |
1448 | BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); | 1312 | BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); |
1449 | rnp = rcu_get_root(rcu_state_p); | 1313 | rcu_for_each_leaf_node(rcu_state_p, rnp) |
1450 | (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); | 1314 | (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); |
1451 | if (NUM_RCU_NODES > 1) { | ||
1452 | rcu_for_each_leaf_node(rcu_state_p, rnp) | ||
1453 | (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); | ||
1454 | } | ||
1455 | } | 1315 | } |
1456 | 1316 | ||
1457 | static void rcu_prepare_kthreads(int cpu) | 1317 | static void rcu_prepare_kthreads(int cpu) |
@@ -1605,7 +1465,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) | |||
1605 | * completed since we last checked and there are | 1465 | * completed since we last checked and there are |
1606 | * callbacks not yet ready to invoke. | 1466 | * callbacks not yet ready to invoke. |
1607 | */ | 1467 | */ |
1608 | if (rdp->completed != rnp->completed && | 1468 | if ((rdp->completed != rnp->completed || |
1469 | unlikely(ACCESS_ONCE(rdp->gpwrap))) && | ||
1609 | rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]) | 1470 | rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]) |
1610 | note_gp_changes(rsp, rdp); | 1471 | note_gp_changes(rsp, rdp); |
1611 | 1472 | ||
@@ -1898,11 +1759,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) | |||
1898 | ticks_value = rsp->gpnum - rdp->gpnum; | 1759 | ticks_value = rsp->gpnum - rdp->gpnum; |
1899 | } | 1760 | } |
1900 | print_cpu_stall_fast_no_hz(fast_no_hz, cpu); | 1761 | print_cpu_stall_fast_no_hz(fast_no_hz, cpu); |
1901 | pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n", | 1762 | pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n", |
1902 | cpu, ticks_value, ticks_title, | 1763 | cpu, ticks_value, ticks_title, |
1903 | atomic_read(&rdtp->dynticks) & 0xfff, | 1764 | atomic_read(&rdtp->dynticks) & 0xfff, |
1904 | rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, | 1765 | rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, |
1905 | rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), | 1766 | rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), |
1767 | ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart, | ||
1906 | fast_no_hz); | 1768 | fast_no_hz); |
1907 | } | 1769 | } |
1908 | 1770 | ||
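
The new fqs= field in the stall printout is a snapshot delta: ->n_force_qs counts force-quiescent-state passes, and the name of ->n_force_qs_gpstart suggests it is captured at grace-period start (that code lies outside this hunk), so the difference is the number of FQS passes during the current grace period. The snapshot-and-delta pattern in a standalone sketch (names follow the hunk, structure is illustrative):

	#include <stdio.h>

	/* Illustrative model of the new fqs= computation. */
	struct state {
		unsigned long n_force_qs;         /* bumped per FQS pass */
		unsigned long n_force_qs_gpstart; /* snapshot at GP start */
	};

	static void gp_start(struct state *s)
	{
		s->n_force_qs_gpstart = s->n_force_qs;
	}

	static void print_stall(const struct state *s)
	{
		printf("fqs=%ld\n", (long)(s->n_force_qs - s->n_force_qs_gpstart));
	}

	int main(void)
	{
		struct state s = { 42, 0 };

		gp_start(&s);
		s.n_force_qs += 3;  /* three FQS passes during this GP */
		print_stall(&s);    /* fqs=3 */
		return 0;
	}
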
@@ -2056,9 +1918,26 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force) | |||
2056 | static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) | 1918 | static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) |
2057 | { | 1919 | { |
2058 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); | 1920 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
1921 | unsigned long ret; | ||
1922 | #ifdef CONFIG_PROVE_RCU | ||
2059 | struct rcu_head *rhp; | 1923 | struct rcu_head *rhp; |
1924 | #endif /* #ifdef CONFIG_PROVE_RCU */ | ||
2060 | 1925 | ||
2061 | /* No-CBs CPUs might have callbacks on any of three lists. */ | 1926 | /* |
1927 | * Check count of all no-CBs callbacks awaiting invocation. | ||
1928 | * There needs to be a barrier before this function is called, | ||
1929 | * but associated with a prior determination that no more | ||
1930 | * callbacks would be posted. In the worst case, the first | ||
1931 | * barrier in _rcu_barrier() suffices (but the caller cannot | ||
1932 | * necessarily rely on this, and it is not a substitute for the caller | ||
1933 | * getting the concurrency design right!). There must also be | ||
1934 | * a barrier between the following load and the posting of a callback | ||
1935 | * (if a callback is in fact needed). This is associated with an | ||
1936 | * atomic_inc() in the caller. | ||
1937 | */ | ||
1938 | ret = atomic_long_read(&rdp->nocb_q_count); | ||
1939 | |||
1940 | #ifdef CONFIG_PROVE_RCU | ||
2062 | rhp = ACCESS_ONCE(rdp->nocb_head); | 1941 | rhp = ACCESS_ONCE(rdp->nocb_head); |
2063 | if (!rhp) | 1942 | if (!rhp) |
2064 | rhp = ACCESS_ONCE(rdp->nocb_gp_head); | 1943 | rhp = ACCESS_ONCE(rdp->nocb_gp_head); |
@@ -2072,8 +1951,9 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) | |||
2072 | cpu, rhp->func); | 1951 | cpu, rhp->func); |
2073 | WARN_ON_ONCE(1); | 1952 | WARN_ON_ONCE(1); |
2074 | } | 1953 | } |
1954 | #endif /* #ifdef CONFIG_PROVE_RCU */ | ||
2075 | 1955 | ||
2076 | return !!rhp; | 1956 | return !!ret; |
2077 | } | 1957 | } |
2078 | 1958 | ||
2079 | /* | 1959 | /* |
@@ -2095,9 +1975,10 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, | |||
2095 | struct task_struct *t; | 1975 | struct task_struct *t; |
2096 | 1976 | ||
2097 | /* Enqueue the callback on the nocb list and update counts. */ | 1977 | /* Enqueue the callback on the nocb list and update counts. */ |
1978 | atomic_long_add(rhcount, &rdp->nocb_q_count); | ||
1979 | /* rcu_barrier() relies on ->nocb_q_count add before xchg. */ | ||
2098 | old_rhpp = xchg(&rdp->nocb_tail, rhtp); | 1980 | old_rhpp = xchg(&rdp->nocb_tail, rhtp); |
2099 | ACCESS_ONCE(*old_rhpp) = rhp; | 1981 | ACCESS_ONCE(*old_rhpp) = rhp; |
2100 | atomic_long_add(rhcount, &rdp->nocb_q_count); | ||
2101 | atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy); | 1982 | atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy); |
2102 | smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */ | 1983 | smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */ |
2103 | 1984 | ||
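
The enqueue reordering above is what makes the single-counter rcu_nocb_cpu_needs_barrier() in the previous hunk safe: the count is incremented before the xchg() that publishes the callback, so an observer that sees the callback also sees a nonzero count, and a zero count genuinely means nothing is queued. A userspace model of the pairing with C11 atomics (names illustrative; the real list splicing is considerably more involved):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct cb { struct cb *next; };

	static atomic_long nocb_q_count;
	static _Atomic(struct cb *) nocb_tail;

	static void nocb_enqueue(struct cb *rhp)
	{
		/* Count first: the needs-barrier check below relies on this
		 * add being ordered before the exchange that publishes rhp. */
		atomic_fetch_add(&nocb_q_count, 1);
		atomic_exchange(&nocb_tail, rhp); /* publish (much simplified) */
	}

	static bool needs_barrier(void)
	{
		/* One counter read now covers callbacks at every stage. */
		return atomic_load(&nocb_q_count) != 0;
	}

	int main(void)
	{
		struct cb c = { NULL };

		printf("%d\n", needs_barrier()); /* 0 */
		nocb_enqueue(&c);
		printf("%d\n", needs_barrier()); /* 1 */
		return 0;
	}
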
@@ -2288,9 +2169,6 @@ wait_again: | |||
2288 | /* Move callbacks to wait-for-GP list, which is empty. */ | 2169 | /* Move callbacks to wait-for-GP list, which is empty. */ |
2289 | ACCESS_ONCE(rdp->nocb_head) = NULL; | 2170 | ACCESS_ONCE(rdp->nocb_head) = NULL; |
2290 | rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); | 2171 | rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); |
2291 | rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0); | ||
2292 | rdp->nocb_gp_count_lazy = | ||
2293 | atomic_long_xchg(&rdp->nocb_q_count_lazy, 0); | ||
2294 | gotcbs = true; | 2172 | gotcbs = true; |
2295 | } | 2173 | } |
2296 | 2174 | ||
@@ -2338,9 +2216,6 @@ wait_again: | |||
2338 | /* Append callbacks to follower's "done" list. */ | 2216 | /* Append callbacks to follower's "done" list. */ |
2339 | tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail); | 2217 | tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail); |
2340 | *tail = rdp->nocb_gp_head; | 2218 | *tail = rdp->nocb_gp_head; |
2341 | atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count); | ||
2342 | atomic_long_add(rdp->nocb_gp_count_lazy, | ||
2343 | &rdp->nocb_follower_count_lazy); | ||
2344 | smp_mb__after_atomic(); /* Store *tail before wakeup. */ | 2219 | smp_mb__after_atomic(); /* Store *tail before wakeup. */ |
2345 | if (rdp != my_rdp && tail == &rdp->nocb_follower_head) { | 2220 | if (rdp != my_rdp && tail == &rdp->nocb_follower_head) { |
2346 | /* | 2221 | /* |
@@ -2415,13 +2290,11 @@ static int rcu_nocb_kthread(void *arg) | |||
2415 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty"); | 2290 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty"); |
2416 | ACCESS_ONCE(rdp->nocb_follower_head) = NULL; | 2291 | ACCESS_ONCE(rdp->nocb_follower_head) = NULL; |
2417 | tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head); | 2292 | tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head); |
2418 | c = atomic_long_xchg(&rdp->nocb_follower_count, 0); | ||
2419 | cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0); | ||
2420 | rdp->nocb_p_count += c; | ||
2421 | rdp->nocb_p_count_lazy += cl; | ||
2422 | 2293 | ||
2423 | /* Each pass through the following loop invokes a callback. */ | 2294 | /* Each pass through the following loop invokes a callback. */ |
2424 | trace_rcu_batch_start(rdp->rsp->name, cl, c, -1); | 2295 | trace_rcu_batch_start(rdp->rsp->name, |
2296 | atomic_long_read(&rdp->nocb_q_count_lazy), | ||
2297 | atomic_long_read(&rdp->nocb_q_count), -1); | ||
2425 | c = cl = 0; | 2298 | c = cl = 0; |
2426 | while (list) { | 2299 | while (list) { |
2427 | next = list->next; | 2300 | next = list->next; |
@@ -2443,9 +2316,9 @@ static int rcu_nocb_kthread(void *arg) | |||
2443 | list = next; | 2316 | list = next; |
2444 | } | 2317 | } |
2445 | trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); | 2318 | trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); |
2446 | ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c; | 2319 | smp_mb__before_atomic(); /* _add after CB invocation. */ |
2447 | ACCESS_ONCE(rdp->nocb_p_count_lazy) = | 2320 | atomic_long_add(-c, &rdp->nocb_q_count); |
2448 | rdp->nocb_p_count_lazy - cl; | 2321 | atomic_long_add(-cl, &rdp->nocb_q_count_lazy); |
2449 | rdp->n_nocbs_invoked += c; | 2322 | rdp->n_nocbs_invoked += c; |
2450 | } | 2323 | } |
2451 | return 0; | 2324 | return 0; |
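
On the invocation side, the kthread now leaves the counts alone until after the callbacks have run, then subtracts, with smp_mb__before_atomic() keeping the decrement from being observed first; only then can a zero count imply that the callbacks have completed. A matching standalone sketch (again illustrative, not the kernel's code):

	#include <stdatomic.h>
	#include <stddef.h>
	#include <stdio.h>

	struct cb { struct cb *next; void (*func)(struct cb *); };

	static atomic_long nocb_q_count; /* total not-yet-invoked callbacks */

	static void do_nothing(struct cb *rhp) { (void)rhp; }

	/* Model of the kthread's invoke-then-subtract step. */
	static void nocb_invoke(struct cb *list)
	{
		long c = 0;

		while (list) {
			struct cb *next = list->next;

			list->func(list);  /* invoke the callback */
			c++;
			list = next;
		}
		/* Keep the decrement from being observed before the
		 * invocations, so count==0 implies the callbacks ran. */
		atomic_thread_fence(memory_order_seq_cst);
		atomic_fetch_add(&nocb_q_count, -c);
	}

	int main(void)
	{
		struct cb c = { NULL, do_nothing };

		atomic_fetch_add(&nocb_q_count, 1);
		nocb_invoke(&c);
		printf("remaining=%ld\n", atomic_load(&nocb_q_count)); /* 0 */
		return 0;
	}
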
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c index 5cdc62e1beeb..fbb6240509ea 100644 --- a/kernel/rcu/tree_trace.c +++ b/kernel/rcu/tree_trace.c | |||
@@ -46,6 +46,8 @@ | |||
46 | #define RCU_TREE_NONCORE | 46 | #define RCU_TREE_NONCORE |
47 | #include "tree.h" | 47 | #include "tree.h" |
48 | 48 | ||
49 | DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr); | ||
50 | |||
49 | static int r_open(struct inode *inode, struct file *file, | 51 | static int r_open(struct inode *inode, struct file *file, |
50 | const struct seq_operations *op) | 52 | const struct seq_operations *op) |
51 | { | 53 | { |
@@ -115,11 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | |||
115 | 117 | ||
116 | if (!rdp->beenonline) | 118 | if (!rdp->beenonline) |
117 | return; | 119 | return; |
118 | seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d", | 120 | seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d", |
119 | rdp->cpu, | 121 | rdp->cpu, |
120 | cpu_is_offline(rdp->cpu) ? '!' : ' ', | 122 | cpu_is_offline(rdp->cpu) ? '!' : ' ', |
121 | ulong2long(rdp->completed), ulong2long(rdp->gpnum), | 123 | ulong2long(rdp->completed), ulong2long(rdp->gpnum), |
122 | rdp->passed_quiesce, rdp->qs_pending); | 124 | rdp->passed_quiesce, |
125 | rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu), | ||
126 | rdp->qs_pending); | ||
123 | seq_printf(m, " dt=%d/%llx/%d df=%lu", | 127 | seq_printf(m, " dt=%d/%llx/%d df=%lu", |
124 | atomic_read(&rdp->dynticks->dynticks), | 128 | atomic_read(&rdp->dynticks->dynticks), |
125 | rdp->dynticks->dynticks_nesting, | 129 | rdp->dynticks->dynticks_nesting, |
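
The second value in the new pq=%d/%d pair compares the per-CPU rcu_qs_ctr against the snapshot in ->rcu_qs_ctr_snap: it prints 1 while the counter has not advanced past the snapshot, that is, while the CPU has not passed through an rcu_qs_ctr quiescent state since the snapshot was taken. A minimal model of the comparison (field names as in the hunk; the surrounding snapshot machinery is assumed):

	#include <stdbool.h>
	#include <stdio.h>

	/* Model: one counter per CPU, bumped at each quiescent state. */
	static unsigned long rcu_qs_ctr[2];

	struct rdp_model {
		int cpu;
		unsigned long rcu_qs_ctr_snap; /* snapshot taken earlier */
	};

	static bool qs_ctr_unchanged(const struct rdp_model *rdp)
	{
		/* Printed as the second pq value: 1 while this CPU has not
		 * passed a quiescent state since the snapshot. */
		return rdp->rcu_qs_ctr_snap == rcu_qs_ctr[rdp->cpu];
	}

	int main(void)
	{
		struct rdp_model rdp = { .cpu = 0, .rcu_qs_ctr_snap = 5 };

		rcu_qs_ctr[0] = 5;
		printf("pq=1/%d\n", qs_ctr_unchanged(&rdp)); /* pq=1/1 */
		rcu_qs_ctr[0]++;   /* CPU passes through a quiescent state */
		printf("pq=1/%d\n", qs_ctr_unchanged(&rdp)); /* pq=1/0 */
		return 0;
	}
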
diff --git a/kernel/softirq.c b/kernel/softirq.c index 501baa9ac1be..8cdb98847c7b 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -656,9 +656,8 @@ static void run_ksoftirqd(unsigned int cpu) | |||
656 | * in the task stack here. | 656 | * in the task stack here. |
657 | */ | 657 | */ |
658 | __do_softirq(); | 658 | __do_softirq(); |
659 | rcu_note_context_switch(); | ||
660 | local_irq_enable(); | 659 | local_irq_enable(); |
661 | cond_resched(); | 660 | cond_resched_rcu_qs(); |
662 | return; | 661 | return; |
663 | } | 662 | } |
664 | local_irq_enable(); | 663 | local_irq_enable(); |
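
The ksoftirqd hunk folds the explicit quiescent-state note and the cond_resched() call into cond_resched_rcu_qs(). A hedged sketch of what that helper amounts to in kernels of this vintage (not a verbatim quote of <linux/rcupdate.h>; the exact definition has varied across versions):

	/* Sketch only: models the intent of cond_resched_rcu_qs(). */
	#define cond_resched_rcu_qs() \
	do { \
		rcu_note_voluntary_context_switch(current); \
		cond_resched(); \
	} while (0)

The point either way is that the reschedule point doubles as an RCU quiescent state even when cond_resched() does not actually switch.
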
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5f2ce616c046..a2ca213c71ca 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1215,6 +1215,7 @@ config RCU_TORTURE_TEST | |||
1215 | tristate "torture tests for RCU" | 1215 | tristate "torture tests for RCU" |
1216 | depends on DEBUG_KERNEL | 1216 | depends on DEBUG_KERNEL |
1217 | select TORTURE_TEST | 1217 | select TORTURE_TEST |
1218 | select SRCU | ||
1218 | default n | 1219 | default n |
1219 | help | 1220 | help |
1220 | This option provides a kernel module that runs torture tests | 1221 | This option provides a kernel module that runs torture tests |
@@ -1257,7 +1258,7 @@ config RCU_CPU_STALL_TIMEOUT | |||
1257 | config RCU_CPU_STALL_INFO | 1258 | config RCU_CPU_STALL_INFO |
1258 | bool "Print additional diagnostics on RCU CPU stall" | 1259 | bool "Print additional diagnostics on RCU CPU stall" |
1259 | depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL | 1260 | depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL |
1260 | default n | 1261 | default y |
1261 | help | 1262 | help |
1262 | For each stalled CPU that is aware of the current RCU grace | 1263 | For each stalled CPU that is aware of the current RCU grace |
1263 | period, print out additional per-CPU diagnostic information | 1264 | period, print out additional per-CPU diagnostic information |
diff --git a/mm/Kconfig b/mm/Kconfig index 1d1ae6b078fd..4395b12869c8 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -325,6 +325,7 @@ config VIRT_TO_BUS | |||
325 | 325 | ||
326 | config MMU_NOTIFIER | 326 | config MMU_NOTIFIER |
327 | bool | 327 | bool |
328 | select SRCU | ||
328 | 329 | ||
329 | config KSM | 330 | config KSM |
330 | bool "Enable KSM for page merging" | 331 | bool "Enable KSM for page merging" |
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig index 8eb779b9d77f..604e718d68d3 100644 --- a/security/tomoyo/Kconfig +++ b/security/tomoyo/Kconfig | |||
@@ -5,6 +5,7 @@ config SECURITY_TOMOYO | |||
5 | select SECURITYFS | 5 | select SECURITYFS |
6 | select SECURITY_PATH | 6 | select SECURITY_PATH |
7 | select SECURITY_NETWORK | 7 | select SECURITY_NETWORK |
8 | select SRCU | ||
8 | default n | 9 | default n |
9 | help | 10 | help |
10 | This selects TOMOYO Linux, pathname-based access control. | 11 | This selects TOMOYO Linux, pathname-based access control. |
diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh index abe14b7f36e9..bb99cde3f5f9 100755 --- a/tools/testing/selftests/rcutorture/bin/cpus2use.sh +++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | ncpus=`grep '^processor' /proc/cpuinfo | wc -l` | 25 | ncpus=`grep '^processor' /proc/cpuinfo | wc -l` |
26 | idlecpus=`mpstat | tail -1 | \ | 26 | idlecpus=`mpstat | tail -1 | \ |
27 | awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'` | 27 | awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'` |
28 | awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null ' | 28 | awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null ' |
29 | BEGIN { | 29 | BEGIN { |
30 | cpus2use = idlecpus; | 30 | cpus2use = idlecpus; |
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh index d6cc07fc137f..559e01ac86be 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh | |||
@@ -30,6 +30,7 @@ else | |||
30 | echo Unreadable results directory: $i | 30 | echo Unreadable results directory: $i |
31 | exit 1 | 31 | exit 1 |
32 | fi | 32 | fi |
33 | . tools/testing/selftests/rcutorture/bin/functions.sh | ||
33 | 34 | ||
34 | configfile=`echo $i | sed -e 's/^.*\///'` | 35 | configfile=`echo $i | sed -e 's/^.*\///'` |
35 | ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'` | 36 | ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'` |
@@ -48,4 +49,21 @@ else | |||
48 | title="$title ($ngpsps per second)" | 49 | title="$title ($ngpsps per second)" |
49 | fi | 50 | fi |
50 | echo $title | 51 | echo $title |
52 | nclosecalls=`grep --binary-files=text 'torture: Reader Batch' $i/console.log | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'` | ||
53 | if test -z "$nclosecalls" | ||
54 | then | ||
55 | exit 0 | ||
56 | fi | ||
57 | if test "$nclosecalls" -eq 0 | ||
58 | then | ||
59 | exit 0 | ||
60 | fi | ||
61 | # Compute number of close calls per tenth of an hour | ||
62 | nclosecalls10=`awk -v nclosecalls=$nclosecalls -v dur=$dur 'BEGIN { print int(nclosecalls * 36000 / dur) }' < /dev/null` | ||
63 | if test $nclosecalls10 -gt 5 -a $nclosecalls -gt 1 | ||
64 | then | ||
65 | print_bug $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i | ||
66 | else | ||
67 | print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i | ||
68 | fi | ||
51 | fi | 69 | fi |
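
For scale, dur is measured in seconds, so nclosecalls10 is close calls per tenth of an hour: a 30-minute run (dur=1800) that logged 3 Reader Batch close calls gives int(3 * 36000 / 1800) = 60, which exceeds the threshold of 5 and, since more than one close call occurred, is reported through print_bug rather than print_warning.
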
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh index 8ca9f21f2efc..5236e073919d 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh | |||
@@ -8,9 +8,9 @@ | |||
8 | # | 8 | # |
9 | # Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args | 9 | # Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args |
10 | # | 10 | # |
11 | # qemu-args defaults to "-nographic", along with arguments specifying the | 11 | # qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with |
12 | # number of CPUs and other options generated from | 12 | # arguments specifying the number of CPUs and other |
13 | # the underlying CPU architecture. | 13 | # options generated from the underlying CPU architecture. |
14 | # boot_args defaults to value returned by the per_version_boot_params | 14 | # boot_args defaults to value returned by the per_version_boot_params |
15 | # shell function. | 15 | # shell function. |
16 | # | 16 | # |
@@ -138,7 +138,7 @@ then | |||
138 | fi | 138 | fi |
139 | 139 | ||
140 | # Generate -smp qemu argument. | 140 | # Generate -smp qemu argument. |
141 | qemu_args="-nographic $qemu_args" | 141 | qemu_args="-enable-kvm -soundhw pcspk -nographic $qemu_args" |
142 | cpu_count=`configNR_CPUS.sh $config_template` | 142 | cpu_count=`configNR_CPUS.sh $config_template` |
143 | cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"` | 143 | cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"` |
144 | vcpus=`identify_qemu_vcpus` | 144 | vcpus=`identify_qemu_vcpus` |
@@ -168,6 +168,7 @@ then | |||
168 | touch $resdir/buildonly | 168 | touch $resdir/buildonly |
169 | exit 0 | 169 | exit 0 |
170 | fi | 170 | fi |
171 | echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log | ||
171 | echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd | 172 | echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd |
172 | ( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) & | 173 | ( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) & |
173 | qemu_pid=$! | 174 | qemu_pid=$! |
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh index 499d1e598e42..a6b57622c2e5 100755 --- a/tools/testing/selftests/rcutorture/bin/parse-build.sh +++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh | |||
@@ -26,12 +26,15 @@ | |||
26 | # | 26 | # |
27 | # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 27 | # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
28 | 28 | ||
29 | T=$1 | 29 | F=$1 |
30 | title=$2 | 30 | title=$2 |
31 | T=/tmp/parse-build.sh.$$ | ||
32 | trap 'rm -rf $T' 0 | ||
33 | mkdir $T | ||
31 | 34 | ||
32 | . functions.sh | 35 | . functions.sh |
33 | 36 | ||
34 | if grep -q CC < $T | 37 | if grep -q CC < $F |
35 | then | 38 | then |
36 | : | 39 | : |
37 | else | 40 | else |
@@ -39,18 +42,21 @@ else | |||
39 | exit 1 | 42 | exit 1 |
40 | fi | 43 | fi |
41 | 44 | ||
42 | if grep -q "error:" < $T | 45 | if grep -q "error:" < $F |
43 | then | 46 | then |
44 | print_bug $title build errors: | 47 | print_bug $title build errors: |
45 | grep "error:" < $T | 48 | grep "error:" < $F |
46 | exit 2 | 49 | exit 2 |
47 | fi | 50 | fi |
48 | exit 0 | ||
49 | 51 | ||
50 | if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T | 52 | grep warning: < $F > $T/warnings |
53 | grep "include/linux/*rcu*\.h:" $T/warnings > $T/hwarnings | ||
54 | grep "kernel/rcu/[^/]*:" $T/warnings > $T/cwarnings | ||
55 | cat $T/hwarnings $T/cwarnings > $T/rcuwarnings | ||
56 | if test -s $T/rcuwarnings | ||
51 | then | 57 | then |
52 | print_warning $title build errors: | 58 | print_warning $title build errors: |
53 | egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T | 59 | cat $T/rcuwarnings |
54 | exit 2 | 60 | exit 2 |
55 | fi | 61 | fi |
56 | exit 0 | 62 | exit 0 |
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh index f962ba4cf68b..d8f35cf116be 100755 --- a/tools/testing/selftests/rcutorture/bin/parse-console.sh +++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh | |||
@@ -36,7 +36,7 @@ if grep -Pq '\x00' < $file | |||
36 | then | 36 | then |
37 | print_warning Console output contains nul bytes, old qemu still running? | 37 | print_warning Console output contains nul bytes, old qemu still running? |
38 | fi | 38 | fi |
39 | egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T | 39 | egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T |
40 | if test -s $T | 40 | if test -s $T |
41 | then | 41 | then |
42 | print_warning Assertion failure in $file $title | 42 | print_warning Assertion failure in $file $title |