88 files changed, 4187 insertions, 4014 deletions
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg
deleted file mode 100644
index 9bbb1944f962..000000000000
--- a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg
+++ /dev/null
@@ -1,499 +0,0 @@
[entire file deleted: 499 lines of fig2dev-generated SVG source; the figure showed the rcu_sched and rcu_bh instances of struct rcu_state, each containing a two-level tree of struct rcu_node structures with per-CPU struct rcu_data structures referencing the leaf nodes]
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg
deleted file mode 100644
index 21ba7823479d..000000000000
--- a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg
+++ /dev/null
@@ -1,695 +0,0 @@
[entire file deleted: 695 lines of fig2dev-generated SVG source; the figure showed the same rcu_sched/rcu_bh struct rcu_state, struct rcu_node, and struct rcu_data layout, extended with the per-CPU struct rcu_dynticks structures referenced by the struct rcu_data structures]
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg
deleted file mode 100644
index 15adcac036c7..000000000000
--- a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg
+++ /dev/null
@@ -1,741 +0,0 @@
[entire file deleted: 741 lines of fig2dev-generated SVG source; the figure showed the preemptible-RCU variant of the same diagram, with struct rcu_state, struct rcu_node, struct rcu_data, and per-CPU dyntick-idle structures]
603 | <text | ||
604 | xml:space="preserve" | ||
605 | x="450" | ||
606 | y="1800" | ||
607 | fill="#000000" | ||
608 | font-family="Courier" | ||
609 | font-style="normal" | ||
610 | font-weight="bold" | ||
611 | font-size="192" | ||
612 | text-anchor="start" | ||
613 | id="text152">struct rcu_state</text> | ||
614 | <!-- Text --> | ||
615 | <text | ||
616 | xml:space="preserve" | ||
617 | x="1050" | ||
618 | y="10050" | ||
619 | fill="#000000" | ||
620 | font-family="Courier" | ||
621 | font-style="normal" | ||
622 | font-weight="bold" | ||
623 | font-size="192" | ||
624 | text-anchor="middle" | ||
625 | id="text154">struct</text> | ||
626 | <!-- Text --> | ||
627 | <text | ||
628 | xml:space="preserve" | ||
629 | x="1050" | ||
630 | y="10350" | ||
631 | fill="#000000" | ||
632 | font-family="Courier" | ||
633 | font-style="normal" | ||
634 | font-weight="bold" | ||
635 | font-size="192" | ||
636 | text-anchor="middle" | ||
637 | id="text156">rcu_dynticks</text> | ||
638 | <!-- Text --> | ||
639 | <text | ||
640 | xml:space="preserve" | ||
641 | x="4050" | ||
642 | y="10050" | ||
643 | fill="#000000" | ||
644 | font-family="Courier" | ||
645 | font-style="normal" | ||
646 | font-weight="bold" | ||
647 | font-size="192" | ||
648 | text-anchor="middle" | ||
649 | id="text158">struct</text> | ||
650 | <!-- Text --> | ||
651 | <text | ||
652 | xml:space="preserve" | ||
653 | x="4050" | ||
654 | y="10350" | ||
655 | fill="#000000" | ||
656 | font-family="Courier" | ||
657 | font-style="normal" | ||
658 | font-weight="bold" | ||
659 | font-size="192" | ||
660 | text-anchor="middle" | ||
661 | id="text160">rcu_dynticks</text> | ||
662 | <!-- Text --> | ||
663 | <text | ||
664 | xml:space="preserve" | ||
665 | x="2400" | ||
666 | y="8850" | ||
667 | fill="#000000" | ||
668 | font-family="Courier" | ||
669 | font-style="normal" | ||
670 | font-weight="bold" | ||
671 | font-size="192" | ||
672 | text-anchor="middle" | ||
673 | id="text162">struct</text> | ||
674 | <!-- Text --> | ||
675 | <text | ||
676 | xml:space="preserve" | ||
677 | x="2400" | ||
678 | y="9150" | ||
679 | fill="#000000" | ||
680 | font-family="Courier" | ||
681 | font-style="normal" | ||
682 | font-weight="bold" | ||
683 | font-size="192" | ||
684 | text-anchor="middle" | ||
685 | id="text164">rcu_dynticks</text> | ||
686 | <!-- Text --> | ||
687 | <text | ||
688 | xml:space="preserve" | ||
689 | x="5400" | ||
690 | y="8850" | ||
691 | fill="#000000" | ||
692 | font-family="Courier" | ||
693 | font-style="normal" | ||
694 | font-weight="bold" | ||
695 | font-size="192" | ||
696 | text-anchor="middle" | ||
697 | id="text166">struct</text> | ||
698 | <!-- Text --> | ||
699 | <text | ||
700 | xml:space="preserve" | ||
701 | x="5400" | ||
702 | y="9150" | ||
703 | fill="#000000" | ||
704 | font-family="Courier" | ||
705 | font-style="normal" | ||
706 | font-weight="bold" | ||
707 | font-size="192" | ||
708 | text-anchor="middle" | ||
709 | id="text168">rcu_dynticks</text> | ||
710 | <!-- Text --> | ||
711 | <text | ||
712 | xml:space="preserve" | ||
713 | x="6900" | ||
714 | y="300" | ||
715 | fill="#000000" | ||
716 | font-family="Helvetica" | ||
717 | font-style="normal" | ||
718 | font-weight="normal" | ||
719 | font-size="192" | ||
720 | text-anchor="end" | ||
721 | id="text170">rcu_preempt</text> | ||
722 | <!-- Text --> | ||
723 | <text | ||
724 | xml:space="preserve" | ||
725 | x="6000" | ||
726 | y="1200" | ||
727 | fill="#000000" | ||
728 | font-family="Helvetica" | ||
729 | font-style="normal" | ||
730 | font-weight="normal" | ||
731 | font-size="192" | ||
732 | text-anchor="end" | ||
733 | id="text172">rcu_sched</text> | ||
734 | <!-- Line --> | ||
735 | <polyline | ||
736 | points="5250,5850 5250,4864 " | ||
737 | style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | ||
738 | id="polyline174" /> | ||
739 | <!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740--> | ||
740 | </g> | ||
741 | </svg> | ||
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg index bbc3801470d0..3a1a4f85dc3a 100644 --- a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg +++ b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg | |||
@@ -13,12 +13,12 @@ | |||
13 | xmlns="http://www.w3.org/2000/svg" | 13 | xmlns="http://www.w3.org/2000/svg" |
14 | xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" | 14 | xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" |
15 | xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" | 15 | xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" |
16 | width="7.4in" | 16 | width="7.4000001in" |
17 | height="9.9in" | 17 | height="7.9000001in" |
18 | viewBox="-44 -44 8938 11938" | 18 | viewBox="-44 -44 8938 9526.283" |
19 | id="svg2" | 19 | id="svg2" |
20 | version="1.1" | 20 | version="1.1" |
21 | inkscape:version="0.48.4 r9939" | 21 | inkscape:version="0.92.2pre0 (973e216, 2017-07-25)" |
22 | sodipodi:docname="BigTreePreemptRCUBHdyntickCB.svg"> | 22 | sodipodi:docname="BigTreePreemptRCUBHdyntickCB.svg"> |
23 | <metadata | 23 | <metadata |
24 | id="metadata212"> | 24 | id="metadata212"> |
@@ -37,15 +37,46 @@ | |||
37 | <marker | 37 | <marker |
38 | inkscape:stockid="Arrow1Mend" | 38 | inkscape:stockid="Arrow1Mend" |
39 | orient="auto" | 39 | orient="auto" |
40 | refY="0.0" | 40 | refY="0" |
41 | refX="0.0" | 41 | refX="0" |
42 | id="marker1177" | ||
43 | style="overflow:visible" | ||
44 | inkscape:isstock="true"> | ||
45 | <path | ||
46 | id="path897" | ||
47 | d="M 0,0 5,-5 -12.5,0 5,5 Z" | ||
48 | style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1" | ||
49 | transform="matrix(-0.4,0,0,-0.4,-4,0)" | ||
50 | inkscape:connector-curvature="0" /> | ||
51 | </marker> | ||
52 | <marker | ||
53 | inkscape:stockid="Arrow1Lend" | ||
54 | orient="auto" | ||
55 | refY="0" | ||
56 | refX="0" | ||
57 | id="Arrow1Lend" | ||
58 | style="overflow:visible" | ||
59 | inkscape:isstock="true"> | ||
60 | <path | ||
61 | id="path891" | ||
62 | d="M 0,0 5,-5 -12.5,0 5,5 Z" | ||
63 | style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1" | ||
64 | transform="matrix(-0.8,0,0,-0.8,-10,0)" | ||
65 | inkscape:connector-curvature="0" /> | ||
66 | </marker> | ||
67 | <marker | ||
68 | inkscape:stockid="Arrow1Mend" | ||
69 | orient="auto" | ||
70 | refY="0" | ||
71 | refX="0" | ||
42 | id="Arrow1Mend" | 72 | id="Arrow1Mend" |
43 | style="overflow:visible;"> | 73 | style="overflow:visible"> |
44 | <path | 74 | <path |
45 | id="path3970" | 75 | id="path3970" |
46 | d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z " | 76 | d="M 0,0 5,-5 -12.5,0 5,5 Z" |
47 | style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;" | 77 | style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt" |
48 | transform="scale(0.4) rotate(180) translate(10,0)" /> | 78 | transform="matrix(-0.4,0,0,-0.4,-4,0)" |
79 | inkscape:connector-curvature="0" /> | ||
49 | </marker> | 80 | </marker> |
50 | </defs> | 81 | </defs> |
51 | <sodipodi:namedview | 82 | <sodipodi:namedview |
@@ -57,802 +88,575 @@ | |||
57 | guidetolerance="10" | 88 | guidetolerance="10" |
58 | inkscape:pageopacity="0" | 89 | inkscape:pageopacity="0" |
59 | inkscape:pageshadow="2" | 90 | inkscape:pageshadow="2" |
60 | inkscape:window-width="881" | 91 | inkscape:window-width="1920" |
61 | inkscape:window-height="1128" | 92 | inkscape:window-height="1019" |
62 | id="namedview208" | 93 | id="namedview208" |
63 | showgrid="false" | 94 | showgrid="false" |
64 | inkscape:zoom="1.0195195" | 95 | inkscape:zoom="1.0195195" |
65 | inkscape:cx="333" | 96 | inkscape:cx="166.25478" |
66 | inkscape:cy="445.49997" | 97 | inkscape:cy="362.18693" |
67 | inkscape:window-x="936" | 98 | inkscape:window-x="0" |
68 | inkscape:window-y="24" | 99 | inkscape:window-y="0" |
69 | inkscape:window-maximized="0" | 100 | inkscape:window-maximized="1" |
70 | inkscape:current-layer="g4" /> | 101 | inkscape:current-layer="g4" /> |
71 | <g | 102 | <g |
72 | style="stroke-width:.025in; fill:none" | 103 | style="fill:none;stroke-width:0.025in" |
73 | id="g4"> | 104 | id="g4" |
105 | transform="translate(0,-2415.6743)"> | ||
74 | <!-- Line: box --> | 106 | <!-- Line: box --> |
75 | <rect | ||
76 | x="900" | ||
77 | y="0" | ||
78 | width="6300" | ||
79 | height="7350" | ||
80 | rx="0" | ||
81 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; " | ||
82 | id="rect6" /> | ||
83 | <!-- Line: box --> | 107 | <!-- Line: box --> |
84 | <rect | ||
85 | x="1200" | ||
86 | y="600" | ||
87 | width="5700" | ||
88 | height="3750" | ||
89 | rx="0" | ||
90 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; " | ||
91 | id="rect8" /> | ||
92 | <!-- Line: box --> | 108 | <!-- Line: box --> |
93 | <rect | ||
94 | x="5400" | ||
95 | y="4950" | ||
96 | width="1500" | ||
97 | height="900" | ||
98 | rx="0" | ||
99 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | ||
100 | id="rect10" /> | ||
101 | <!-- Line: box --> | 109 | <!-- Line: box --> |
102 | <rect | ||
103 | x="450" | ||
104 | y="450" | ||
105 | width="6300" | ||
106 | height="7350" | ||
107 | rx="0" | ||
108 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; " | ||
109 | id="rect12" /> | ||
110 | <!-- Line: box --> | 110 | <!-- Line: box --> |
111 | <rect | ||
112 | x="750" | ||
113 | y="1050" | ||
114 | width="5700" | ||
115 | height="3750" | ||
116 | rx="0" | ||
117 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; " | ||
118 | id="rect14" /> | ||
119 | <!-- Line: box --> | 111 | <!-- Line: box --> |
120 | <rect | ||
121 | x="4950" | ||
122 | y="5400" | ||
123 | width="1500" | ||
124 | height="900" | ||
125 | rx="0" | ||
126 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | ||
127 | id="rect16" /> | ||
128 | <!-- Line --> | 112 | <!-- Line --> |
129 | <polyline | ||
130 | points="5250,8550 5688,6362 " | ||
131 | style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | ||
132 | id="polyline18" /> | ||
133 | <!-- Arrowhead on XXXpoint 5250 8550 - 5710 6240--> | 113 | <!-- Arrowhead on XXXpoint 5250 8550 - 5710 6240--> |
134 | <polyline | 114 | <polyline |
135 | points="5714 6518 5704 6272 5598 6494 " | 115 | points="5714 6518 5704 6272 5598 6494 " |
136 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; " | 116 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8" |
137 | id="polyline20" /> | 117 | id="polyline20" |
118 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
138 | <!-- Line --> | 119 | <!-- Line --> |
139 | <polyline | ||
140 | points="4050,9750 4486,7712 " | ||
141 | style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | ||
142 | id="polyline22" /> | ||
143 | <!-- Arrowhead on XXXpoint 4050 9750 - 4512 7590--> | 120 | <!-- Arrowhead on XXXpoint 4050 9750 - 4512 7590--> |
144 | <polyline | 121 | <polyline |
145 | points="4514 7868 4506 7622 4396 7844 " | 122 | points="4514 7868 4506 7622 4396 7844 " |
146 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; " | 123 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8" |
147 | id="polyline24" /> | 124 | id="polyline24" |
125 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
148 | <!-- Line --> | 126 | <!-- Line --> |
149 | <polyline | ||
150 | points="1040,9750 1476,7712 " | ||
151 | style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | ||
152 | id="polyline26" /> | ||
153 | <!-- Arrowhead on XXXpoint 1040 9750 - 1502 7590--> | 127 | <!-- Arrowhead on XXXpoint 1040 9750 - 1502 7590--> |
154 | <polyline | 128 | <polyline |
155 | points="1504 7868 1496 7622 1386 7844 " | 129 | points="1504 7868 1496 7622 1386 7844 " |
156 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; " | 130 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8" |
157 | id="polyline28" /> | 131 | id="polyline28" |
132 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
158 | <!-- Line --> | 133 | <!-- Line --> |
159 | <polyline | ||
160 | points="2240,8550 2676,6512 " | ||
161 | style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | ||
162 | id="polyline30" /> | ||
163 | <!-- Arrowhead on XXXpoint 2240 8550 - 2702 6390--> | 134 | <!-- Arrowhead on XXXpoint 2240 8550 - 2702 6390--> |
164 | <polyline | 135 | <polyline |
165 | points="2704 6668 2696 6422 2586 6644 " | 136 | points="2704 6668 2696 6422 2586 6644 " |
166 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; " | 137 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8" |
167 | id="polyline32" /> | 138 | id="polyline32" |
139 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
168 | <!-- Line --> | 140 | <!-- Line --> |
169 | <polyline | ||
170 | points="4050,9600 5692,6062 " | ||
171 | style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | ||
172 | id="polyline34" /> | ||
173 | <!-- Arrowhead on XXXpoint 4050 9600 - 5744 5948--> | 141 | <!-- Arrowhead on XXXpoint 4050 9600 - 5744 5948--> |
174 | <polyline | 142 | <polyline |
175 | points="5682 6220 5730 5978 5574 6170 " | 143 | points="5682 6220 5730 5978 5574 6170 " |
176 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; " | 144 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8" |
177 | id="polyline36" /> | 145 | id="polyline36" |
146 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
178 | <!-- Line --> | 147 | <!-- Line --> |
179 | <polyline | ||
180 | points="1086,9600 2728,6062 " | ||
181 | style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | ||
182 | id="polyline38" /> | ||
183 | <!-- Arrowhead on XXXpoint 1086 9600 - 2780 5948--> | 148 | <!-- Arrowhead on XXXpoint 1086 9600 - 2780 5948--> |
184 | <polyline | 149 | <polyline |
185 | points="2718 6220 2766 5978 2610 6170 " | 150 | points="2718 6220 2766 5978 2610 6170 " |
186 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; " | 151 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8" |
187 | id="polyline40" /> | 152 | id="polyline40" |
153 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
188 | <!-- Line: box --> | 154 | <!-- Line: box --> |
189 | <rect | 155 | <rect |
190 | x="0" | 156 | x="12.340758" |
191 | y="900" | 157 | y="2442.5947" |
192 | width="6300" | 158 | width="6300" |
193 | height="7350" | 159 | height="7045.3135" |
194 | rx="0" | 160 | rx="0" |
195 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; " | 161 | style="fill:#ffffff;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
196 | id="rect42" /> | 162 | id="rect42" /> |
197 | <!-- Line: box --> | 163 | <!-- Line: box --> |
198 | <rect | 164 | <rect |
199 | x="300" | 165 | x="312.34076" |
200 | y="1500" | 166 | y="3017.7224" |
201 | width="5700" | 167 | width="5700" |
202 | height="3750" | 168 | height="3594.5476" |
203 | rx="0" | 169 | rx="0" |
204 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; " | 170 | style="fill:#ffff00;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
205 | id="rect44" /> | 171 | id="rect44" /> |
206 | <!-- Line --> | 172 | <!-- Line --> |
207 | <polyline | 173 | <polyline |
208 | points="1350,3900 2350,3040 " | 174 | points="1350,3900 2350,3040 " |
209 | style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 175 | style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
210 | id="polyline46" /> | 176 | id="polyline46" |
177 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
211 | <!-- Arrowhead on XXXpoint 1350 3900 - 2444 2960--> | 178 | <!-- Arrowhead on XXXpoint 1350 3900 - 2444 2960--> |
212 | <!-- Line --> | 179 | <!-- Line --> |
213 | <polyline | 180 | <polyline |
214 | points="4950,3900 3948,3040 " | 181 | points="4950,3900 3948,3040 " |
215 | style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 182 | style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
216 | id="polyline50" /> | 183 | id="polyline50" |
184 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
217 | <!-- Arrowhead on XXXpoint 4950 3900 - 3854 2960--> | 185 | <!-- Arrowhead on XXXpoint 4950 3900 - 3854 2960--> |
218 | <!-- Line --> | 186 | <!-- Line --> |
219 | <polyline | 187 | <polyline |
220 | points="4050,7050 4050,4864 " | 188 | points="4050,7050 4050,4864 " |
221 | style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 189 | style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
222 | id="polyline54" /> | 190 | id="polyline54" |
191 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
223 | <!-- Arrowhead on XXXpoint 4050 7050 - 4050 4740--> | 192 | <!-- Arrowhead on XXXpoint 4050 7050 - 4050 4740--> |
224 | <!-- Line --> | 193 | <!-- Line --> |
225 | <polyline | 194 | <polyline |
226 | points="1050,7050 1050,4864 " | 195 | points="1050,7050 1050,4864 " |
227 | style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 196 | style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
228 | id="polyline58" /> | 197 | id="polyline58" |
198 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
229 | <!-- Arrowhead on XXXpoint 1050 7050 - 1050 4740--> | 199 | <!-- Arrowhead on XXXpoint 1050 7050 - 1050 4740--> |
230 | <!-- Line --> | 200 | <!-- Line --> |
231 | <polyline | 201 | <polyline |
232 | points="2250,5850 2250,4864 " | 202 | points="2250,5850 2250,4864 " |
233 | style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 203 | style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
234 | id="polyline62" /> | 204 | id="polyline62" |
205 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
235 | <!-- Arrowhead on XXXpoint 2250 5850 - 2250 4740--> | 206 | <!-- Arrowhead on XXXpoint 2250 5850 - 2250 4740--> |
236 | <!-- Line --> | 207 | <!-- Line --> |
237 | <polyline | ||
238 | points="2250,8550 2250,6814 " | ||
239 | style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | ||
240 | id="polyline66" /> | ||
241 | <!-- Arrowhead on XXXpoint 2250 8550 - 2250 6690--> | 208 | <!-- Arrowhead on XXXpoint 2250 8550 - 2250 6690--> |
242 | <!-- Line --> | 209 | <!-- Line --> |
243 | <polyline | ||
244 | points="1050,9750 1050,8014 " | ||
245 | style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | ||
246 | id="polyline70" /> | ||
247 | <!-- Arrowhead on XXXpoint 1050 9750 - 1050 7890--> | 210 | <!-- Arrowhead on XXXpoint 1050 9750 - 1050 7890--> |
248 | <!-- Line --> | 211 | <!-- Line --> |
249 | <polyline | ||
250 | points="4050,9750 4050,8014 " | ||
251 | style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | ||
252 | id="polyline74" /> | ||
253 | <!-- Arrowhead on XXXpoint 4050 9750 - 4050 7890--> | 212 | <!-- Arrowhead on XXXpoint 4050 9750 - 4050 7890--> |
254 | <!-- Line --> | 213 | <!-- Line --> |
255 | <polyline | ||
256 | points="5250,8550 5250,6814 " | ||
257 | style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | ||
258 | id="polyline78" /> | ||
259 | <!-- Arrowhead on XXXpoint 5250 8550 - 5250 6690--> | 214 | <!-- Arrowhead on XXXpoint 5250 8550 - 5250 6690--> |
260 | <!-- Line --> | 215 | <!-- Line --> |
261 | <polyline | ||
262 | points="6000,6300 8048,7910 " | ||
263 | style="stroke:#87cfff;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)" | ||
264 | id="polyline82" /> | ||
265 | <!-- Arrowhead on XXXpoint 6000 6300 - 8146 7986--> | 216 | <!-- Arrowhead on XXXpoint 6000 6300 - 8146 7986--> |
266 | <!-- Circle --> | 217 | <!-- Circle --> |
267 | <circle | 218 | <ellipse |
268 | cx="2850" | 219 | cx="2862.3408" |
269 | cy="4350" | 220 | cy="5749.5786" |
270 | r="76" | 221 | style="fill:#000000;stroke:#000000;stroke-width:13.70675087" |
271 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 222 | id="circle86" |
272 | id="circle86" /> | 223 | rx="76" |
224 | ry="72.849495" /> | ||
273 | <!-- Circle --> | 225 | <!-- Circle --> |
274 | <circle | 226 | <ellipse |
275 | cx="3150" | 227 | cx="3162.3408" |
276 | cy="4350" | 228 | cy="5749.5786" |
277 | r="76" | 229 | style="fill:#000000;stroke:#000000;stroke-width:13.70675087" |
278 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 230 | id="circle88" |
279 | id="circle88" /> | 231 | rx="76" |
232 | ry="72.849495" /> | ||
280 | <!-- Circle --> | 233 | <!-- Circle --> |
281 | <circle | 234 | <ellipse |
282 | cx="3450" | 235 | cx="3462.3408" |
283 | cy="4350" | 236 | cy="5749.5786" |
284 | r="76" | 237 | style="fill:#000000;stroke:#000000;stroke-width:13.70675087" |
285 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 238 | id="circle90" |
286 | id="circle90" /> | 239 | rx="76" |
240 | ry="72.849495" /> | ||
287 | <!-- Circle --> | 241 | <!-- Circle --> |
288 | <circle | 242 | <ellipse |
289 | cx="1350" | 243 | cx="1362.3407" |
290 | cy="5550" | 244 | cy="6899.834" |
291 | r="76" | 245 | style="fill:#000000;stroke:#000000;stroke-width:13.70675087" |
292 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 246 | id="circle92" |
293 | id="circle92" /> | 247 | rx="76" |
248 | ry="72.849495" /> | ||
294 | <!-- Circle --> | 249 | <!-- Circle --> |
295 | <circle | 250 | <ellipse |
296 | cx="1650" | 251 | cx="1662.3407" |
297 | cy="5550" | 252 | cy="6899.834" |
298 | r="76" | 253 | style="fill:#000000;stroke:#000000;stroke-width:13.70675087" |
299 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 254 | id="circle94" |
300 | id="circle94" /> | 255 | rx="76" |
256 | ry="72.849495" /> | ||
301 | <!-- Circle --> | 257 | <!-- Circle --> |
302 | <circle | 258 | <ellipse |
303 | cx="1950" | 259 | cx="1962.3407" |
304 | cy="5550" | 260 | cy="6899.834" |
305 | r="76" | 261 | style="fill:#000000;stroke:#000000;stroke-width:13.70675087" |
306 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 262 | id="circle96" |
307 | id="circle96" /> | 263 | rx="76" |
264 | ry="72.849495" /> | ||
308 | <!-- Circle --> | 265 | <!-- Circle --> |
309 | <circle | 266 | <ellipse |
310 | cx="4350" | 267 | cx="4362.3408" |
311 | cy="5550" | 268 | cy="6899.834" |
312 | r="76" | 269 | style="fill:#000000;stroke:#000000;stroke-width:13.70675087" |
313 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 270 | id="circle98" |
314 | id="circle98" /> | 271 | rx="76" |
272 | ry="72.849495" /> | ||
315 | <!-- Circle --> | 273 | <!-- Circle --> |
316 | <circle | 274 | <ellipse |
317 | cx="4650" | 275 | cx="4662.3408" |
318 | cy="5550" | 276 | cy="6899.834" |
319 | r="76" | 277 | style="fill:#000000;stroke:#000000;stroke-width:13.70675087" |
320 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 278 | id="circle100" |
321 | id="circle100" /> | 279 | rx="76" |
280 | ry="72.849495" /> | ||
322 | <!-- Circle --> | 281 | <!-- Circle --> |
323 | <circle | 282 | <ellipse |
324 | cx="4950" | 283 | cx="4962.3408" |
325 | cy="5550" | 284 | cy="6899.834" |
326 | r="76" | 285 | style="fill:#000000;stroke:#000000;stroke-width:13.70675087" |
327 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 286 | id="circle102" |
328 | id="circle102" /> | 287 | rx="76" |
288 | ry="72.849495" /> | ||
329 | <!-- Line: box --> | 289 | <!-- Line: box --> |
330 | <rect | 290 | <rect |
331 | x="7350" | 291 | x="6745.3027" |
332 | y="7950" | 292 | y="8146.0654" |
333 | width="1500" | 293 | width="1500" |
334 | height="900" | 294 | height="862.69141" |
335 | rx="0" | 295 | rx="0" |
336 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | 296 | style="stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
337 | id="rect104" /> | 297 | id="rect104" /> |
338 | <!-- Line: box --> | 298 | <!-- Line: box --> |
339 | <rect | 299 | <rect |
340 | x="7350" | 300 | x="6745.3027" |
341 | y="9450" | 301 | y="9583.8857" |
342 | width="1500" | 302 | width="1500" |
343 | height="900" | 303 | height="862.69141" |
344 | rx="0" | 304 | rx="0" |
345 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | 305 | style="stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
346 | id="rect106" /> | 306 | id="rect106" /> |
347 | <!-- Line --> | 307 | <!-- Line --> |
348 | <polyline | 308 | <polyline |
349 | points="8100,8850 8100,9384 " | 309 | points="8100,8850 8100,9384 " |
350 | style="stroke:#000000;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)" | 310 | style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend)" |
351 | id="polyline108" /> | 311 | id="polyline108" |
312 | transform="matrix(1,0,0,0.95854605,-604.69715,525.62477)" /> | ||
352 | <!-- Arrowhead on XXXpoint 8100 8850 - 8100 9510--> | 313 | <!-- Arrowhead on XXXpoint 8100 8850 - 8100 9510--> |
353 | <!-- Line: box --> | 314 | <!-- Line: box --> |
354 | <rect | 315 | <rect |
355 | x="7350" | 316 | x="6745.3027" |
356 | y="10950" | 317 | y="11021.704" |
357 | width="1500" | 318 | width="1500" |
358 | height="900" | 319 | height="862.69141" |
359 | rx="0" | 320 | rx="0" |
360 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | 321 | style="stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
361 | id="rect112" /> | 322 | id="rect112" /> |
362 | <!-- Line --> | 323 | <!-- Line --> |
363 | <polyline | 324 | <polyline |
364 | points="8100,10350 8100,10884 " | 325 | points="8100,10350 8100,10884 " |
365 | style="stroke:#000000;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)" | 326 | style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend)" |
366 | id="polyline114" /> | 327 | id="polyline114" |
328 | transform="matrix(1,0,0,0.95854605,-604.69715,525.62477)" /> | ||
367 | <!-- Arrowhead on XXXpoint 8100 10350 - 8100 11010--> | 329 | <!-- Arrowhead on XXXpoint 8100 10350 - 8100 11010--> |
368 | <!-- Line: box --> | 330 | <!-- Line: box --> |
369 | <rect | 331 | <rect |
370 | x="750" | 332 | x="762.34076" |
371 | y="3900" | 333 | y="5318.2324" |
372 | width="1800" | 334 | width="1800" |
373 | height="900" | 335 | height="862.69141" |
374 | rx="0" | 336 | rx="0" |
375 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; " | 337 | style="fill:#ffbfbf;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
376 | id="rect118" /> | 338 | id="rect118" /> |
377 | <!-- Line: box --> | 339 | <!-- Line: box --> |
378 | <rect | 340 | <rect |
379 | x="300" | 341 | x="312.34076" |
380 | y="7050" | 342 | y="8337.6533" |
381 | width="1500" | 343 | width="1500" |
382 | height="900" | 344 | height="862.69141" |
383 | rx="0" | 345 | rx="0" |
384 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | 346 | style="fill:#87cfff;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
385 | id="rect120" /> | 347 | id="rect120" /> |
386 | <!-- Line: box --> | 348 | <!-- Line: box --> |
387 | <rect | 349 | <rect |
388 | x="3750" | 350 | x="3762.3408" |
389 | y="3900" | 351 | y="5318.2324" |
390 | width="1800" | 352 | width="1800" |
391 | height="900" | 353 | height="862.69141" |
392 | rx="0" | 354 | rx="0" |
393 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; " | 355 | style="fill:#ffbfbf;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
394 | id="rect122" /> | 356 | id="rect122" /> |
395 | <!-- Line: box --> | 357 | <!-- Line: box --> |
396 | <rect | 358 | <rect |
397 | x="4500" | 359 | x="4512.3408" |
398 | y="5850" | 360 | y="7187.3975" |
399 | width="1500" | 361 | width="1500" |
400 | height="900" | 362 | height="862.69141" |
401 | rx="0" | 363 | rx="0" |
402 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | 364 | style="fill:#87cfff;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
403 | id="rect124" /> | 365 | id="rect124" /> |
404 | <!-- Line: box --> | 366 | <!-- Line: box --> |
405 | <rect | 367 | <rect |
406 | x="3300" | 368 | x="3312.3408" |
407 | y="7050" | 369 | y="8337.6533" |
408 | width="1500" | 370 | width="1500" |
409 | height="900" | 371 | height="862.69141" |
410 | rx="0" | 372 | rx="0" |
411 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | 373 | style="fill:#87cfff;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
412 | id="rect126" /> | 374 | id="rect126" /> |
413 | <!-- Line: box --> | 375 | <!-- Line: box --> |
414 | <rect | 376 | <rect |
415 | x="2250" | 377 | x="2262.3408" |
416 | y="2100" | 378 | y="3592.8503" |
417 | width="1800" | 379 | width="1800" |
418 | height="900" | 380 | height="862.69141" |
419 | rx="0" | 381 | rx="0" |
420 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; " | 382 | style="fill:#ffbfbf;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
421 | id="rect128" /> | 383 | id="rect128" /> |
422 | <!-- Line: box --> | 384 | <!-- Line: box --> |
423 | <rect | ||
424 | x="0" | ||
425 | y="9750" | ||
426 | width="2100" | ||
427 | height="900" | ||
428 | rx="0" | ||
429 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; " | ||
430 | id="rect130" /> | ||
431 | <!-- Line: box --> | 385 | <!-- Line: box --> |
432 | <rect | ||
433 | x="1350" | ||
434 | y="8550" | ||
435 | width="2100" | ||
436 | height="900" | ||
437 | rx="0" | ||
438 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; " | ||
439 | id="rect132" /> | ||
440 | <!-- Line: box --> | 386 | <!-- Line: box --> |
441 | <rect | ||
442 | x="3000" | ||
443 | y="9750" | ||
444 | width="2100" | ||
445 | height="900" | ||
446 | rx="0" | ||
447 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; " | ||
448 | id="rect134" /> | ||
449 | <!-- Line: box --> | 387 | <!-- Line: box --> |
450 | <rect | ||
451 | x="4350" | ||
452 | y="8550" | ||
453 | width="2100" | ||
454 | height="900" | ||
455 | rx="0" | ||
456 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; " | ||
457 | id="rect136" /> | ||
458 | <!-- Line: box --> | 388 | <!-- Line: box --> |
459 | <rect | 389 | <rect |
460 | x="1500" | 390 | x="1512.3407" |
461 | y="5850" | 391 | y="7187.3975" |
462 | width="1500" | 392 | width="1500" |
463 | height="900" | 393 | height="862.69141" |
464 | rx="0" | 394 | rx="0" |
465 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | 395 | style="fill:#87cfff;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter" |
466 | id="rect138" /> | 396 | id="rect138" /> |
467 | <!-- Text --> | 397 | <!-- Text --> |
468 | <text | 398 | <text |
469 | xml:space="preserve" | 399 | xml:space="preserve" |
470 | x="8100" | 400 | x="7338.3037" |
471 | y="8250" | 401 | y="8614.0625" |
472 | fill="#000000" | ||
473 | font-family="Courier" | ||
474 | font-style="normal" | 402 | font-style="normal" |
475 | font-weight="bold" | 403 | font-weight="bold" |
476 | font-size="192" | 404 | font-size="192" |
477 | text-anchor="middle" | 405 | id="text140" |
478 | id="text140">struct</text> | 406 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
407 | transform="scale(1.0213945,0.97905363)">struct</text> | ||
479 | <!-- Text --> | 408 | <!-- Text --> |
480 | <text | 409 | <text |
481 | xml:space="preserve" | 410 | xml:space="preserve" |
482 | x="8100" | 411 | x="7338.3037" |
483 | y="8550" | 412 | y="8907.7783" |
484 | fill="#000000" | ||
485 | font-family="Courier" | ||
486 | font-style="normal" | 413 | font-style="normal" |
487 | font-weight="bold" | 414 | font-weight="bold" |
488 | font-size="192" | 415 | font-size="192" |
489 | text-anchor="middle" | 416 | id="text142" |
490 | id="text142">rcu_head</text> | 417 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
418 | transform="scale(1.0213945,0.97905363)">rcu_head</text> | ||
491 | <!-- Text --> | 419 | <!-- Text --> |
492 | <text | 420 | <text |
493 | xml:space="preserve" | 421 | xml:space="preserve" |
494 | x="8100" | 422 | x="7338.3037" |
495 | y="9750" | 423 | y="10082.644" |
496 | fill="#000000" | ||
497 | font-family="Courier" | ||
498 | font-style="normal" | 424 | font-style="normal" |
499 | font-weight="bold" | 425 | font-weight="bold" |
500 | font-size="192" | 426 | font-size="192" |
501 | text-anchor="middle" | 427 | id="text144" |
502 | id="text144">struct</text> | 428 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
429 | transform="scale(1.0213945,0.97905363)">struct</text> | ||
503 | <!-- Text --> | 430 | <!-- Text --> |
504 | <text | 431 | <text |
505 | xml:space="preserve" | 432 | xml:space="preserve" |
506 | x="8100" | 433 | x="7338.3037" |
507 | y="10050" | 434 | y="10376.36" |
508 | fill="#000000" | ||
509 | font-family="Courier" | ||
510 | font-style="normal" | 435 | font-style="normal" |
511 | font-weight="bold" | 436 | font-weight="bold" |
512 | font-size="192" | 437 | font-size="192" |
513 | text-anchor="middle" | 438 | id="text146" |
514 | id="text146">rcu_head</text> | 439 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
440 | transform="scale(1.0213945,0.97905363)">rcu_head</text> | ||
515 | <!-- Text --> | 441 | <!-- Text --> |
516 | <text | 442 | <text |
517 | xml:space="preserve" | 443 | xml:space="preserve" |
518 | x="8100" | 444 | x="7338.3037" |
519 | y="11250" | 445 | y="11551.224" |
520 | fill="#000000" | ||
521 | font-family="Courier" | ||
522 | font-style="normal" | 446 | font-style="normal" |
523 | font-weight="bold" | 447 | font-weight="bold" |
524 | font-size="192" | 448 | font-size="192" |
525 | text-anchor="middle" | 449 | id="text148" |
526 | id="text148">struct</text> | 450 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
451 | transform="scale(1.0213945,0.97905363)">struct</text> | ||
527 | <!-- Text --> | 452 | <!-- Text --> |
528 | <text | 453 | <text |
529 | xml:space="preserve" | 454 | xml:space="preserve" |
530 | x="8100" | 455 | x="7338.3037" |
531 | y="11550" | 456 | y="11844.94" |
532 | fill="#000000" | ||
533 | font-family="Courier" | ||
534 | font-style="normal" | 457 | font-style="normal" |
535 | font-weight="bold" | 458 | font-weight="bold" |
536 | font-size="192" | 459 | font-size="192" |
537 | text-anchor="middle" | 460 | id="text150" |
538 | id="text150">rcu_head</text> | 461 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
462 | transform="scale(1.0213945,0.97905363)">rcu_head</text> | ||
539 | <!-- Text --> | 463 | <!-- Text --> |
540 | <text | 464 | <text |
541 | xml:space="preserve" | 465 | xml:space="preserve" |
542 | x="6000" | 466 | x="5886.4043" |
543 | y="1200" | 467 | y="2788.5688" |
544 | fill="#000000" | ||
545 | font-family="Helvetica" | ||
546 | font-style="normal" | 468 | font-style="normal" |
547 | font-weight="normal" | 469 | font-weight="normal" |
548 | font-size="192" | 470 | font-size="192" |
549 | text-anchor="end" | 471 | id="text152" |
550 | id="text152">rcu_sched</text> | 472 | style="font-style:normal;font-weight:normal;font-size:187.978302px;font-family:Helvetica;text-anchor:end;fill:#000000;stroke-width:0.02447634in" |
473 | transform="scale(1.0213945,0.97905363)">rcu_state</text> | ||
551 | <!-- Text --> | 474 | <!-- Text --> |
552 | <text | ||
553 | xml:space="preserve" | ||
554 | x="6450" | ||
555 | y="750" | ||
556 | fill="#000000" | ||
557 | font-family="Helvetica" | ||
558 | font-style="normal" | ||
559 | font-weight="normal" | ||
560 | font-size="192" | ||
561 | text-anchor="end" | ||
562 | id="text154">rcu_bh</text> | ||
563 | <!-- Text --> | 475 | <!-- Text --> |
564 | <text | 476 | <text |
565 | xml:space="preserve" | 477 | xml:space="preserve" |
566 | x="3150" | 478 | x="3096.1016" |
567 | y="2400" | 479 | y="3963.4336" |
568 | fill="#000000" | ||
569 | font-family="Courier" | ||
570 | font-style="normal" | 480 | font-style="normal" |
571 | font-weight="bold" | 481 | font-weight="bold" |
572 | font-size="192" | 482 | font-size="192" |
573 | text-anchor="middle" | 483 | id="text156" |
574 | id="text156">struct</text> | 484 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
485 | transform="scale(1.0213945,0.97905363)">struct</text> | ||
575 | <!-- Text --> | 486 | <!-- Text --> |
576 | <text | 487 | <text |
577 | xml:space="preserve" | 488 | xml:space="preserve" |
578 | x="3150" | 489 | x="3096.1016" |
579 | y="2700" | 490 | y="4257.1494" |
580 | fill="#000000" | ||
581 | font-family="Courier" | ||
582 | font-style="normal" | 491 | font-style="normal" |
583 | font-weight="bold" | 492 | font-weight="bold" |
584 | font-size="192" | 493 | font-size="192" |
585 | text-anchor="middle" | 494 | id="text158" |
586 | id="text158">rcu_node</text> | 495 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
496 | transform="scale(1.0213945,0.97905363)">rcu_node</text> | ||
587 | <!-- Text --> | 497 | <!-- Text --> |
588 | <text | 498 | <text |
589 | xml:space="preserve" | 499 | xml:space="preserve" |
590 | x="1650" | 500 | x="1627.5209" |
591 | y="4200" | 501 | y="5725.7305" |
592 | fill="#000000" | ||
593 | font-family="Courier" | ||
594 | font-style="normal" | 502 | font-style="normal" |
595 | font-weight="bold" | 503 | font-weight="bold" |
596 | font-size="192" | 504 | font-size="192" |
597 | text-anchor="middle" | 505 | id="text160" |
598 | id="text160">struct</text> | 506 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
507 | transform="scale(1.0213945,0.97905363)">struct</text> | ||
599 | <!-- Text --> | 508 | <!-- Text --> |
600 | <text | 509 | <text |
601 | xml:space="preserve" | 510 | xml:space="preserve" |
602 | x="1650" | 511 | x="1627.5209" |
603 | y="4500" | 512 | y="6019.4463" |
604 | fill="#000000" | ||
605 | font-family="Courier" | ||
606 | font-style="normal" | 513 | font-style="normal" |
607 | font-weight="bold" | 514 | font-weight="bold" |
608 | font-size="192" | 515 | font-size="192" |
609 | text-anchor="middle" | 516 | id="text162" |
610 | id="text162">rcu_node</text> | 517 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
518 | transform="scale(1.0213945,0.97905363)">rcu_node</text> | ||
611 | <!-- Text --> | 519 | <!-- Text --> |
612 | <text | 520 | <text |
613 | xml:space="preserve" | 521 | xml:space="preserve" |
614 | x="4650" | 522 | x="4564.6821" |
615 | y="4500" | 523 | y="6019.4463" |
616 | fill="#000000" | ||
617 | font-family="Courier" | ||
618 | font-style="normal" | 524 | font-style="normal" |
619 | font-weight="bold" | 525 | font-weight="bold" |
620 | font-size="192" | 526 | font-size="192" |
621 | text-anchor="middle" | 527 | id="text164" |
622 | id="text164">rcu_node</text> | 528 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
529 | transform="scale(1.0213945,0.97905363)">rcu_node</text> | ||
623 | <!-- Text --> | 530 | <!-- Text --> |
624 | <text | 531 | <text |
625 | xml:space="preserve" | 532 | xml:space="preserve" |
626 | x="4650" | 533 | x="4564.6821" |
627 | y="4200" | 534 | y="5725.7305" |
628 | fill="#000000" | ||
629 | font-family="Courier" | ||
630 | font-style="normal" | 535 | font-style="normal" |
631 | font-weight="bold" | 536 | font-weight="bold" |
632 | font-size="192" | 537 | font-size="192" |
633 | text-anchor="middle" | 538 | id="text166" |
634 | id="text166">struct</text> | 539 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
540 | transform="scale(1.0213945,0.97905363)">struct</text> | ||
635 | <!-- Text --> | 541 | <!-- Text --> |
636 | <text | 542 | <text |
637 | xml:space="preserve" | 543 | xml:space="preserve" |
638 | x="2250" | 544 | x="2214.9531" |
639 | y="6150" | 545 | y="7634.8848" |
640 | fill="#000000" | ||
641 | font-family="Courier" | ||
642 | font-style="normal" | 546 | font-style="normal" |
643 | font-weight="bold" | 547 | font-weight="bold" |
644 | font-size="192" | 548 | font-size="192" |
645 | text-anchor="middle" | 549 | id="text168" |
646 | id="text168">struct</text> | 550 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
551 | transform="scale(1.0213945,0.97905363)">struct</text> | ||
647 | <!-- Text --> | 552 | <!-- Text --> |
648 | <text | 553 | <text |
649 | xml:space="preserve" | 554 | xml:space="preserve" |
650 | x="2250" | 555 | x="2214.9531" |
651 | y="6450" | 556 | y="7928.6011" |
652 | fill="#000000" | ||
653 | font-family="Courier" | ||
654 | font-style="normal" | 557 | font-style="normal" |
655 | font-weight="bold" | 558 | font-weight="bold" |
656 | font-size="192" | 559 | font-size="192" |
657 | text-anchor="middle" | 560 | id="text170" |
658 | id="text170">rcu_data</text> | 561 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
562 | transform="scale(1.0213945,0.97905363)">rcu_data</text> | ||
659 | <!-- Text --> | 563 | <!-- Text --> |
660 | <text | 564 | <text |
661 | xml:space="preserve" | 565 | xml:space="preserve" |
662 | x="1050" | 566 | x="1040.0886" |
663 | y="7350" | 567 | y="8809.749" |
664 | fill="#000000" | ||
665 | font-family="Courier" | ||
666 | font-style="normal" | 568 | font-style="normal" |
667 | font-weight="bold" | 569 | font-weight="bold" |
668 | font-size="192" | 570 | font-size="192" |
669 | text-anchor="middle" | 571 | id="text172" |
670 | id="text172">struct</text> | 572 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
573 | transform="scale(1.0213945,0.97905363)">struct</text> | ||
671 | <!-- Text --> | 574 | <!-- Text --> |
672 | <text | 575 | <text |
673 | xml:space="preserve" | 576 | xml:space="preserve" |
674 | x="1050" | 577 | x="1040.0886" |
675 | y="7650" | 578 | y="9103.4648" |
676 | fill="#000000" | ||
677 | font-family="Courier" | ||
678 | font-style="normal" | 579 | font-style="normal" |
679 | font-weight="bold" | 580 | font-weight="bold" |
680 | font-size="192" | 581 | font-size="192" |
681 | text-anchor="middle" | 582 | id="text174" |
682 | id="text174">rcu_data</text> | 583 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
584 | transform="scale(1.0213945,0.97905363)">rcu_data</text> | ||
683 | <!-- Text --> | 585 | <!-- Text --> |
684 | <text | 586 | <text |
685 | xml:space="preserve" | 587 | xml:space="preserve" |
686 | x="5250" | 588 | x="5152.1138" |
687 | y="6150" | 589 | y="7634.8848" |
688 | fill="#000000" | ||
689 | font-family="Courier" | ||
690 | font-style="normal" | 590 | font-style="normal" |
691 | font-weight="bold" | 591 | font-weight="bold" |
692 | font-size="192" | 592 | font-size="192" |
693 | text-anchor="middle" | 593 | id="text176" |
694 | id="text176">struct</text> | 594 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
595 | transform="scale(1.0213945,0.97905363)">struct</text> | ||
695 | <!-- Text --> | 596 | <!-- Text --> |
696 | <text | 597 | <text |
697 | xml:space="preserve" | 598 | xml:space="preserve" |
698 | x="5250" | 599 | x="5152.1138" |
699 | y="6450" | 600 | y="7928.6011" |
700 | fill="#000000" | ||
701 | font-family="Courier" | ||
702 | font-style="normal" | 601 | font-style="normal" |
703 | font-weight="bold" | 602 | font-weight="bold" |
704 | font-size="192" | 603 | font-size="192" |
705 | text-anchor="middle" | 604 | id="text178" |
706 | id="text178">rcu_data</text> | 605 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
606 | transform="scale(1.0213945,0.97905363)">rcu_data</text> | ||
707 | <!-- Text --> | 607 | <!-- Text --> |
708 | <text | 608 | <text |
709 | xml:space="preserve" | 609 | xml:space="preserve" |
710 | x="4050" | 610 | x="3977.2495" |
711 | y="7350" | 611 | y="8809.749" |
712 | fill="#000000" | ||
713 | font-family="Courier" | ||
714 | font-style="normal" | 612 | font-style="normal" |
715 | font-weight="bold" | 613 | font-weight="bold" |
716 | font-size="192" | 614 | font-size="192" |
717 | text-anchor="middle" | 615 | id="text180" |
718 | id="text180">struct</text> | 616 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
617 | transform="scale(1.0213945,0.97905363)">struct</text> | ||
719 | <!-- Text --> | 618 | <!-- Text --> |
720 | <text | 619 | <text |
721 | xml:space="preserve" | 620 | xml:space="preserve" |
722 | x="4050" | 621 | x="3977.2495" |
723 | y="7650" | 622 | y="9103.4648" |
724 | fill="#000000" | ||
725 | font-family="Courier" | ||
726 | font-style="normal" | 623 | font-style="normal" |
727 | font-weight="bold" | 624 | font-weight="bold" |
728 | font-size="192" | 625 | font-size="192" |
729 | text-anchor="middle" | 626 | id="text182" |
730 | id="text182">rcu_data</text> | 627 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in" |
628 | transform="scale(1.0213945,0.97905363)">rcu_data</text> | ||
731 | <!-- Text --> | 629 | <!-- Text --> |
732 | <text | 630 | <text |
733 | xml:space="preserve" | 631 | xml:space="preserve" |
734 | x="450" | 632 | x="452.6564" |
735 | y="1800" | 633 | y="3376.0012" |
736 | fill="#000000" | ||
737 | font-family="Courier" | ||
738 | font-style="normal" | 634 | font-style="normal" |
739 | font-weight="bold" | 635 | font-weight="bold" |
740 | font-size="192" | 636 | font-size="192" |
741 | text-anchor="start" | 637 | id="text184" |
742 | id="text184">struct rcu_state</text> | 638 | style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:start;fill:#000000;stroke-width:0.02447634in" |
639 | transform="scale(1.0213945,0.97905363)">struct rcu_state</text> | ||
743 | <!-- Text --> | 640 | <!-- Text --> |
744 | <text | ||
745 | xml:space="preserve" | ||
746 | x="1050" | ||
747 | y="10050" | ||
748 | fill="#000000" | ||
749 | font-family="Courier" | ||
750 | font-style="normal" | ||
751 | font-weight="bold" | ||
752 | font-size="192" | ||
753 | text-anchor="middle" | ||
754 | id="text186">struct</text> | ||
755 | <!-- Text --> | 641 | <!-- Text --> |
756 | <text | ||
757 | xml:space="preserve" | ||
758 | x="1050" | ||
759 | y="10350" | ||
760 | fill="#000000" | ||
761 | font-family="Courier" | ||
762 | font-style="normal" | ||
763 | font-weight="bold" | ||
764 | font-size="192" | ||
765 | text-anchor="middle" | ||
766 | id="text188">rcu_dynticks</text> | ||
767 | <!-- Text --> | 642 | <!-- Text --> |
768 | <text | ||
769 | xml:space="preserve" | ||
770 | x="4050" | ||
771 | y="10050" | ||
772 | fill="#000000" | ||
773 | font-family="Courier" | ||
774 | font-style="normal" | ||
775 | font-weight="bold" | ||
776 | font-size="192" | ||
777 | text-anchor="middle" | ||
778 | id="text190">struct</text> | ||
779 | <!-- Text --> | 643 | <!-- Text --> |
780 | <text | ||
781 | xml:space="preserve" | ||
782 | x="4050" | ||
783 | y="10350" | ||
784 | fill="#000000" | ||
785 | font-family="Courier" | ||
786 | font-style="normal" | ||
787 | font-weight="bold" | ||
788 | font-size="192" | ||
789 | text-anchor="middle" | ||
790 | id="text192">rcu_dynticks</text> | ||
791 | <!-- Text --> | 644 | <!-- Text --> |
792 | <text | ||
793 | xml:space="preserve" | ||
794 | x="2400" | ||
795 | y="8850" | ||
796 | fill="#000000" | ||
797 | font-family="Courier" | ||
798 | font-style="normal" | ||
799 | font-weight="bold" | ||
800 | font-size="192" | ||
801 | text-anchor="middle" | ||
802 | id="text194">struct</text> | ||
803 | <!-- Text --> | 645 | <!-- Text --> |
804 | <text | ||
805 | xml:space="preserve" | ||
806 | x="2400" | ||
807 | y="9150" | ||
808 | fill="#000000" | ||
809 | font-family="Courier" | ||
810 | font-style="normal" | ||
811 | font-weight="bold" | ||
812 | font-size="192" | ||
813 | text-anchor="middle" | ||
814 | id="text196">rcu_dynticks</text> | ||
815 | <!-- Text --> | 646 | <!-- Text --> |
816 | <text | ||
817 | xml:space="preserve" | ||
818 | x="5400" | ||
819 | y="8850" | ||
820 | fill="#000000" | ||
821 | font-family="Courier" | ||
822 | font-style="normal" | ||
823 | font-weight="bold" | ||
824 | font-size="192" | ||
825 | text-anchor="middle" | ||
826 | id="text198">struct</text> | ||
827 | <!-- Text --> | 647 | <!-- Text --> |
828 | <text | ||
829 | xml:space="preserve" | ||
830 | x="5400" | ||
831 | y="9150" | ||
832 | fill="#000000" | ||
833 | font-family="Courier" | ||
834 | font-style="normal" | ||
835 | font-weight="bold" | ||
836 | font-size="192" | ||
837 | text-anchor="middle" | ||
838 | id="text200">rcu_dynticks</text> | ||
839 | <!-- Text --> | 648 | <!-- Text --> |
840 | <text | ||
841 | xml:space="preserve" | ||
842 | x="6900" | ||
843 | y="300" | ||
844 | fill="#000000" | ||
845 | font-family="Helvetica" | ||
846 | font-style="normal" | ||
847 | font-weight="normal" | ||
848 | font-size="192" | ||
849 | text-anchor="end" | ||
850 | id="text202">rcu_preempt</text> | ||
851 | <!-- Line --> | 649 | <!-- Line --> |
852 | <polyline | 650 | <polyline |
853 | points="5250,5850 5250,4864 " | 651 | points="5250,5850 5250,4864 " |
854 | style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 652 | style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
855 | id="polyline204" /> | 653 | id="polyline204" |
654 | transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" /> | ||
856 | <!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740--> | 655 | <!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740--> |
656 | <path | ||
657 | style="fill:none;stroke:#000000;stroke-width:34.24744034;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker1177)" | ||
658 | d="m 6000.1472,7564.2558 c 1498.5508,0 1498.5508,0 1498.5508,0 v 520.0252" | ||
659 | id="path886" | ||
660 | inkscape:connector-curvature="0" /> | ||
857 | </g> | 661 | </g> |
858 | </svg> | 662 | </svg> |
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html index 1d2051c0c3fc..18f179807563 100644 --- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html +++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html | |||
@@ -23,8 +23,6 @@ to each other. | |||
23 | The <tt>rcu_segcblist</tt> Structure</a> | 23 | The <tt>rcu_segcblist</tt> Structure</a> |
24 | <li> <a href="#The rcu_data Structure"> | 24 | <li> <a href="#The rcu_data Structure"> |
25 | The <tt>rcu_data</tt> Structure</a> | 25 | The <tt>rcu_data</tt> Structure</a> |
26 | <li> <a href="#The rcu_dynticks Structure"> | ||
27 | The <tt>rcu_dynticks</tt> Structure</a> | ||
28 | <li> <a href="#The rcu_head Structure"> | 26 | <li> <a href="#The rcu_head Structure"> |
29 | The <tt>rcu_head</tt> Structure</a> | 27 | The <tt>rcu_head</tt> Structure</a> |
30 | <li> <a href="#RCU-Specific Fields in the task_struct Structure"> | 28 | <li> <a href="#RCU-Specific Fields in the task_struct Structure"> |
@@ -127,9 +125,11 @@ CPUs, RCU would configure the <tt>rcu_node</tt> tree as follows: | |||
127 | </p><p>RCU currently permits up to a four-level tree, which on a 64-bit system | 125 | </p><p>RCU currently permits up to a four-level tree, which on a 64-bit system |
128 | accommodates up to 4,194,304 CPUs, though only a mere 524,288 CPUs for | 126 | accommodates up to 4,194,304 CPUs, though only a mere 524,288 CPUs for |
129 | 32-bit systems. | 127 | 32-bit systems. |
130 | On the other hand, you can set <tt>CONFIG_RCU_FANOUT</tt> to be | 128 | On the other hand, you can set both <tt>CONFIG_RCU_FANOUT</tt> and |
131 | as small as 2 if you wish, which would permit only 16 CPUs, which | 129 | <tt>CONFIG_RCU_FANOUT_LEAF</tt> to be as small as 2, which would result |
132 | is useful for testing. | 130 | in a 16-CPU test using a 4-level tree. |
131 | This can be useful for testing large-system capabilities on small test | ||
132 | machines. | ||
133 | 133 | ||
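To make the capacity arithmetic concrete, here is a small user-space sketch (not kernel code; it assumes the default leaf fanout of 16 and that total capacity is the leaf fanout multiplied by CONFIG_RCU_FANOUT once per additional level, which is consistent with the figures quoted above):

#include <stdio.h>

/* Maximum CPUs covered by an rcu_node combining tree: one leaf level
 * of CONFIG_RCU_FANOUT_LEAF CPUs, multiplied by CONFIG_RCU_FANOUT for
 * each additional (interior) level. */
static unsigned long rcu_tree_capacity(unsigned long leaf_fanout,
				       unsigned long fanout, int levels)
{
	unsigned long cap = leaf_fanout;

	while (--levels > 0)
		cap *= fanout;
	return cap;
}

int main(void)
{
	printf("%lu\n", rcu_tree_capacity(16, 64, 4)); /* 4194304: 64-bit default */
	printf("%lu\n", rcu_tree_capacity(16, 32, 4)); /* 524288: 32-bit default */
	printf("%lu\n", rcu_tree_capacity(2, 2, 4));   /* 16: small test config */
	return 0;
}

Running it prints 4194304, 524288, and 16, matching the 64-bit default, the 32-bit default, and the 16-CPU test configuration mentioned above.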
134 | </p><p>This multi-level combining tree allows us to get most of the | 134 | </p><p>This multi-level combining tree allows us to get most of the |
135 | performance and scalability | 135 | performance and scalability |
@@ -154,44 +154,9 @@ on that root <tt>rcu_node</tt> structure remains acceptably low. | |||
154 | keeping lock contention under control at all tree levels regardless | 154 | keeping lock contention under control at all tree levels regardless |
155 | of the level of loading on the system. | 155 | of the level of loading on the system. |
156 | 156 | ||
157 | </p><p>The Linux kernel actually supports multiple flavors of RCU | ||
158 | running concurrently, so RCU builds separate data structures for each | ||
159 | flavor. | ||
160 | For example, for <tt>CONFIG_TREE_RCU=y</tt> kernels, RCU provides | ||
161 | rcu_sched and rcu_bh, as shown below: | ||
162 | |||
163 | </p><p><img src="BigTreeClassicRCUBH.svg" alt="BigTreeClassicRCUBH.svg" width="33%"> | ||
164 | |||
165 | </p><p>Energy efficiency is increasingly important, and for that | ||
166 | reason the Linux kernel provides <tt>CONFIG_NO_HZ_IDLE</tt>, which | ||
167 | turns off the scheduling-clock interrupts on idle CPUs, which in | ||
168 | turn allows those CPUs to attain deeper sleep states and to consume | ||
169 | less energy. | ||
170 | CPUs whose scheduling-clock interrupts have been turned off are | ||
171 | said to be in <i>dyntick-idle mode</i>. | ||
172 | RCU must handle dyntick-idle CPUs specially | ||
173 | because RCU would otherwise wake up each CPU on every grace period, | ||
174 | which would defeat the whole purpose of <tt>CONFIG_NO_HZ_IDLE</tt>. | ||
175 | RCU uses the <tt>rcu_dynticks</tt> structure to track | ||
176 | which CPUs are in dyntick idle mode, as shown below: | ||
177 | |||
178 | </p><p><img src="BigTreeClassicRCUBHdyntick.svg" alt="BigTreeClassicRCUBHdyntick.svg" width="33%"> | ||
179 | |||
180 | </p><p>However, if a CPU is in dyntick-idle mode, it is in that mode | ||
181 | for all flavors of RCU. | ||
182 | Therefore, a single <tt>rcu_dynticks</tt> structure is allocated per | ||
183 | CPU, and all of a given CPU's <tt>rcu_data</tt> structures share | ||
184 | that <tt>rcu_dynticks</tt>, as shown in the figure. | ||
185 | |||
186 | </p><p>Kernels built with <tt>CONFIG_PREEMPT_RCU</tt> support | ||
187 | rcu_preempt in addition to rcu_sched and rcu_bh, as shown below: | ||
188 | |||
189 | </p><p><img src="BigTreePreemptRCUBHdyntick.svg" alt="BigTreePreemptRCUBHdyntick.svg" width="35%"> | ||
190 | |||
191 | </p><p>RCU updaters wait for normal grace periods by registering | 157 | </p><p>RCU updaters wait for normal grace periods by registering |
192 | RCU callbacks, either directly via <tt>call_rcu()</tt> and | 158 | RCU callbacks, either directly via <tt>call_rcu()</tt> and |
193 | friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>), | 159 | friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>), |
194 | there being a separate interface per flavor of RCU) | ||
195 | or indirectly via <tt>synchronize_rcu()</tt> and friends. | 160 | or indirectly via <tt>synchronize_rcu()</tt> and friends. |
196 | RCU callbacks are represented by <tt>rcu_head</tt> structures, | 161 | RCU callbacks are represented by <tt>rcu_head</tt> structures, |
197 | which are queued on <tt>rcu_data</tt> structures while they are | 162 | which are queued on <tt>rcu_data</tt> structures while they are |
@@ -214,9 +179,6 @@ its own synchronization: | |||
214 | <li> Each <tt>rcu_node</tt> structure has a spinlock. | 179 | <li> Each <tt>rcu_node</tt> structure has a spinlock. |
215 | <li> The fields in <tt>rcu_data</tt> are private to the corresponding | 180 | <li> The fields in <tt>rcu_data</tt> are private to the corresponding |
216 | CPU, although a few can be read and written by other CPUs. | 181 | CPU, although a few can be read and written by other CPUs. |
217 | <li> Similarly, the fields in <tt>rcu_dynticks</tt> are private | ||
218 | to the corresponding CPU, although a few can be read by | ||
219 | other CPUs. | ||
220 | </ol> | 182 | </ol> |
221 | 183 | ||
222 | <p>It is important to note that different data structures can have | 184 | <p>It is important to note that different data structures can have |
@@ -272,11 +234,6 @@ follows: | |||
272 | access to this information from the corresponding CPU. | 234 | access to this information from the corresponding CPU. |
273 | Finally, this structure records past dyntick-idle state | 235 | Finally, this structure records past dyntick-idle state |
274 | for the corresponding CPU and also tracks statistics. | 236 | for the corresponding CPU and also tracks statistics. |
275 | <li> <tt>rcu_dynticks</tt>: | ||
276 | This per-CPU structure tracks the current dyntick-idle | ||
277 | state for the corresponding CPU. | ||
278 | Unlike the other three structures, the <tt>rcu_dynticks</tt> | ||
279 | structure is not replicated per RCU flavor. | ||
280 | <li> <tt>rcu_head</tt>: | 237 | <li> <tt>rcu_head</tt>: |
281 | This structure represents RCU callbacks, and is the | 238 | This structure represents RCU callbacks, and is the |
282 | only structure allocated and managed by RCU users. | 239 | only structure allocated and managed by RCU users. |
@@ -287,14 +244,14 @@ follows: | |||
287 | <p>If all you wanted from this article was a general notion of how | 244 | <p>If all you wanted from this article was a general notion of how |
288 | RCU's data structures are related, you are done. | 245 | RCU's data structures are related, you are done. |
289 | Otherwise, each of the following sections gives more details on | 246 | Otherwise, each of the following sections gives more details on |
290 | the <tt>rcu_state</tt>, <tt>rcu_node</tt>, <tt>rcu_data</tt>, | 247 | the <tt>rcu_state</tt>, <tt>rcu_node</tt> and <tt>rcu_data</tt> data |
291 | and <tt>rcu_dynticks</tt> data structures. | 248 | structures. |
292 | 249 | ||
293 | <h3><a name="The rcu_state Structure"> | 250 | <h3><a name="The rcu_state Structure"> |
294 | The <tt>rcu_state</tt> Structure</a></h3> | 251 | The <tt>rcu_state</tt> Structure</a></h3> |
295 | 252 | ||
296 | <p>The <tt>rcu_state</tt> structure is the base structure that | 253 | <p>The <tt>rcu_state</tt> structure is the base structure that |
297 | represents a flavor of RCU. | 254 | represents the state of RCU in the system. |
298 | This structure forms the interconnection between the | 255 | This structure forms the interconnection between the |
299 | <tt>rcu_node</tt> and <tt>rcu_data</tt> structures, | 256 | <tt>rcu_node</tt> and <tt>rcu_data</tt> structures, |
300 | tracks grace periods, contains the lock used to | 257 | tracks grace periods, contains the lock used to |
@@ -389,7 +346,7 @@ sequence number. | |||
389 | The bottom two bits are the state of the current grace period, | 346 | The bottom two bits are the state of the current grace period, |
390 | which can be zero for not yet started or one for in progress. | 347 | which can be zero for not yet started or one for in progress. |
391 | In other words, if the bottom two bits of <tt>->gp_seq</tt> are | 348 | In other words, if the bottom two bits of <tt>->gp_seq</tt> are |
392 | zero, the corresponding flavor of RCU is idle. | 349 | zero, then RCU is idle. |
393 | Any other value in the bottom two bits indicates that something is broken. | 350 | Any other value in the bottom two bits indicates that something is broken. |
394 | This field is protected by the root <tt>rcu_node</tt> structure's | 351 | This field is protected by the root <tt>rcu_node</tt> structure's |
395 | <tt>->lock</tt> field. | 352 | <tt>->lock</tt> field. |
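As a sketch of the encoding just described (the helper and macro names here are illustrative, not the kernel's), the bottom two bits of ->gp_seq can be split off from the grace-period counter like this:

/* Illustrative helpers for the ->gp_seq encoding described above:
 * low two bits = grace-period state, remaining bits = sequence count. */
#define GP_SEQ_STATE_MASK	0x3UL
#define GP_SEQ_CTR_SHIFT	2

static inline unsigned long gp_seq_ctr(unsigned long gp_seq)
{
	return gp_seq >> GP_SEQ_CTR_SHIFT;	/* grace-period sequence count */
}

static inline unsigned long gp_seq_state(unsigned long gp_seq)
{
	return gp_seq & GP_SEQ_STATE_MASK;	/* 0 = idle, 1 = in progress */
}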
@@ -419,10 +376,10 @@ as follows: | |||
419 | grace period in jiffies. | 376 | grace period in jiffies. |
420 | It is protected by the root <tt>rcu_node</tt>'s <tt>->lock</tt>. | 377 | It is protected by the root <tt>rcu_node</tt>'s <tt>->lock</tt>. |
421 | 378 | ||
422 | <p>The <tt>->name</tt> field points to the name of the RCU flavor | 379 | <p>The <tt>->name</tt> and <tt>->abbr</tt> fields distinguish |
423 | (for example, “rcu_sched”), and is constant. | 380 | between preemptible RCU (“rcu_preempt” and “p”) |
424 | The <tt>->abbr</tt> field contains a one-character abbreviation, | 381 | and non-preemptible RCU (“rcu_sched” and “s”). |
425 | for example, “s” for RCU-sched. | 382 | These fields are used for diagnostic and tracing purposes. |
426 | 383 | ||
427 | <h3><a name="The rcu_node Structure"> | 384 | <h3><a name="The rcu_node Structure"> |
428 | The <tt>rcu_node</tt> Structure</a></h3> | 385 | The <tt>rcu_node</tt> Structure</a></h3> |
@@ -971,25 +928,31 @@ this <tt>rcu_segcblist</tt> structure, <i>not</i> the <tt>->head</tt> | |||
971 | pointer. | 928 | pointer. |
972 | The reason for this is that all the ready-to-invoke callbacks | 929 | The reason for this is that all the ready-to-invoke callbacks |
973 | (that is, those in the <tt>RCU_DONE_TAIL</tt> segment) are extracted | 930 | (that is, those in the <tt>RCU_DONE_TAIL</tt> segment) are extracted |
974 | all at once at callback-invocation time. | 931 | all at once at callback-invocation time (<tt>rcu_do_batch</tt>), so |
932 | that <tt>->head</tt> may be set to NULL if no not-yet-done | ||
933 | callbacks remain in the <tt>rcu_segcblist</tt>. | ||
975 | If callback invocation must be postponed, for example, because a | 934 | If callback invocation must be postponed, for example, because a |
976 | high-priority process just woke up on this CPU, then the remaining | 935 | high-priority process just woke up on this CPU, then the remaining |
977 | callbacks are placed back on the <tt>RCU_DONE_TAIL</tt> segment. | 936 | callbacks are placed back on the <tt>RCU_DONE_TAIL</tt> segment and |
978 | Either way, the <tt>->len</tt> and <tt>->len_lazy</tt> counts | 937 | <tt>->head</tt> once again points to the start of the segment. |
979 | are adjusted after the corresponding callbacks have been invoked, and so | 938 | In short, the head field can briefly be <tt>NULL</tt> even though the |
980 | again it is the <tt>->len</tt> count that accurately reflects whether | 939 | CPU has callbacks present the entire time. |
981 | or not there are callbacks associated with this <tt>rcu_segcblist</tt> | 940 | Therefore, it is not appropriate to test the <tt>->head</tt> pointer |
982 | structure. | 941 | for <tt>NULL</tt>. |
942 | |||
943 | <p>In contrast, the <tt>->len</tt> and <tt>->len_lazy</tt> counts | ||
944 | are adjusted only after the corresponding callbacks have been invoked. | ||
945 | This means that the <tt>->len</tt> count is zero only if | ||
946 | the <tt>rcu_segcblist</tt> structure really is devoid of callbacks. | ||
983 | Of course, off-CPU sampling of the <tt>->len</tt> count requires | 947 | Of course, off-CPU sampling of the <tt>->len</tt> count requires |
984 | the use of appropriate synchronization, for example, memory barriers. | 948 | careful use of appropriate synchronization, for example, memory barriers. |
985 | This synchronization can be a bit subtle, particularly in the case | 949 | This synchronization can be a bit subtle, particularly in the case |
986 | of <tt>rcu_barrier()</tt>. | 950 | of <tt>rcu_barrier()</tt>. |
987 | 951 | ||
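The reason ->len rather than ->head is authoritative can be seen in a toy model of the batching behaviour described above (this is not the kernel's rcu_segcblist code; in this simplified model every queued callback is ready to invoke, so extracting the done segment empties the list entirely):

/* Toy model: done callbacks are extracted in one go, and ->len is
 * decremented only after each callback is invoked, so ->head can be
 * NULL while ->len is still nonzero. */
struct toy_cb {
	struct toy_cb *next;
	void (*func)(struct toy_cb *cb);
};

struct toy_cblist {
	struct toy_cb *head;	/* NULL once all queued callbacks are extracted */
	unsigned long len;	/* zero only when no callbacks are outstanding */
};

static void toy_do_batch(struct toy_cblist *cbl)
{
	struct toy_cb *cb = cbl->head;
	struct toy_cb *next;

	cbl->head = NULL;		/* extract everything at once */
	while (cb) {			/* here head == NULL but len != 0 */
		next = cb->next;
		cb->func(cb);
		cbl->len--;		/* adjusted only after invocation */
		cb = next;
	}
}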
988 | <h3><a name="The rcu_data Structure"> | 952 | <h3><a name="The rcu_data Structure"> |
989 | The <tt>rcu_data</tt> Structure</a></h3> | 953 | The <tt>rcu_data</tt> Structure</a></h3> |
990 | 954 | ||
991 | <p>The <tt>rcu_data</tt> maintains the per-CPU state for the | 955 | <p>The <tt>rcu_data</tt> structure maintains the per-CPU state for the RCU subsystem. |
992 | corresponding flavor of RCU. | ||
993 | The fields in this structure may be accessed only from the corresponding | 956 | The fields in this structure may be accessed only from the corresponding |
994 | CPU (and from tracing) unless otherwise stated. | 957 | CPU (and from tracing) unless otherwise stated. |
995 | This structure is the | 958 | This structure is the |
@@ -1015,30 +978,19 @@ as follows: | |||
1015 | 978 | ||
1016 | <pre> | 979 | <pre> |
1017 | 1 int cpu; | 980 | 1 int cpu; |
1018 | 2 struct rcu_state *rsp; | 981 | 2 struct rcu_node *mynode; |
1019 | 3 struct rcu_node *mynode; | 982 | 3 unsigned long grpmask; |
1020 | 4 struct rcu_dynticks *dynticks; | 983 | 4 bool beenonline; |
1021 | 5 unsigned long grpmask; | ||
1022 | 6 bool beenonline; | ||
1023 | </pre> | 984 | </pre> |
1024 | 985 | ||
1025 | <p>The <tt>->cpu</tt> field contains the number of the | 986 | <p>The <tt>->cpu</tt> field contains the number of the |
1026 | corresponding CPU, the <tt>->rsp</tt> pointer references | 987 | corresponding CPU and the <tt>->mynode</tt> field references the |
1027 | the corresponding <tt>rcu_state</tt> structure (and is most frequently | 988 | corresponding <tt>rcu_node</tt> structure. |
1028 | used to locate the name of the corresponding flavor of RCU for tracing), | ||
1029 | and the <tt>->mynode</tt> field references the corresponding | ||
1030 | <tt>rcu_node</tt> structure. | ||
1031 | The <tt>->mynode</tt> is used to propagate quiescent states | 989 | The <tt>->mynode</tt> is used to propagate quiescent states |
1032 | up the combining tree. | 990 | up the combining tree. |
1033 | <p>The <tt>->dynticks</tt> pointer references the | 991 | These two fields are constant and therefore do not require synchronization. |
1034 | <tt>rcu_dynticks</tt> structure corresponding to this | ||
1035 | CPU. | ||
1036 | Recall that a single per-CPU instance of the <tt>rcu_dynticks</tt> | ||
1037 | structure is shared among all flavors of RCU. | ||
1038 | These first four fields are constant and therefore require not | ||
1039 | synchronization. | ||
1040 | 992 | ||
1041 | </p><p>The <tt>->grpmask</tt> field indicates the bit in | 993 | <p>The <tt>->grpmask</tt> field indicates the bit in |
1042 | the <tt>->mynode->qsmask</tt> corresponding to this | 994 | the <tt>->mynode->qsmask</tt> corresponding to this |
1043 | <tt>rcu_data</tt> structure, and is also used when propagating | 995 | <tt>rcu_data</tt> structure, and is also used when propagating |
1044 | quiescent states. | 996 | quiescent states. |
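A minimal sketch of how ->grpmask is used follows; the field names mirror the text above, but the way the mask is derived from the CPU number is an assumption of the sketch, not a quotation of the kernel sources:

struct toy_rcu_node {
	unsigned long qsmask;	/* CPUs still owing a quiescent state */
	int grplo;		/* lowest-numbered CPU covered by this node */
};

struct toy_rcu_data {
	int cpu;
	struct toy_rcu_node *mynode;
	unsigned long grpmask;	/* this CPU's bit in mynode->qsmask */
};

static void toy_set_grpmask(struct toy_rcu_data *rdp)
{
	rdp->grpmask = 1UL << (rdp->cpu - rdp->mynode->grplo);
}

static void toy_report_qs(struct toy_rcu_data *rdp)
{
	/* This CPU has passed through a quiescent state: clear its bit. */
	rdp->mynode->qsmask &= ~rdp->grpmask;
}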
@@ -1057,12 +1009,12 @@ as follows: | |||
1057 | 3 bool cpu_no_qs; | 1009 | 3 bool cpu_no_qs; |
1058 | 4 bool core_needs_qs; | 1010 | 4 bool core_needs_qs; |
1059 | 5 bool gpwrap; | 1011 | 5 bool gpwrap; |
1060 | 6 unsigned long rcu_qs_ctr_snap; | ||
1061 | </pre> | 1012 | </pre> |
1062 | 1013 | ||
1063 | <p>The <tt>->gp_seq</tt> and <tt>->gp_seq_needed</tt> | 1014 | <p>The <tt>->gp_seq</tt> field is the counterpart of the field of the same |
1064 | fields are the counterparts of the fields of the same name | 1015 | name in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures. The |
1065 | in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures. | 1016 | <tt>->gp_seq_needed</tt> field is the counterpart of the field of the same |
1017 | name in the <tt>rcu_node</tt> structure. | ||
1066 | They may each lag up to one behind their <tt>rcu_node</tt> | 1018 | They may each lag up to one behind their <tt>rcu_node</tt> |
1067 | counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and | 1019 | counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and |
1068 | <tt>CONFIG_NO_HZ_FULL</tt> kernels can lag | 1020 | <tt>CONFIG_NO_HZ_FULL</tt> kernels can lag |
@@ -1103,10 +1055,6 @@ CPU has remained idle for so long that the | |||
1103 | <tt>gp_seq</tt> counter is in danger of overflow, which | 1055 | <tt>gp_seq</tt> counter is in danger of overflow, which |
1104 | will cause the CPU to disregard the values of its counters on | 1056 | will cause the CPU to disregard the values of its counters on |
1105 | its next exit from idle. | 1057 | its next exit from idle. |
1106 | Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect | ||
1107 | cases where a given operation has resulted in a quiescent state | ||
1108 | for all flavors of RCU, for example, <tt>cond_resched()</tt> | ||
1109 | when RCU has indicated a need for quiescent states. | ||
1110 | 1058 | ||
1111 | <h5>RCU Callback Handling</h5> | 1059 | <h5>RCU Callback Handling</h5> |
1112 | 1060 | ||
@@ -1179,26 +1127,22 @@ Finally, the <tt>->dynticks_fqs</tt> field is used to | |||
1179 | count the number of times this CPU is determined to be in | 1127 | count the number of times this CPU is determined to be in |
1180 | dyntick-idle state, and is used for tracing and debugging purposes. | 1128 | dyntick-idle state, and is used for tracing and debugging purposes. |
1181 | 1129 | ||
1182 | <h3><a name="The rcu_dynticks Structure"> | 1130 | <p> |
1183 | The <tt>rcu_dynticks</tt> Structure</a></h3> | 1131 | This portion of the <tt>rcu_data</tt> structure is declared as follows: |
1184 | |||
1185 | <p>The <tt>rcu_dynticks</tt> maintains the per-CPU dyntick-idle state | ||
1186 | for the corresponding CPU. | ||
1187 | Unlike the other structures, <tt>rcu_dynticks</tt> is not | ||
1188 | replicated over the different flavors of RCU. | ||
1189 | The fields in this structure may be accessed only from the corresponding | ||
1190 | CPU (and from tracing) unless otherwise stated. | ||
1191 | Its fields are as follows: | ||
1192 | 1132 | ||
1193 | <pre> | 1133 | <pre> |
1194 | 1 long dynticks_nesting; | 1134 | 1 long dynticks_nesting; |
1195 | 2 long dynticks_nmi_nesting; | 1135 | 2 long dynticks_nmi_nesting; |
1196 | 3 atomic_t dynticks; | 1136 | 3 atomic_t dynticks; |
1197 | 4 bool rcu_need_heavy_qs; | 1137 | 4 bool rcu_need_heavy_qs; |
1198 | 5 unsigned long rcu_qs_ctr; | 1138 | 5 bool rcu_urgent_qs; |
1199 | 6 bool rcu_urgent_qs; | ||
1200 | </pre> | 1139 | </pre> |
1201 | 1140 | ||
1141 | <p>These fields in the <tt>rcu_data</tt> structure maintain the per-CPU dyntick-idle | ||
1142 | state for the corresponding CPU. | ||
1143 | The fields may be accessed only from the corresponding CPU (and from tracing) | ||
1144 | unless otherwise stated. | ||
1145 | |||
1202 | <p>The <tt>->dynticks_nesting</tt> field counts the | 1146 | <p>The <tt>->dynticks_nesting</tt> field counts the |
1203 | nesting depth of process execution, so that in normal circumstances | 1147 | nesting depth of process execution, so that in normal circumstances |
1204 | this counter has value zero or one. | 1148 | this counter has value zero or one. |
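To illustrate what these dyntick-idle fields are for, here is a conceptual sketch of how a per-CPU counter lets the grace-period machinery judge idle CPUs without waking them: snapshot the counter, and later treat the CPU as having been quiescent if the snapshot was taken in an extended quiescent state or if the counter has changed since. The even-value-means-idle convention and all names are assumptions of this sketch, not the kernel's exact scheme.

#include <stdatomic.h>
#include <stdbool.h>

/* Conceptual sketch only: a counter bumped on every entry to and exit
 * from dyntick-idle, sampled by the grace-period machinery. */
struct toy_dynticks {
	atomic_long dynticks;
};

static void toy_idle_transition(struct toy_dynticks *td)
{
	atomic_fetch_add(&td->dynticks, 1);	/* parity flips on each transition */
}

static long toy_snap(struct toy_dynticks *td)
{
	return atomic_load(&td->dynticks);
}

static bool toy_cpu_was_quiescent(struct toy_dynticks *td, long snap)
{
	/* Idle at snapshot time, or passed through idle since: either way
	 * this CPU cannot still be in a pre-existing read-side critical
	 * section, so there is no need to interrupt it. */
	return !(snap & 1) || atomic_load(&td->dynticks) != snap;
}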
@@ -1240,19 +1184,12 @@ it is willing to call for heavy-weight dyntick-counter operations. | |||
1240 | This flag is checked by RCU's context-switch and <tt>cond_resched()</tt> | 1184 | This flag is checked by RCU's context-switch and <tt>cond_resched()</tt> |
1241 | code, which provide a momentary idle sojourn in response. | 1185 | code, which provide a momentary idle sojourn in response. |
1242 | 1186 | ||
1243 | </p><p>The <tt>->rcu_qs_ctr</tt> field is used to record | ||
1244 | quiescent states from <tt>cond_resched()</tt>. | ||
1245 | Because <tt>cond_resched()</tt> can execute quite frequently, this | ||
1246 | must be quite lightweight, as in a non-atomic increment of this | ||
1247 | per-CPU field. | ||
1248 | |||
1249 | </p><p>Finally, the <tt>->rcu_urgent_qs</tt> field is used to record | 1187 | </p><p>Finally, the <tt>->rcu_urgent_qs</tt> field is used to record |
1250 | the fact that the RCU core code would really like to see a quiescent | 1188 | the fact that the RCU core code would really like to see a quiescent state from |
1251 | state from the corresponding CPU, with the various other fields indicating | 1189 | the corresponding CPU, with the various other fields indicating just how badly |
1252 | just how badly RCU wants this quiescent state. | 1190 | RCU wants this quiescent state. |
1253 | This flag is checked by RCU's context-switch and <tt>cond_resched()</tt> | 1191 | This flag is checked by RCU's context-switch path |
1254 | code, which, if nothing else, non-atomically increment <tt>->rcu_qs_ctr</tt> | 1192 | (<tt>rcu_note_context_switch()</tt>) and the <tt>cond_resched()</tt> code. |
1255 | in response. | ||
1256 | 1193 | ||
1257 | <table> | 1194 | <table> |
1258 | <tr><th> </th></tr> | 1195 | <tr><th> </th></tr> |
@@ -1425,11 +1362,11 @@ the last part of the array, thus traversing only the leaf | |||
1425 | <h3><a name="Summary"> | 1362 | <h3><a name="Summary"> |
1426 | Summary</a></h3> | 1363 | Summary</a></h3> |
1427 | 1364 | ||
1428 | So each flavor of RCU is represented by an <tt>rcu_state</tt> structure, | 1365 | So the state of RCU is represented by an <tt>rcu_state</tt> structure, |
1429 | which contains a combining tree of <tt>rcu_node</tt> and | 1366 | which contains a combining tree of <tt>rcu_node</tt> and |
1430 | <tt>rcu_data</tt> structures. | 1367 | <tt>rcu_data</tt> structures. |
1431 | Finally, in <tt>CONFIG_NO_HZ_IDLE</tt> kernels, each CPU's dyntick-idle | 1368 | Finally, in <tt>CONFIG_NO_HZ_IDLE</tt> kernels, each CPU's dyntick-idle |
1432 | state is tracked by an <tt>rcu_dynticks</tt> structure. | 1369 | state is tracked by dynticks-related fields in the <tt>rcu_data</tt> structure. |
1433 | 1370 | ||
1434 | If you made it this far, you are well prepared to read the code | 1371 | If you made it this far, you are well prepared to read the code |
1435 | walkthroughs in the other articles in this series. | 1372 | walkthroughs in the other articles in this series. |
diff --git a/Documentation/RCU/Design/Data-Structures/blkd_task.svg b/Documentation/RCU/Design/Data-Structures/blkd_task.svg index 00e810bb8419..bed13e9ecab8 100644 --- a/Documentation/RCU/Design/Data-Structures/blkd_task.svg +++ b/Documentation/RCU/Design/Data-Structures/blkd_task.svg | |||
@@ -14,12 +14,12 @@ | |||
14 | xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" | 14 | xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" |
15 | xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" | 15 | xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" |
16 | width="10.1in" | 16 | width="10.1in" |
17 | height="8.6in" | 17 | height="6.5999999in" |
18 | viewBox="-44 -44 12088 10288" | 18 | viewBox="-44 -44 12088 7895.4414" |
19 | id="svg2" | 19 | id="svg2" |
20 | version="1.1" | 20 | version="1.1" |
21 | inkscape:version="0.48.4 r9939" | 21 | inkscape:version="0.92.2pre0 (973e216, 2017-07-25)" |
22 | sodipodi:docname="blkd_task.fig"> | 22 | sodipodi:docname="blkd_task.svg"> |
23 | <metadata | 23 | <metadata |
24 | id="metadata212"> | 24 | id="metadata212"> |
25 | <rdf:RDF> | 25 | <rdf:RDF> |
@@ -37,15 +37,16 @@ | |||
37 | <marker | 37 | <marker |
38 | inkscape:stockid="Arrow1Mend" | 38 | inkscape:stockid="Arrow1Mend" |
39 | orient="auto" | 39 | orient="auto" |
40 | refY="0.0" | 40 | refY="0" |
41 | refX="0.0" | 41 | refX="0" |
42 | id="Arrow1Mend" | 42 | id="Arrow1Mend" |
43 | style="overflow:visible;"> | 43 | style="overflow:visible"> |
44 | <path | 44 | <path |
45 | id="path3970" | 45 | id="path3970" |
46 | d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z " | 46 | d="M 0,0 5,-5 -12.5,0 5,5 Z" |
47 | style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;" | 47 | style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt" |
48 | transform="scale(0.4) rotate(180) translate(10,0)" /> | 48 | transform="matrix(-0.4,0,0,-0.4,-4,0)" |
49 | inkscape:connector-curvature="0" /> | ||
49 | </marker> | 50 | </marker> |
50 | </defs> | 51 | </defs> |
51 | <sodipodi:namedview | 52 | <sodipodi:namedview |
@@ -57,787 +58,574 @@ | |||
57 | guidetolerance="10" | 58 | guidetolerance="10" |
58 | inkscape:pageopacity="0" | 59 | inkscape:pageopacity="0" |
59 | inkscape:pageshadow="2" | 60 | inkscape:pageshadow="2" |
60 | inkscape:window-width="1087" | 61 | inkscape:window-width="1920" |
61 | inkscape:window-height="1144" | 62 | inkscape:window-height="1019" |
62 | id="namedview208" | 63 | id="namedview208" |
63 | showgrid="false" | 64 | showgrid="false" |
64 | inkscape:zoom="1.0495049" | 65 | inkscape:zoom="1.0495049" |
65 | inkscape:cx="454.50003" | 66 | inkscape:cx="456.40569" |
66 | inkscape:cy="387.00003" | 67 | inkscape:cy="348.88682" |
67 | inkscape:window-x="833" | 68 | inkscape:window-x="0" |
68 | inkscape:window-y="28" | 69 | inkscape:window-y="0" |
69 | inkscape:window-maximized="0" | 70 | inkscape:window-maximized="1" |
70 | inkscape:current-layer="g4" /> | 71 | inkscape:current-layer="g4" |
72 | showguides="false" /> | ||
71 | <g | 73 | <g |
72 | style="stroke-width:.025in; fill:none" | 74 | style="fill:none;stroke-width:0.025in" |
73 | id="g4"> | 75 | id="g4" |
76 | transform="translate(0,-2393.6637)"> | ||
74 | <!-- Line: box --> | 77 | <!-- Line: box --> |
75 | <rect | ||
76 | x="450" | ||
77 | y="0" | ||
78 | width="6300" | ||
79 | height="7350" | ||
80 | rx="0" | ||
81 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; " | ||
82 | id="rect6" /> | ||
83 | <!-- Line: box --> | 78 | <!-- Line: box --> |
84 | <rect | ||
85 | x="4950" | ||
86 | y="4950" | ||
87 | width="1500" | ||
88 | height="900" | ||
89 | rx="0" | ||
90 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | ||
91 | id="rect8" /> | ||
92 | <!-- Line: box --> | 79 | <!-- Line: box --> |
93 | <rect | ||
94 | x="750" | ||
95 | y="600" | ||
96 | width="5700" | ||
97 | height="3750" | ||
98 | rx="0" | ||
99 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; " | ||
100 | id="rect10" /> | ||
101 | <!-- Line --> | 80 | <!-- Line --> |
102 | <polyline | ||
103 | points="5250,8100 5688,5912 " | ||
104 | style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | ||
105 | id="polyline12" /> | ||
106 | <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790--> | 81 | <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790--> |
107 | <polyline | 82 | <polyline |
108 | points="5714 6068 5704 5822 5598 6044 " | 83 | points="5714 6068 5704 5822 5598 6044 " |
109 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; " | 84 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8" |
110 | id="polyline14" /> | 85 | id="polyline14" |
86 | transform="translate(23.757862,2185.7233)" /> | ||
111 | <!-- Line --> | 87 | <!-- Line --> |
112 | <polyline | ||
113 | points="4050,9300 4486,7262 " | ||
114 | style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | ||
115 | id="polyline16" /> | ||
116 | <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140--> | 88 | <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140--> |
117 | <polyline | 89 | <polyline |
118 | points="4514 7418 4506 7172 4396 7394 " | 90 | points="4514 7418 4506 7172 4396 7394 " |
119 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; " | 91 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8" |
120 | id="polyline18" /> | 92 | id="polyline18" |
93 | transform="translate(23.757862,2185.7233)" /> | ||
121 | <!-- Line --> | 94 | <!-- Line --> |
122 | <polyline | ||
123 | points="1040,9300 1476,7262 " | ||
124 | style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | ||
125 | id="polyline20" /> | ||
126 | <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140--> | 95 | <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140--> |
127 | <polyline | 96 | <polyline |
128 | points="1504 7418 1496 7172 1386 7394 " | 97 | points="1504 7418 1496 7172 1386 7394 " |
129 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; " | 98 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8" |
130 | id="polyline22" /> | 99 | id="polyline22" |
100 | transform="translate(23.757862,2185.7233)" /> | ||
131 | <!-- Line --> | 101 | <!-- Line --> |
132 | <polyline | ||
133 | points="2240,8100 2676,6062 " | ||
134 | style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | ||
135 | id="polyline24" /> | ||
136 | <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940--> | 102 | <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940--> |
137 | <polyline | 103 | <polyline |
138 | points="2704 6218 2696 5972 2586 6194 " | 104 | points="2704 6218 2696 5972 2586 6194 " |
139 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; " | 105 | style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8" |
140 | id="polyline26" /> | 106 | id="polyline26" |
107 | transform="translate(23.757862,2185.7233)" /> | ||
141 | <!-- Line: box --> | 108 | <!-- Line: box --> |
142 | <rect | 109 | <rect |
143 | x="0" | 110 | x="23.757858" |
144 | y="450" | 111 | y="2635.7231" |
145 | width="6300" | 112 | width="6300" |
146 | height="7350" | 113 | height="7350" |
147 | rx="0" | 114 | rx="0" |
148 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; " | 115 | style="fill:#ffffff;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
149 | id="rect28" /> | 116 | id="rect28" /> |
150 | <!-- Line: box --> | 117 | <!-- Line: box --> |
151 | <rect | 118 | <rect |
152 | x="300" | 119 | x="323.75787" |
153 | y="1050" | 120 | y="3235.7231" |
154 | width="5700" | 121 | width="5700" |
155 | height="3750" | 122 | height="3750" |
156 | rx="0" | 123 | rx="0" |
157 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; " | 124 | style="fill:#ffff00;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
158 | id="rect30" /> | 125 | id="rect30" /> |
159 | <!-- Line --> | 126 | <!-- Line --> |
160 | <polyline | 127 | <polyline |
161 | points="1350,3450 2350,2590 " | 128 | points="1350,3450 2350,2590 " |
162 | style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 129 | style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
163 | id="polyline32" /> | 130 | id="polyline32" |
131 | transform="translate(23.757862,2185.7233)" /> | ||
164 | <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510--> | 132 | <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510--> |
165 | <!-- Line --> | 133 | <!-- Line --> |
166 | <polyline | 134 | <polyline |
167 | points="4950,3450 3948,2590 " | 135 | points="4950,3450 3948,2590 " |
168 | style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 136 | style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
169 | id="polyline36" /> | 137 | id="polyline36" |
138 | transform="translate(23.757862,2185.7233)" /> | ||
170 | <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510--> | 139 | <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510--> |
171 | <!-- Line --> | 140 | <!-- Line --> |
172 | <polyline | 141 | <polyline |
173 | points="4050,6600 4050,4414 " | 142 | points="4050,6600 4050,4414 " |
174 | style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 143 | style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
175 | id="polyline40" /> | 144 | id="polyline40" |
145 | transform="translate(23.757862,2185.7233)" /> | ||
176 | <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290--> | 146 | <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290--> |
177 | <!-- Line --> | 147 | <!-- Line --> |
178 | <polyline | 148 | <polyline |
179 | points="1050,6600 1050,4414 " | 149 | points="1050,6600 1050,4414 " |
180 | style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 150 | style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
181 | id="polyline44" /> | 151 | id="polyline44" |
152 | transform="translate(23.757862,2185.7233)" /> | ||
182 | <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290--> | 153 | <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290--> |
183 | <!-- Line --> | 154 | <!-- Line --> |
184 | <polyline | 155 | <polyline |
185 | points="2250,5400 2250,4414 " | 156 | points="2250,5400 2250,4414 " |
186 | style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 157 | style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
187 | id="polyline48" /> | 158 | id="polyline48" |
159 | transform="translate(23.757862,2185.7233)" /> | ||
188 | <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290--> | 160 | <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290--> |
189 | <!-- Line --> | 161 | <!-- Line --> |
190 | <polyline | ||
191 | points="2250,8100 2250,6364 " | ||
192 | style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | ||
193 | id="polyline52" /> | ||
194 | <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240--> | 162 | <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240--> |
195 | <!-- Line --> | 163 | <!-- Line --> |
196 | <polyline | ||
197 | points="1050,9300 1050,7564 " | ||
198 | style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | ||
199 | id="polyline56" /> | ||
200 | <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440--> | 164 | <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440--> |
201 | <!-- Line --> | 165 | <!-- Line --> |
202 | <polyline | ||
203 | points="4050,9300 4050,7564 " | ||
204 | style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | ||
205 | id="polyline60" /> | ||
206 | <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440--> | 166 | <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440--> |
207 | <!-- Line --> | 167 | <!-- Line --> |
208 | <polyline | ||
209 | points="5250,8100 5250,6364 " | ||
210 | style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | ||
211 | id="polyline64" /> | ||
212 | <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240--> | 168 | <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240--> |
213 | <!-- Circle --> | 169 | <!-- Circle --> |
214 | <circle | 170 | <circle |
215 | cx="2850" | 171 | cx="2873.7581" |
216 | cy="3900" | 172 | cy="6085.7236" |
217 | r="76" | 173 | r="76" |
218 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 174 | style="fill:#000000;stroke:#000000;stroke-width:14" |
219 | id="circle68" /> | 175 | id="circle68" /> |
220 | <!-- Circle --> | 176 | <!-- Circle --> |
221 | <circle | 177 | <circle |
222 | cx="3150" | 178 | cx="3173.7581" |
223 | cy="3900" | 179 | cy="6085.7236" |
224 | r="76" | 180 | r="76" |
225 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 181 | style="fill:#000000;stroke:#000000;stroke-width:14" |
226 | id="circle70" /> | 182 | id="circle70" /> |
227 | <!-- Circle --> | 183 | <!-- Circle --> |
228 | <circle | 184 | <circle |
229 | cx="3450" | 185 | cx="3473.7581" |
230 | cy="3900" | 186 | cy="6085.7236" |
231 | r="76" | 187 | r="76" |
232 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 188 | style="fill:#000000;stroke:#000000;stroke-width:14" |
233 | id="circle72" /> | 189 | id="circle72" /> |
234 | <!-- Circle --> | 190 | <!-- Circle --> |
235 | <circle | 191 | <circle |
236 | cx="1350" | 192 | cx="1373.7578" |
237 | cy="5100" | 193 | cy="7285.7236" |
238 | r="76" | 194 | r="76" |
239 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 195 | style="fill:#000000;stroke:#000000;stroke-width:14" |
240 | id="circle74" /> | 196 | id="circle74" /> |
241 | <!-- Circle --> | 197 | <!-- Circle --> |
242 | <circle | 198 | <circle |
243 | cx="1650" | 199 | cx="1673.7578" |
244 | cy="5100" | 200 | cy="7285.7236" |
245 | r="76" | 201 | r="76" |
246 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 202 | style="fill:#000000;stroke:#000000;stroke-width:14" |
247 | id="circle76" /> | 203 | id="circle76" /> |
248 | <!-- Circle --> | 204 | <!-- Circle --> |
249 | <circle | 205 | <circle |
250 | cx="1950" | 206 | cx="1973.7578" |
251 | cy="5100" | 207 | cy="7285.7236" |
252 | r="76" | 208 | r="76" |
253 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 209 | style="fill:#000000;stroke:#000000;stroke-width:14" |
254 | id="circle78" /> | 210 | id="circle78" /> |
255 | <!-- Circle --> | 211 | <!-- Circle --> |
256 | <circle | 212 | <circle |
257 | cx="4350" | 213 | cx="4373.7578" |
258 | cy="5100" | 214 | cy="7285.7236" |
259 | r="76" | 215 | r="76" |
260 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 216 | style="fill:#000000;stroke:#000000;stroke-width:14" |
261 | id="circle80" /> | 217 | id="circle80" /> |
262 | <!-- Circle --> | 218 | <!-- Circle --> |
263 | <circle | 219 | <circle |
264 | cx="4650" | 220 | cx="4673.7578" |
265 | cy="5100" | 221 | cy="7285.7236" |
266 | r="76" | 222 | r="76" |
267 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 223 | style="fill:#000000;stroke:#000000;stroke-width:14" |
268 | id="circle82" /> | 224 | id="circle82" /> |
269 | <!-- Circle --> | 225 | <!-- Circle --> |
270 | <circle | 226 | <circle |
271 | cx="4950" | 227 | cx="4973.7578" |
272 | cy="5100" | 228 | cy="7285.7236" |
273 | r="76" | 229 | r="76" |
274 | style="fill:#000000;stroke:#000000;stroke-width:14;" | 230 | style="fill:#000000;stroke:#000000;stroke-width:14" |
275 | id="circle84" /> | 231 | id="circle84" /> |
276 | <!-- Line: box --> | 232 | <!-- Line: box --> |
277 | <rect | 233 | <rect |
278 | x="750" | 234 | x="773.75781" |
279 | y="3450" | 235 | y="5635.7236" |
280 | width="1800" | 236 | width="1800" |
281 | height="900" | 237 | height="900" |
282 | rx="0" | 238 | rx="0" |
283 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; " | 239 | style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
284 | id="rect86" /> | 240 | id="rect86" /> |
285 | <!-- Line: box --> | 241 | <!-- Line: box --> |
286 | <rect | 242 | <rect |
287 | x="300" | 243 | x="323.75787" |
288 | y="6600" | 244 | y="8785.7227" |
289 | width="1500" | 245 | width="1500" |
290 | height="900" | 246 | height="900" |
291 | rx="0" | 247 | rx="0" |
292 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | 248 | style="fill:#87cfff;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
293 | id="rect88" /> | 249 | id="rect88" /> |
294 | <!-- Line: box --> | 250 | <!-- Line: box --> |
295 | <rect | 251 | <rect |
296 | x="4500" | 252 | x="4523.7578" |
297 | y="5400" | 253 | y="7585.7236" |
298 | width="1500" | 254 | width="1500" |
299 | height="900" | 255 | height="900" |
300 | rx="0" | 256 | rx="0" |
301 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | 257 | style="fill:#87cfff;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
302 | id="rect90" /> | 258 | id="rect90" /> |
303 | <!-- Line: box --> | 259 | <!-- Line: box --> |
304 | <rect | 260 | <rect |
305 | x="3300" | 261 | x="3323.7581" |
306 | y="6600" | 262 | y="8785.7227" |
307 | width="1500" | 263 | width="1500" |
308 | height="900" | 264 | height="900" |
309 | rx="0" | 265 | rx="0" |
310 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | 266 | style="fill:#87cfff;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
311 | id="rect92" /> | 267 | id="rect92" /> |
312 | <!-- Line: box --> | 268 | <!-- Line: box --> |
313 | <rect | 269 | <rect |
314 | x="2250" | 270 | x="2273.7581" |
315 | y="1650" | 271 | y="3835.7231" |
316 | width="1800" | 272 | width="1800" |
317 | height="900" | 273 | height="900" |
318 | rx="0" | 274 | rx="0" |
319 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; " | 275 | style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
320 | id="rect94" /> | 276 | id="rect94" /> |
321 | <!-- Line: box --> | 277 | <!-- Line: box --> |
322 | <rect | ||
323 | x="0" | ||
324 | y="9300" | ||
325 | width="2100" | ||
326 | height="900" | ||
327 | rx="0" | ||
328 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; " | ||
329 | id="rect96" /> | ||
330 | <!-- Line: box --> | 278 | <!-- Line: box --> |
331 | <rect | ||
332 | x="1350" | ||
333 | y="8100" | ||
334 | width="2100" | ||
335 | height="900" | ||
336 | rx="0" | ||
337 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; " | ||
338 | id="rect98" /> | ||
339 | <!-- Line: box --> | 279 | <!-- Line: box --> |
340 | <rect | ||
341 | x="3000" | ||
342 | y="9300" | ||
343 | width="2100" | ||
344 | height="900" | ||
345 | rx="0" | ||
346 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; " | ||
347 | id="rect100" /> | ||
348 | <!-- Line: box --> | 280 | <!-- Line: box --> |
349 | <rect | ||
350 | x="4350" | ||
351 | y="8100" | ||
352 | width="2100" | ||
353 | height="900" | ||
354 | rx="0" | ||
355 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; " | ||
356 | id="rect102" /> | ||
357 | <!-- Line: box --> | 281 | <!-- Line: box --> |
358 | <rect | 282 | <rect |
359 | x="1500" | 283 | x="1523.7578" |
360 | y="5400" | 284 | y="7585.7236" |
361 | width="1500" | 285 | width="1500" |
362 | height="900" | 286 | height="900" |
363 | rx="0" | 287 | rx="0" |
364 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; " | 288 | style="fill:#87cfff;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
365 | id="rect104" /> | 289 | id="rect104" /> |
366 | <!-- Line --> | 290 | <!-- Line --> |
367 | <polygon | 291 | <polygon |
368 | points="5550,3450 7350,2850 7350,5100 5550,4350 5550,3450 " | 292 | points="7350,2850 7350,5100 5550,4350 5550,3450 " |
369 | style="stroke:#000000;stroke-width:14; stroke-linejoin:miter; stroke-linecap:butt; stroke-dasharray:120 120;fill:#ffbfbf; " | 293 | style="fill:#ffbfbf;stroke:#000000;stroke-width:14;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:120, 120" |
370 | id="polygon106" /> | 294 | id="polygon106" |
295 | transform="translate(23.757862,2185.7233)" /> | ||
371 | <!-- Line --> | 296 | <!-- Line --> |
372 | <polyline | 297 | <polyline |
373 | points="9300,3150 10734,3150 " | 298 | points="9300,3150 10734,3150 " |
374 | style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 299 | style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
375 | id="polyline108" /> | 300 | id="polyline108" |
301 | transform="translate(23.757862,2185.7233)" /> | ||
376 | <!-- Arrowhead on XXXpoint 9300 3150 - 10860 3150--> | 302 | <!-- Arrowhead on XXXpoint 9300 3150 - 10860 3150--> |
377 | <!-- Line: box --> | 303 | <!-- Line: box --> |
378 | <rect | 304 | <rect |
379 | x="10800" | 305 | x="10823.758" |
380 | y="2850" | 306 | y="5035.7236" |
381 | width="1200" | 307 | width="1200" |
382 | height="750" | 308 | height="750" |
383 | rx="0" | 309 | rx="0" |
384 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | 310 | style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
385 | id="rect112" /> | 311 | id="rect112" /> |
386 | <!-- Line --> | 312 | <!-- Line --> |
387 | <polyline | 313 | <polyline |
388 | points="11400,3600 11400,4284 " | 314 | points="11400,3600 11400,4284 " |
389 | style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 315 | style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
390 | id="polyline114" /> | 316 | id="polyline114" |
317 | transform="translate(23.757862,2185.7233)" /> | ||
391 | <!-- Arrowhead on XXXpoint 11400 3600 - 11400 4410--> | 318 | <!-- Arrowhead on XXXpoint 11400 3600 - 11400 4410--> |
392 | <!-- Line: box --> | 319 | <!-- Line: box --> |
393 | <rect | 320 | <rect |
394 | x="10800" | 321 | x="10823.758" |
395 | y="4350" | 322 | y="6535.7236" |
396 | width="1200" | 323 | width="1200" |
397 | height="750" | 324 | height="750" |
398 | rx="0" | 325 | rx="0" |
399 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | 326 | style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
400 | id="rect118" /> | 327 | id="rect118" /> |
401 | <!-- Line --> | 328 | <!-- Line --> |
402 | <polyline | 329 | <polyline |
403 | points="11400,5100 11400,5784 " | 330 | points="11400,5100 11400,5784 " |
404 | style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 331 | style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
405 | id="polyline120" /> | 332 | id="polyline120" |
333 | transform="translate(23.757862,2185.7233)" /> | ||
406 | <!-- Arrowhead on XXXpoint 11400 5100 - 11400 5910--> | 334 | <!-- Arrowhead on XXXpoint 11400 5100 - 11400 5910--> |
407 | <!-- Line: box --> | 335 | <!-- Line: box --> |
408 | <rect | 336 | <rect |
409 | x="10800" | 337 | x="10823.758" |
410 | y="5850" | 338 | y="8035.7236" |
411 | width="1200" | 339 | width="1200" |
412 | height="750" | 340 | height="750" |
413 | rx="0" | 341 | rx="0" |
414 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; " | 342 | style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
415 | id="rect124" /> | 343 | id="rect124" /> |
416 | <!-- Line --> | 344 | <!-- Line --> |
417 | <polyline | 345 | <polyline |
418 | points="9300,3900 9900,3900 9900,4650 10734,4650 " | 346 | points="9300,3900 9900,3900 9900,4650 10734,4650 " |
419 | style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 347 | style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
420 | id="polyline126" /> | 348 | id="polyline126" |
349 | transform="translate(23.757862,2185.7233)" /> | ||
421 | <!-- Arrowhead on XXXpoint 9900 4650 - 10860 4650--> | 350 | <!-- Arrowhead on XXXpoint 9900 4650 - 10860 4650--> |
422 | <!-- Line --> | 351 | <!-- Line --> |
423 | <polyline | 352 | <polyline |
424 | points="9300,4650 9600,4650 9600,6150 10734,6150 " | 353 | points="9300,4650 9600,4650 9600,6150 10734,6150 " |
425 | style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 354 | style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
426 | id="polyline130" /> | 355 | id="polyline130" |
356 | transform="translate(23.757862,2185.7233)" /> | ||
427 | <!-- Arrowhead on XXXpoint 9600 6150 - 10860 6150--> | 357 | <!-- Arrowhead on XXXpoint 9600 6150 - 10860 6150--> |
428 | <!-- Text --> | 358 | <!-- Text --> |
429 | <text | ||
430 | xml:space="preserve" | ||
431 | x="6450" | ||
432 | y="300" | ||
433 | fill="#000000" | ||
434 | font-family="Helvetica" | ||
435 | font-style="normal" | ||
436 | font-weight="normal" | ||
437 | font-size="192" | ||
438 | text-anchor="end" | ||
439 | id="text134">rcu_bh</text> | ||
440 | <!-- Text --> | 359 | <!-- Text --> |
441 | <text | 360 | <text |
442 | xml:space="preserve" | 361 | xml:space="preserve" |
443 | x="3150" | 362 | x="3173.7581" |
444 | y="1950" | 363 | y="4135.7231" |
445 | fill="#000000" | ||
446 | font-family="Courier" | ||
447 | font-style="normal" | 364 | font-style="normal" |
448 | font-weight="bold" | 365 | font-weight="bold" |
449 | font-size="192" | 366 | font-size="192" |
450 | text-anchor="middle" | 367 | id="text136" |
451 | id="text136">struct</text> | 368 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text> |
452 | <!-- Text --> | 369 | <!-- Text --> |
453 | <text | 370 | <text |
454 | xml:space="preserve" | 371 | xml:space="preserve" |
455 | x="3150" | 372 | x="3173.7581" |
456 | y="2250" | 373 | y="4435.7236" |
457 | fill="#000000" | ||
458 | font-family="Courier" | ||
459 | font-style="normal" | 374 | font-style="normal" |
460 | font-weight="bold" | 375 | font-weight="bold" |
461 | font-size="192" | 376 | font-size="192" |
462 | text-anchor="middle" | 377 | id="text138" |
463 | id="text138">rcu_node</text> | 378 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_node</text> |
464 | <!-- Text --> | 379 | <!-- Text --> |
465 | <text | 380 | <text |
466 | xml:space="preserve" | 381 | xml:space="preserve" |
467 | x="1650" | 382 | x="1673.7578" |
468 | y="3750" | 383 | y="5935.7236" |
469 | fill="#000000" | ||
470 | font-family="Courier" | ||
471 | font-style="normal" | 384 | font-style="normal" |
472 | font-weight="bold" | 385 | font-weight="bold" |
473 | font-size="192" | 386 | font-size="192" |
474 | text-anchor="middle" | 387 | id="text140" |
475 | id="text140">struct</text> | 388 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text> |
476 | <!-- Text --> | 389 | <!-- Text --> |
477 | <text | 390 | <text |
478 | xml:space="preserve" | 391 | xml:space="preserve" |
479 | x="1650" | 392 | x="1673.7578" |
480 | y="4050" | 393 | y="6235.7236" |
481 | fill="#000000" | ||
482 | font-family="Courier" | ||
483 | font-style="normal" | 394 | font-style="normal" |
484 | font-weight="bold" | 395 | font-weight="bold" |
485 | font-size="192" | 396 | font-size="192" |
486 | text-anchor="middle" | 397 | id="text142" |
487 | id="text142">rcu_node</text> | 398 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_node</text> |
488 | <!-- Text --> | 399 | <!-- Text --> |
489 | <text | 400 | <text |
490 | xml:space="preserve" | 401 | xml:space="preserve" |
491 | x="2250" | 402 | x="2273.7581" |
492 | y="5700" | 403 | y="7885.7236" |
493 | fill="#000000" | ||
494 | font-family="Courier" | ||
495 | font-style="normal" | 404 | font-style="normal" |
496 | font-weight="bold" | 405 | font-weight="bold" |
497 | font-size="192" | 406 | font-size="192" |
498 | text-anchor="middle" | 407 | id="text144" |
499 | id="text144">struct</text> | 408 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text> |
500 | <!-- Text --> | 409 | <!-- Text --> |
501 | <text | 410 | <text |
502 | xml:space="preserve" | 411 | xml:space="preserve" |
503 | x="2250" | 412 | x="2273.7581" |
504 | y="6000" | 413 | y="8185.7236" |
505 | fill="#000000" | ||
506 | font-family="Courier" | ||
507 | font-style="normal" | 414 | font-style="normal" |
508 | font-weight="bold" | 415 | font-weight="bold" |
509 | font-size="192" | 416 | font-size="192" |
510 | text-anchor="middle" | 417 | id="text146" |
511 | id="text146">rcu_data</text> | 418 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_data</text> |
512 | <!-- Text --> | 419 | <!-- Text --> |
513 | <text | 420 | <text |
514 | xml:space="preserve" | 421 | xml:space="preserve" |
515 | x="1050" | 422 | x="1073.7578" |
516 | y="6900" | 423 | y="9085.7227" |
517 | fill="#000000" | ||
518 | font-family="Courier" | ||
519 | font-style="normal" | 424 | font-style="normal" |
520 | font-weight="bold" | 425 | font-weight="bold" |
521 | font-size="192" | 426 | font-size="192" |
522 | text-anchor="middle" | 427 | id="text148" |
523 | id="text148">struct</text> | 428 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text> |
524 | <!-- Text --> | 429 | <!-- Text --> |
525 | <text | 430 | <text |
526 | xml:space="preserve" | 431 | xml:space="preserve" |
527 | x="1050" | 432 | x="1073.7578" |
528 | y="7200" | 433 | y="9385.7227" |
529 | fill="#000000" | ||
530 | font-family="Courier" | ||
531 | font-style="normal" | 434 | font-style="normal" |
532 | font-weight="bold" | 435 | font-weight="bold" |
533 | font-size="192" | 436 | font-size="192" |
534 | text-anchor="middle" | 437 | id="text150" |
535 | id="text150">rcu_data</text> | 438 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_data</text> |
536 | <!-- Text --> | 439 | <!-- Text --> |
537 | <text | 440 | <text |
538 | xml:space="preserve" | 441 | xml:space="preserve" |
539 | x="5250" | 442 | x="5273.7578" |
540 | y="5700" | 443 | y="7885.7236" |
541 | fill="#000000" | ||
542 | font-family="Courier" | ||
543 | font-style="normal" | 444 | font-style="normal" |
544 | font-weight="bold" | 445 | font-weight="bold" |
545 | font-size="192" | 446 | font-size="192" |
546 | text-anchor="middle" | 447 | id="text152" |
547 | id="text152">struct</text> | 448 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text> |
548 | <!-- Text --> | 449 | <!-- Text --> |
549 | <text | 450 | <text |
550 | xml:space="preserve" | 451 | xml:space="preserve" |
551 | x="5250" | 452 | x="5273.7578" |
552 | y="6000" | 453 | y="8185.7236" |
553 | fill="#000000" | ||
554 | font-family="Courier" | ||
555 | font-style="normal" | 454 | font-style="normal" |
556 | font-weight="bold" | 455 | font-weight="bold" |
557 | font-size="192" | 456 | font-size="192" |
558 | text-anchor="middle" | 457 | id="text154" |
559 | id="text154">rcu_data</text> | 458 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_data</text> |
560 | <!-- Text --> | 459 | <!-- Text --> |
561 | <text | 460 | <text |
562 | xml:space="preserve" | 461 | xml:space="preserve" |
563 | x="4050" | 462 | x="4073.7578" |
564 | y="6900" | 463 | y="9085.7227" |
565 | fill="#000000" | ||
566 | font-family="Courier" | ||
567 | font-style="normal" | 464 | font-style="normal" |
568 | font-weight="bold" | 465 | font-weight="bold" |
569 | font-size="192" | 466 | font-size="192" |
570 | text-anchor="middle" | 467 | id="text156" |
571 | id="text156">struct</text> | 468 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text> |
572 | <!-- Text --> | 469 | <!-- Text --> |
573 | <text | 470 | <text |
574 | xml:space="preserve" | 471 | xml:space="preserve" |
575 | x="4050" | 472 | x="4073.7578" |
576 | y="7200" | 473 | y="9385.7227" |
577 | fill="#000000" | ||
578 | font-family="Courier" | ||
579 | font-style="normal" | 474 | font-style="normal" |
580 | font-weight="bold" | 475 | font-weight="bold" |
581 | font-size="192" | 476 | font-size="192" |
582 | text-anchor="middle" | 477 | id="text158" |
583 | id="text158">rcu_data</text> | 478 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_data</text> |
584 | <!-- Text --> | 479 | <!-- Text --> |
585 | <text | 480 | <text |
586 | xml:space="preserve" | 481 | xml:space="preserve" |
587 | x="450" | 482 | x="473.75784" |
588 | y="1350" | 483 | y="3535.7231" |
589 | fill="#000000" | ||
590 | font-family="Courier" | ||
591 | font-style="normal" | 484 | font-style="normal" |
592 | font-weight="bold" | 485 | font-weight="bold" |
593 | font-size="192" | 486 | font-size="192" |
594 | text-anchor="start" | 487 | id="text160" |
595 | id="text160">struct rcu_state</text> | 488 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:start;fill:#000000">struct rcu_state</text> |
596 | <!-- Text --> | 489 | <!-- Text --> |
597 | <text | ||
598 | xml:space="preserve" | ||
599 | x="1050" | ||
600 | y="9600" | ||
601 | fill="#000000" | ||
602 | font-family="Courier" | ||
603 | font-style="normal" | ||
604 | font-weight="bold" | ||
605 | font-size="192" | ||
606 | text-anchor="middle" | ||
607 | id="text162">struct</text> | ||
608 | <!-- Text --> | 490 | <!-- Text --> |
609 | <text | ||
610 | xml:space="preserve" | ||
611 | x="1050" | ||
612 | y="9900" | ||
613 | fill="#000000" | ||
614 | font-family="Courier" | ||
615 | font-style="normal" | ||
616 | font-weight="bold" | ||
617 | font-size="192" | ||
618 | text-anchor="middle" | ||
619 | id="text164">rcu_dynticks</text> | ||
620 | <!-- Text --> | 491 | <!-- Text --> |
621 | <text | ||
622 | xml:space="preserve" | ||
623 | x="4050" | ||
624 | y="9600" | ||
625 | fill="#000000" | ||
626 | font-family="Courier" | ||
627 | font-style="normal" | ||
628 | font-weight="bold" | ||
629 | font-size="192" | ||
630 | text-anchor="middle" | ||
631 | id="text166">struct</text> | ||
632 | <!-- Text --> | 492 | <!-- Text --> |
633 | <text | ||
634 | xml:space="preserve" | ||
635 | x="4050" | ||
636 | y="9900" | ||
637 | fill="#000000" | ||
638 | font-family="Courier" | ||
639 | font-style="normal" | ||
640 | font-weight="bold" | ||
641 | font-size="192" | ||
642 | text-anchor="middle" | ||
643 | id="text168">rcu_dynticks</text> | ||
644 | <!-- Text --> | 493 | <!-- Text --> |
645 | <text | ||
646 | xml:space="preserve" | ||
647 | x="2400" | ||
648 | y="8400" | ||
649 | fill="#000000" | ||
650 | font-family="Courier" | ||
651 | font-style="normal" | ||
652 | font-weight="bold" | ||
653 | font-size="192" | ||
654 | text-anchor="middle" | ||
655 | id="text170">struct</text> | ||
656 | <!-- Text --> | 494 | <!-- Text --> |
657 | <text | ||
658 | xml:space="preserve" | ||
659 | x="2400" | ||
660 | y="8700" | ||
661 | fill="#000000" | ||
662 | font-family="Courier" | ||
663 | font-style="normal" | ||
664 | font-weight="bold" | ||
665 | font-size="192" | ||
666 | text-anchor="middle" | ||
667 | id="text172">rcu_dynticks</text> | ||
668 | <!-- Text --> | 495 | <!-- Text --> |
669 | <text | ||
670 | xml:space="preserve" | ||
671 | x="5400" | ||
672 | y="8400" | ||
673 | fill="#000000" | ||
674 | font-family="Courier" | ||
675 | font-style="normal" | ||
676 | font-weight="bold" | ||
677 | font-size="192" | ||
678 | text-anchor="middle" | ||
679 | id="text174">struct</text> | ||
680 | <!-- Text --> | 496 | <!-- Text --> |
681 | <text | ||
682 | xml:space="preserve" | ||
683 | x="5400" | ||
684 | y="8700" | ||
685 | fill="#000000" | ||
686 | font-family="Courier" | ||
687 | font-style="normal" | ||
688 | font-weight="bold" | ||
689 | font-size="192" | ||
690 | text-anchor="middle" | ||
691 | id="text176">rcu_dynticks</text> | ||
692 | <!-- Text --> | 497 | <!-- Text --> |
693 | <text | 498 | <text |
694 | xml:space="preserve" | 499 | xml:space="preserve" |
695 | x="6000" | 500 | x="6023.7578" |
696 | y="750" | 501 | y="2935.7231" |
697 | fill="#000000" | ||
698 | font-family="Helvetica" | ||
699 | font-style="normal" | 502 | font-style="normal" |
700 | font-weight="normal" | 503 | font-weight="normal" |
701 | font-size="192" | 504 | font-size="192" |
702 | text-anchor="end" | 505 | id="text178" |
703 | id="text178">rcu_sched</text> | 506 | style="font-style:normal;font-weight:normal;font-size:192px;font-family:Helvetica;text-anchor:end;fill:#000000">rcu_state</text> |
704 | <!-- Text --> | 507 | <!-- Text --> |
705 | <text | 508 | <text |
706 | xml:space="preserve" | 509 | xml:space="preserve" |
707 | x="11400" | 510 | x="11423.758" |
708 | y="3300" | 511 | y="5485.7236" |
709 | fill="#000000" | ||
710 | font-family="Helvetica" | ||
711 | font-style="normal" | 512 | font-style="normal" |
712 | font-weight="normal" | 513 | font-weight="normal" |
713 | font-size="216" | 514 | font-size="216" |
714 | text-anchor="middle" | 515 | id="text180" |
715 | id="text180">T3</text> | 516 | style="font-style:normal;font-weight:normal;font-size:216px;font-family:Helvetica;text-anchor:middle;fill:#000000">T3</text> |
716 | <!-- Text --> | 517 | <!-- Text --> |
717 | <text | 518 | <text |
718 | xml:space="preserve" | 519 | xml:space="preserve" |
719 | x="11400" | 520 | x="11423.758" |
720 | y="4800" | 521 | y="6985.7236" |
721 | fill="#000000" | ||
722 | font-family="Helvetica" | ||
723 | font-style="normal" | 522 | font-style="normal" |
724 | font-weight="normal" | 523 | font-weight="normal" |
725 | font-size="216" | 524 | font-size="216" |
726 | text-anchor="middle" | 525 | id="text182" |
727 | id="text182">T2</text> | 526 | style="font-style:normal;font-weight:normal;font-size:216px;font-family:Helvetica;text-anchor:middle;fill:#000000">T2</text> |
728 | <!-- Text --> | 527 | <!-- Text --> |
729 | <text | 528 | <text |
730 | xml:space="preserve" | 529 | xml:space="preserve" |
731 | x="11400" | 530 | x="11423.758" |
732 | y="6300" | 531 | y="8485.7227" |
733 | fill="#000000" | ||
734 | font-family="Helvetica" | ||
735 | font-style="normal" | 532 | font-style="normal" |
736 | font-weight="normal" | 533 | font-weight="normal" |
737 | font-size="216" | 534 | font-size="216" |
738 | text-anchor="middle" | 535 | id="text184" |
739 | id="text184">T1</text> | 536 | style="font-style:normal;font-weight:normal;font-size:216px;font-family:Helvetica;text-anchor:middle;fill:#000000">T1</text> |
740 | <!-- Line --> | 537 | <!-- Line --> |
741 | <polyline | 538 | <polyline |
742 | points="5250,5400 5250,4414 " | 539 | points="5250,5400 5250,4414 " |
743 | style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" | 540 | style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)" |
744 | id="polyline186" /> | 541 | id="polyline186" |
542 | transform="translate(23.757862,2185.7233)" /> | ||
745 | <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290--> | 543 | <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290--> |
746 | <!-- Line: box --> | 544 | <!-- Line: box --> |
747 | <rect | 545 | <rect |
748 | x="3750" | 546 | x="3773.7581" |
749 | y="3450" | 547 | y="5635.7236" |
750 | width="1800" | 548 | width="1800" |
751 | height="900" | 549 | height="900" |
752 | rx="0" | 550 | rx="0" |
753 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; " | 551 | style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
754 | id="rect190" /> | 552 | id="rect190" /> |
755 | <!-- Line: box --> | 553 | <!-- Line: box --> |
756 | <rect | 554 | <rect |
757 | x="7350" | 555 | x="7373.7578" |
758 | y="2850" | 556 | y="5035.7236" |
759 | width="1950" | 557 | width="1950" |
760 | height="750" | 558 | height="750" |
761 | rx="0" | 559 | rx="0" |
762 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; " | 560 | style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
763 | id="rect192" /> | 561 | id="rect192" /> |
764 | <!-- Line: box --> | 562 | <!-- Line: box --> |
765 | <rect | 563 | <rect |
766 | x="7350" | 564 | x="7373.7578" |
767 | y="3600" | 565 | y="5785.7236" |
768 | width="1950" | 566 | width="1950" |
769 | height="750" | 567 | height="750" |
770 | rx="0" | 568 | rx="0" |
771 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; " | 569 | style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
772 | id="rect194" /> | 570 | id="rect194" /> |
773 | <!-- Line: box --> | 571 | <!-- Line: box --> |
774 | <rect | 572 | <rect |
775 | x="7350" | 573 | x="7373.7578" |
776 | y="4350" | 574 | y="6535.7236" |
777 | width="1950" | 575 | width="1950" |
778 | height="750" | 576 | height="750" |
779 | rx="0" | 577 | rx="0" |
780 | style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; " | 578 | style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter" |
781 | id="rect196" /> | 579 | id="rect196" /> |
782 | <!-- Text --> | 580 | <!-- Text --> |
783 | <text | 581 | <text |
784 | xml:space="preserve" | 582 | xml:space="preserve" |
785 | x="4650" | 583 | x="4673.7578" |
786 | y="4050" | 584 | y="6235.7236" |
787 | fill="#000000" | ||
788 | font-family="Courier" | ||
789 | font-style="normal" | 585 | font-style="normal" |
790 | font-weight="bold" | 586 | font-weight="bold" |
791 | font-size="192" | 587 | font-size="192" |
792 | text-anchor="middle" | 588 | id="text198" |
793 | id="text198">rcu_node</text> | 589 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_node</text> |
794 | <!-- Text --> | 590 | <!-- Text --> |
795 | <text | 591 | <text |
796 | xml:space="preserve" | 592 | xml:space="preserve" |
797 | x="4650" | 593 | x="4673.7578" |
798 | y="3750" | 594 | y="5935.7236" |
799 | fill="#000000" | ||
800 | font-family="Courier" | ||
801 | font-style="normal" | 595 | font-style="normal" |
802 | font-weight="bold" | 596 | font-weight="bold" |
803 | font-size="192" | 597 | font-size="192" |
804 | text-anchor="middle" | 598 | id="text200" |
805 | id="text200">struct</text> | 599 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text> |
806 | <!-- Text --> | 600 | <!-- Text --> |
807 | <text | 601 | <text |
808 | xml:space="preserve" | 602 | xml:space="preserve" |
809 | x="7500" | 603 | x="7523.7578" |
810 | y="3300" | 604 | y="5485.7236" |
811 | fill="#000000" | ||
812 | font-family="Courier" | ||
813 | font-style="normal" | 605 | font-style="normal" |
814 | font-weight="bold" | 606 | font-weight="bold" |
815 | font-size="192" | 607 | font-size="192" |
816 | text-anchor="start" | 608 | id="text202" |
817 | id="text202">blkd_tasks</text> | 609 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:start;fill:#000000">blkd_tasks</text> |
818 | <!-- Text --> | 610 | <!-- Text --> |
819 | <text | 611 | <text |
820 | xml:space="preserve" | 612 | xml:space="preserve" |
821 | x="7500" | 613 | x="7523.7578" |
822 | y="4050" | 614 | y="6235.7236" |
823 | fill="#000000" | ||
824 | font-family="Courier" | ||
825 | font-style="normal" | 615 | font-style="normal" |
826 | font-weight="bold" | 616 | font-weight="bold" |
827 | font-size="192" | 617 | font-size="192" |
828 | text-anchor="start" | 618 | id="text204" |
829 | id="text204">gp_tasks</text> | 619 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:start;fill:#000000">gp_tasks</text> |
830 | <!-- Text --> | 620 | <!-- Text --> |
831 | <text | 621 | <text |
832 | xml:space="preserve" | 622 | xml:space="preserve" |
833 | x="7500" | 623 | x="7523.7578" |
834 | y="4800" | 624 | y="6985.7236" |
835 | fill="#000000" | ||
836 | font-family="Courier" | ||
837 | font-style="normal" | 625 | font-style="normal" |
838 | font-weight="bold" | 626 | font-weight="bold" |
839 | font-size="192" | 627 | font-size="192" |
840 | text-anchor="start" | 628 | id="text206" |
841 | id="text206">exp_tasks</text> | 629 | style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:start;fill:#000000">exp_tasks</text> |
842 | </g> | 630 | </g> |
843 | </svg> | 631 | </svg> |
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html index e62c7c34a369..8e4f873b979f 100644 --- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html +++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html | |||
@@ -160,9 +160,9 @@ was in flight. | |||
160 | If the CPU is idle, then <tt>sync_sched_exp_handler()</tt> reports | 160 | If the CPU is idle, then <tt>sync_sched_exp_handler()</tt> reports |
161 | the quiescent state. | 161 | the quiescent state. |
162 | 162 | ||
163 | <p> | 163 | <p> Otherwise, the handler forces a future context switch by setting the |
164 | Otherwise, the handler invokes <tt>resched_cpu()</tt>, which forces | 164 | NEED_RESCHED flag in the current task's thread_info flags and the CPU's preempt |
165 | a future context switch. | 165 | counter. |
166 | At the time of the context switch, the CPU reports the quiescent state. | 166 | At the time of the context switch, the CPU reports the quiescent state. |
167 | Should the CPU go offline first, it will report the quiescent state | 167 | Should the CPU go offline first, it will report the quiescent state |
168 | at that time. | 168 | at that time. |
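A minimal sketch of the mechanism described in the new text above (illustrative only, not the handler's exact code; it assumes the IPI handler is running on the CPU whose quiescent state is needed):

    /*
     * Ask for a context switch without calling resched_cpu(): mark the
     * current task as needing to reschedule and fold that request into
     * this CPU's preempt counter, so the next preempt_enable() or
     * interrupt return will trigger the switch.
     */
    set_tsk_need_resched(current);
    set_preempt_need_resched();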
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html index a346ce0116eb..e4d94fba6c89 100644 --- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html +++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html | |||
@@ -77,7 +77,7 @@ The key point is that the lock-acquisition functions, including | |||
77 | <tt>smp_mb__after_unlock_lock()</tt> immediately after successful | 77 | <tt>smp_mb__after_unlock_lock()</tt> immediately after successful |
78 | acquisition of the lock. | 78 | acquisition of the lock. |
79 | 79 | ||
80 | <p>Therefore, for any given <tt>rcu_node</tt> struction, any access | 80 | <p>Therefore, for any given <tt>rcu_node</tt> structure, any access |
81 | happening before one of the above lock-release functions will be seen | 81 | happening before one of the above lock-release functions will be seen |
82 | by all CPUs as happening before any access happening after a later | 82 | by all CPUs as happening before any access happening after a later |
83 | one of the above lock-acquisition functions. | 83 | one of the above lock-acquisition functions. |
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html index 43c4e2f05f40..9fca73e03a98 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.html +++ b/Documentation/RCU/Design/Requirements/Requirements.html | |||
@@ -900,8 +900,6 @@ Except where otherwise noted, these non-guarantees were premeditated. | |||
900 | Grace Periods Don't Partition Read-Side Critical Sections</a> | 900 | Grace Periods Don't Partition Read-Side Critical Sections</a> |
901 | <li> <a href="#Read-Side Critical Sections Don't Partition Grace Periods"> | 901 | <li> <a href="#Read-Side Critical Sections Don't Partition Grace Periods"> |
902 | Read-Side Critical Sections Don't Partition Grace Periods</a> | 902 | Read-Side Critical Sections Don't Partition Grace Periods</a> |
903 | <li> <a href="#Disabling Preemption Does Not Block Grace Periods"> | ||
904 | Disabling Preemption Does Not Block Grace Periods</a> | ||
905 | </ol> | 903 | </ol> |
906 | 904 | ||
907 | <h3><a name="Readers Impose Minimal Ordering">Readers Impose Minimal Ordering</a></h3> | 905 | <h3><a name="Readers Impose Minimal Ordering">Readers Impose Minimal Ordering</a></h3> |
@@ -1259,54 +1257,6 @@ of RCU grace periods. | |||
1259 | <tr><td> </td></tr> | 1257 | <tr><td> </td></tr> |
1260 | </table> | 1258 | </table> |
1261 | 1259 | ||
1262 | <h3><a name="Disabling Preemption Does Not Block Grace Periods"> | ||
1263 | Disabling Preemption Does Not Block Grace Periods</a></h3> | ||
1264 | |||
1265 | <p> | ||
1266 | There was a time when disabling preemption on any given CPU would block | ||
1267 | subsequent grace periods. | ||
1268 | However, this was an accident of implementation and is not a requirement. | ||
1269 | And in the current Linux-kernel implementation, disabling preemption | ||
1270 | on a given CPU in fact does not block grace periods, as Oleg Nesterov | ||
1271 | <a href="https://lkml.kernel.org/g/20150614193825.GA19582@redhat.com">demonstrated</a>. | ||
1272 | |||
1273 | <p> | ||
1274 | If you need a preempt-disable region to block grace periods, you need to add | ||
1275 | <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>, for example | ||
1276 | as follows: | ||
1277 | |||
1278 | <blockquote> | ||
1279 | <pre> | ||
1280 | 1 preempt_disable(); | ||
1281 | 2 rcu_read_lock(); | ||
1282 | 3 do_something(); | ||
1283 | 4 rcu_read_unlock(); | ||
1284 | 5 preempt_enable(); | ||
1285 | 6 | ||
1286 | 7 /* Spinlocks implicitly disable preemption. */ | ||
1287 | 8 spin_lock(&mylock); | ||
1288 | 9 rcu_read_lock(); | ||
1289 | 10 do_something(); | ||
1290 | 11 rcu_read_unlock(); | ||
1291 | 12 spin_unlock(&mylock); | ||
1292 | </pre> | ||
1293 | </blockquote> | ||
1294 | |||
1295 | <p> | ||
1296 | In theory, you could enter the RCU read-side critical section first, | ||
1297 | but it is more efficient to keep the entire RCU read-side critical | ||
1298 | section contained in the preempt-disable region as shown above. | ||
1299 | Of course, RCU read-side critical sections that extend outside of | ||
1300 | preempt-disable regions will work correctly, but such critical sections | ||
1301 | can be preempted, which forces <tt>rcu_read_unlock()</tt> to do | ||
1302 | more work. | ||
1303 | And no, this is <i>not</i> an invitation to enclose all of your RCU | ||
1304 | read-side critical sections within preempt-disable regions, because | ||
1305 | doing so would degrade real-time response. | ||
1306 | |||
1307 | <p> | ||
1308 | This non-requirement appeared with preemptible RCU. | ||
1309 | |||
1310 | <h2><a name="Parallelism Facts of Life">Parallelism Facts of Life</a></h2> | 1260 | <h2><a name="Parallelism Facts of Life">Parallelism Facts of Life</a></h2> |
1311 | 1261 | ||
1312 | <p> | 1262 | <p> |
@@ -1381,6 +1331,7 @@ Classes of quality-of-implementation requirements are as follows: | |||
1381 | <ol> | 1331 | <ol> |
1382 | <li> <a href="#Specialization">Specialization</a> | 1332 | <li> <a href="#Specialization">Specialization</a> |
1383 | <li> <a href="#Performance and Scalability">Performance and Scalability</a> | 1333 | <li> <a href="#Performance and Scalability">Performance and Scalability</a> |
1334 | <li> <a href="#Forward Progress">Forward Progress</a> | ||
1384 | <li> <a href="#Composability">Composability</a> | 1335 | <li> <a href="#Composability">Composability</a> |
1385 | <li> <a href="#Corner Cases">Corner Cases</a> | 1336 | <li> <a href="#Corner Cases">Corner Cases</a> |
1386 | </ol> | 1337 | </ol> |
@@ -1645,7 +1596,7 @@ used in place of <tt>synchronize_rcu()</tt> as follows: | |||
1645 | 16 struct foo *p; | 1596 | 16 struct foo *p; |
1646 | 17 | 1597 | 17 |
1647 | 18 spin_lock(&gp_lock); | 1598 | 18 spin_lock(&gp_lock); |
1648 | 19 p = rcu_dereference(gp); | 1599 | 19 p = rcu_access_pointer(gp); |
1649 | 20 if (!p) { | 1600 | 20 if (!p) { |
1650 | 21 spin_unlock(&gp_lock); | 1601 | 21 spin_unlock(&gp_lock); |
1651 | 22 return false; | 1602 | 22 return false; |
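The substitution above reflects a general rule: when the update-side lock is held and the pointer's value is only tested, not dereferenced, rcu_access_pointer() is sufficient and avoids the ordering obligations of rcu_dereference(). A hedged sketch using the running example's names (gp, gp_lock):

    spin_lock(&gp_lock);
    p = rcu_access_pointer(gp);    /* value only tested for NULL under the lock */
    if (!p) {
        spin_unlock(&gp_lock);
        return false;              /* nothing to remove */
    }
    /* ... p may safely be dereferenced here, still under gp_lock ... */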
@@ -1822,6 +1773,106 @@ so it is too early to tell whether they will stand the test of time. | |||
1822 | RCU thus provides a range of tools to allow updaters to strike the | 1773 | RCU thus provides a range of tools to allow updaters to strike the |
1823 | required tradeoff between latency, flexibility and CPU overhead. | 1774 | required tradeoff between latency, flexibility and CPU overhead. |
1824 | 1775 | ||
1776 | <h3><a name="Forward Progress">Forward Progress</a></h3> | ||
1777 | |||
1778 | <p> | ||
1779 | In theory, delaying grace-period completion and callback invocation | ||
1780 | is harmless. | ||
1781 | In practice, not only are memory sizes finite but also callbacks sometimes | ||
1782 | do wakeups, and sufficiently deferred wakeups can be difficult | ||
1783 | to distinguish from system hangs. | ||
1784 | Therefore, RCU must provide a number of mechanisms to promote forward | ||
1785 | progress. | ||
1786 | |||
1787 | <p> | ||
1788 | These mechanisms are not foolproof, nor can they be. | ||
1789 | For one simple example, an infinite loop in an RCU read-side critical | ||
1790 | section must by definition prevent later grace periods from ever completing. | ||
1791 | For a more involved example, consider a 64-CPU system built with | ||
1792 | <tt>CONFIG_RCU_NOCB_CPU=y</tt> and booted with <tt>rcu_nocbs=1-63</tt>, | ||
1793 | where CPUs 1 through 63 spin in tight loops that invoke | ||
1794 | <tt>call_rcu()</tt>. | ||
1795 | Even if these tight loops also contain calls to <tt>cond_resched()</tt> | ||
1796 | (thus allowing grace periods to complete), CPU 0 simply will | ||
1797 | not be able to invoke callbacks as fast as the other 63 CPUs can | ||
1798 | register them, at least not until the system runs out of memory. | ||
1799 | In both of these examples, the Spiderman principle applies: With great | ||
1800 | power comes great responsibility. | ||
1801 | However, short of this level of abuse, RCU is required to | ||
1802 | ensure timely completion of grace periods and timely invocation of | ||
1803 | callbacks. | ||
1804 | |||
1805 | <p> | ||
1806 | RCU takes the following steps to encourage timely completion of | ||
1807 | grace periods: | ||
1808 | |||
1809 | <ol> | ||
1810 | <li> If a grace period fails to complete within 100 milliseconds, | ||
1811 | RCU causes future invocations of <tt>cond_resched()</tt> on | ||
1812 | the holdout CPUs to provide an RCU quiescent state. | ||
1813 | RCU also causes those CPUs' <tt>need_resched()</tt> invocations | ||
1814 | to return <tt>true</tt>, but only after the corresponding CPU's | ||
1815 | next scheduling-clock interrupt. | ||
1816 | <li> CPUs mentioned in the <tt>nohz_full</tt> kernel boot parameter | ||
1817 | can run indefinitely in the kernel without scheduling-clock | ||
1818 | interrupts, which defeats the above <tt>need_resched()</tt> | ||
1819 | stratagem. | ||
1820 | RCU will therefore invoke <tt>resched_cpu()</tt> on any | ||
1821 | <tt>nohz_full</tt> CPUs still holding out after | ||
1822 | 109 milliseconds. | ||
1823 | <li> In kernels built with <tt>CONFIG_RCU_BOOST=y</tt>, if a given | ||
1824 | task that has been preempted within an RCU read-side critical | ||
1825 | section is holding out for more than 500 milliseconds, | ||
1826 | RCU will resort to priority boosting. | ||
1827 | <li> If a CPU is still holding out 10 seconds into the grace | ||
1828 | period, RCU will invoke <tt>resched_cpu()</tt> on it regardless | ||
1829 | of its <tt>nohz_full</tt> state. | ||
1830 | </ol> | ||
1831 | |||
1832 | <p> | ||
1833 | The above values are defaults for systems running with <tt>HZ=1000</tt>. | ||
1834 | They will vary as the value of <tt>HZ</tt> varies, and can also be | ||
1835 | changed using the relevant Kconfig options and kernel boot parameters. | ||
1836 | RCU currently does not do much sanity checking of these | ||
1837 | parameters, so please use caution when changing them. | ||
1838 | Note that these forward-progress measures are provided only for RCU, | ||
1839 | not for | ||
1840 | <a href="#Sleepable RCU">SRCU</a> or | ||
1841 | <a href="#Tasks RCU">Tasks RCU</a>. | ||
1842 | |||
1843 | <p> | ||
1844 | RCU takes the following steps in <tt>call_rcu()</tt> to encourage timely | ||
1845 | invocation of callbacks when any given non-<tt>rcu_nocbs</tt> CPU has | ||
1846 | 10,000 callbacks, or has 10,000 more callbacks than it had the last time | ||
1847 | encouragement was provided: | ||
1848 | |||
1849 | <ol> | ||
1850 | <li> Starts a grace period, if one is not already in progress. | ||
1851 | <li> Forces immediate checking for quiescent states, rather than | ||
1852 | waiting for three milliseconds to have elapsed since the | ||
1853 | beginning of the grace period. | ||
1854 | <li> Immediately tags the CPU's callbacks with their grace period | ||
1855 | completion numbers, rather than waiting for the <tt>RCU_SOFTIRQ</tt> | ||
1856 | handler to get around to it. | ||
1857 | <li> Lifts callback-execution batch limits, which speeds up callback | ||
1858 | invocation at the expense of degrading realtime response. | ||
1859 | </ol> | ||
1860 | |||
1861 | <p> | ||
1862 | Again, these are default values when running at <tt>HZ=1000</tt>, | ||
1863 | and can be overridden. | ||
1864 | Again, these forward-progress measures are provided only for RCU, | ||
1865 | not for | ||
1866 | <a href="#Sleepable RCU">SRCU</a> or | ||
1867 | <a href="#Tasks RCU">Tasks RCU</a>. | ||
1868 | Even for RCU, callback-invocation forward progress for <tt>rcu_nocbs</tt> | ||
1869 | CPUs is much less well-developed, in part because workloads benefiting | ||
1870 | from <tt>rcu_nocbs</tt> CPUs tend to invoke <tt>call_rcu()</tt> | ||
1871 | relatively infrequently. | ||
1872 | If workloads emerge that need both <tt>rcu_nocbs</tt> CPUs and high | ||
1873 | <tt>call_rcu()</tt> invocation rates, then additional forward-progress | ||
1874 | work will be required. | ||
1875 | |||
1825 | <h3><a name="Composability">Composability</a></h3> | 1876 | <h3><a name="Composability">Composability</a></h3> |
1826 | 1877 | ||
1827 | <p> | 1878 | <p> |
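The rcu_nocbs example in the new Forward Progress section can be sketched as follows (illustrative only; struct foo, foo_free_cb(), and flood_call_rcu() are hypothetical):

    struct foo {
        struct rcu_head rh;
    };

    static void foo_free_cb(struct rcu_head *rhp)
    {
        kfree(container_of(rhp, struct foo, rh));
    }

    /*
     * Run on each of CPUs 1-63: registers callbacks faster than the
     * offloaded-callback kthreads (confined to CPU 0 in this
     * configuration) can invoke them.  cond_resched() allows grace
     * periods to complete, but does nothing to speed up callback
     * invocation, so memory is eventually exhausted.
     */
    static void flood_call_rcu(void)
    {
        struct foo *p;

        for (;;) {
            p = kmalloc(sizeof(*p), GFP_KERNEL);
            if (!p)
                continue;
            call_rcu(&p->rh, foo_free_cb);
            cond_resched();
        }
    }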
@@ -2272,7 +2323,7 @@ that meets this requirement. | |||
2272 | Furthermore, NMI handlers can be interrupted by what appear to RCU | 2323 | Furthermore, NMI handlers can be interrupted by what appear to RCU |
2273 | to be normal interrupts. | 2324 | to be normal interrupts. |
2274 | One way that this can happen is for code that directly invokes | 2325 | One way that this can happen is for code that directly invokes |
2275 | <tt>rcu_irq_enter()</tt> and </tt>rcu_irq_exit()</tt> to be called | 2326 | <tt>rcu_irq_enter()</tt> and <tt>rcu_irq_exit()</tt> to be called |
2276 | from an NMI handler. | 2327 | from an NMI handler. |
2277 | This astonishing fact of life prompted the current code structure, | 2328 | This astonishing fact of life prompted the current code structure, |
2278 | which has <tt>rcu_irq_enter()</tt> invoking <tt>rcu_nmi_enter()</tt> | 2329 | which has <tt>rcu_irq_enter()</tt> invoking <tt>rcu_nmi_enter()</tt> |
@@ -2294,7 +2345,7 @@ via <tt>del_timer_sync()</tt> or similar. | |||
2294 | <p> | 2345 | <p> |
2295 | Unfortunately, there is no way to cancel an RCU callback; | 2346 | Unfortunately, there is no way to cancel an RCU callback; |
2296 | once you invoke <tt>call_rcu()</tt>, the callback function is | 2347 | once you invoke <tt>call_rcu()</tt>, the callback function is |
2297 | going to eventually be invoked, unless the system goes down first. | 2348 | eventually going to be invoked, unless the system goes down first. |
2298 | Because it is normally considered socially irresponsible to crash the system | 2349 | Because it is normally considered socially irresponsible to crash the system |
2299 | in response to a module unload request, we need some other way | 2350 | in response to a module unload request, we need some other way |
2300 | to deal with in-flight RCU callbacks. | 2351 | to deal with in-flight RCU callbacks. |
@@ -2424,23 +2475,37 @@ for context-switch-heavy <tt>CONFIG_NO_HZ_FULL=y</tt> workloads, | |||
2424 | but there is room for further improvement. | 2475 | but there is room for further improvement. |
2425 | 2476 | ||
2426 | <p> | 2477 | <p> |
2427 | In the past, it was forbidden to disable interrupts across an | 2478 | It is forbidden to hold any of the scheduler's runqueue or priority-inheritance |
2428 | <tt>rcu_read_unlock()</tt> unless that interrupt-disabled region | 2479 | spinlocks across an <tt>rcu_read_unlock()</tt> unless interrupts have been |
2429 | of code also included the matching <tt>rcu_read_lock()</tt>. | 2480 | disabled across the entire RCU read-side critical section, that is, |
2430 | Violating this restriction could result in deadlocks involving the | 2481 | up to and including the matching <tt>rcu_read_lock()</tt>. |
2431 | scheduler's runqueue and priority-inheritance spinlocks. | 2482 | Violating this restriction can result in deadlocks involving these |
2432 | This restriction was lifted when interrupt-disabled calls to | 2483 | scheduler spinlocks. |
2433 | <tt>rcu_read_unlock()</tt> started deferring the reporting of | 2484 | There was hope that this restriction might be lifted when interrupt-disabled |
2434 | the resulting RCU-preempt quiescent state until the end of that | 2485 | calls to <tt>rcu_read_unlock()</tt> started deferring the reporting of |
2486 | the resulting RCU-preempt quiescent state until the end of the corresponding | ||
2435 | interrupts-disabled region. | 2487 | interrupts-disabled region. |
2436 | This deferred reporting means that the scheduler's runqueue and | 2488 | Unfortunately, timely reporting of the corresponding quiescent state |
2437 | priority-inheritance locks cannot be held while reporting an RCU-preempt | 2489 | to expedited grace periods requires a call to <tt>raise_softirq()</tt>, |
2438 | quiescent state, which lifts the earlier restriction, at least from | 2490 | which can acquire these scheduler spinlocks. |
2439 | a deadlock perspective. | 2491 | In addition, real-time systems using RCU priority boosting |
2440 | Unfortunately, real-time systems using RCU priority boosting may | ||
2441 | need this restriction to remain in effect because deferred | 2492 | need this restriction to remain in effect because deferred |
2442 | quiescent-state reporting also defers deboosting, which in turn | 2493 | quiescent-state reporting would also defer deboosting, which in turn |
2443 | degrades real-time latencies. | 2494 | would degrade real-time latencies. |
2495 | |||
2496 | <p> | ||
2497 | In theory, if a given RCU read-side critical section could be | ||
2498 | guaranteed to be less than one second in duration, holding a scheduler | ||
2499 | spinlock across that critical section's <tt>rcu_read_unlock()</tt> | ||
2500 | would require only that preemption be disabled across the entire | ||
2501 | RCU read-side critical section, not interrupts. | ||
2502 | Unfortunately, given the possibility of vCPU preemption, long-running | ||
2503 | interrupts, and so on, it is not possible in practice to guarantee | ||
2504 | that a given RCU read-side critical section will complete in less than | ||
2505 | one second. | ||
2506 | Therefore, as noted above, if scheduler spinlocks are held across | ||
2507 | a given call to <tt>rcu_read_unlock()</tt>, interrupts must be | ||
2508 | disabled across the entire RCU read-side critical section. | ||
2444 | 2509 | ||
2445 | <h3><a name="Tracing and RCU">Tracing and RCU</a></h3> | 2510 | <h3><a name="Tracing and RCU">Tracing and RCU</a></h3> |
2446 | 2511 | ||
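A minimal sketch of the resulting rule (illustrative; do_something_with() is a placeholder): if scheduler runqueue or priority-inheritance spinlocks might be held across the rcu_read_unlock(), then interrupts, not merely preemption, must be disabled from before the matching rcu_read_lock():

    unsigned long flags;

    local_irq_save(flags);        /* interrupts off before entering the critical section */
    rcu_read_lock();
    do_something_with(rcu_dereference(gp));
    rcu_read_unlock();            /* safe even if scheduler spinlocks are held here */
    local_irq_restore(flags);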
@@ -3233,6 +3298,11 @@ For example, RCU callback overhead might be charged back to the | |||
3233 | originating <tt>call_rcu()</tt> instance, though probably not | 3298 | originating <tt>call_rcu()</tt> instance, though probably not |
3234 | in production kernels. | 3299 | in production kernels. |
3235 | 3300 | ||
3301 | <p> | ||
3302 | Additional work may be required to provide reasonable forward-progress | ||
3303 | guarantees under heavy load for grace periods and for callback | ||
3304 | invocation. | ||
3305 | |||
3236 | <h2><a name="Summary">Summary</a></h2> | 3306 | <h2><a name="Summary">Summary</a></h2> |
3237 | 3307 | ||
3238 | <p> | 3308 | <p> |
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt index 49747717d905..6f469864d9f5 100644 --- a/Documentation/RCU/checklist.txt +++ b/Documentation/RCU/checklist.txt | |||
@@ -63,7 +63,7 @@ over a rather long period of time, but improvements are always welcome! | |||
63 | pointer must be covered by rcu_read_lock(), rcu_read_lock_bh(), | 63 | pointer must be covered by rcu_read_lock(), rcu_read_lock_bh(), |
64 | rcu_read_lock_sched(), or by the appropriate update-side lock. | 64 | rcu_read_lock_sched(), or by the appropriate update-side lock. |
65 | Disabling of preemption can serve as rcu_read_lock_sched(), but | 65 | Disabling of preemption can serve as rcu_read_lock_sched(), but |
66 | is less readable. | 66 | is less readable and prevents lockdep from detecting locking issues. |
67 | 67 | ||
68 | Letting RCU-protected pointers "leak" out of an RCU read-side | 68 | Letting RCU-protected pointers "leak" out of an RCU read-side |
69 | critical section is every bit as bad as letting them leak out | 69 | critical section is every bit as bad as letting them leak out |
@@ -285,11 +285,7 @@ over a rather long period of time, but improvements are always welcome! | |||
285 | here is that superuser already has lots of ways to crash | 285 | here is that superuser already has lots of ways to crash |
286 | the machine. | 286 | the machine. |
287 | 287 | ||
288 | d. Use call_rcu_bh() rather than call_rcu(), in order to take | 288 | d. Periodically invoke synchronize_rcu(), permitting a limited |
289 | advantage of call_rcu_bh()'s faster grace periods. (This | ||
290 | is only a partial solution, though.) | ||
291 | |||
292 | e. Periodically invoke synchronize_rcu(), permitting a limited | ||
293 | number of updates per grace period. | 289 | number of updates per grace period. |
294 | 290 | ||
295 | The same cautions apply to call_rcu_bh(), call_rcu_sched(), | 291 | The same cautions apply to call_rcu_bh(), call_rcu_sched(), |
@@ -324,37 +320,14 @@ over a rather long period of time, but improvements are always welcome! | |||
324 | will break Alpha, cause aggressive compilers to generate bad code, | 320 | will break Alpha, cause aggressive compilers to generate bad code, |
325 | and confuse people trying to read your code. | 321 | and confuse people trying to read your code. |
326 | 322 | ||
327 | 11. Note that synchronize_rcu() -only- guarantees to wait until | 323 | 11. Any lock acquired by an RCU callback must be acquired elsewhere |
328 | all currently executing rcu_read_lock()-protected RCU read-side | ||
329 | critical sections complete. It does -not- necessarily guarantee | ||
330 | that all currently running interrupts, NMIs, preempt_disable() | ||
331 | code, or idle loops will complete. Therefore, if your | ||
332 | read-side critical sections are protected by something other | ||
333 | than rcu_read_lock(), do -not- use synchronize_rcu(). | ||
334 | |||
335 | Similarly, disabling preemption is not an acceptable substitute | ||
336 | for rcu_read_lock(). Code that attempts to use preemption | ||
337 | disabling where it should be using rcu_read_lock() will break | ||
338 | in CONFIG_PREEMPT=y kernel builds. | ||
339 | |||
340 | If you want to wait for interrupt handlers, NMI handlers, and | ||
341 | code under the influence of preempt_disable(), you instead | ||
342 | need to use synchronize_irq() or synchronize_sched(). | ||
343 | |||
344 | This same limitation also applies to synchronize_rcu_bh() | ||
345 | and synchronize_srcu(), as well as to the asynchronous and | ||
346 | expedited forms of the three primitives, namely call_rcu(), | ||
347 | call_rcu_bh(), call_srcu(), synchronize_rcu_expedited(), | ||
348 | synchronize_rcu_bh_expedited(), and synchronize_srcu_expedited(). | ||
349 | |||
350 | 12. Any lock acquired by an RCU callback must be acquired elsewhere | ||
351 | with softirq disabled, e.g., via spin_lock_irqsave(), | 324 | with softirq disabled, e.g., via spin_lock_irqsave(), |
352 | spin_lock_bh(), etc. Failing to disable irq on a given | 325 | spin_lock_bh(), etc. Failing to disable irq on a given |
353 | acquisition of that lock will result in deadlock as soon as | 326 | acquisition of that lock will result in deadlock as soon as |
354 | the RCU softirq handler happens to run your RCU callback while | 327 | the RCU softirq handler happens to run your RCU callback while |
355 | interrupting that acquisition's critical section. | 328 | interrupting that acquisition's critical section. |
356 | 329 | ||
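A hedged sketch of the rule in the item above (mylock, my_cb(), and update_something() are hypothetical):

    static DEFINE_SPINLOCK(mylock);

    static void my_cb(struct rcu_head *rhp)    /* runs from the RCU softirq handler */
    {
        spin_lock(&mylock);
        /* ... */
        spin_unlock(&mylock);
    }

    static void update_something(void)
    {
        spin_lock_bh(&mylock);    /* softirqs disabled: my_cb() cannot interrupt us here */
        /* ... */
        spin_unlock_bh(&mylock);
    }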
357 | 13. RCU callbacks can be and are executed in parallel. In many cases, | 330 | 12. RCU callbacks can be and are executed in parallel. In many cases, |
358 | the callback code is simply a wrapper around kfree(), so that this | 331 | the callback code is simply a wrapper around kfree(), so that this |
359 | is not an issue (or, more accurately, to the extent that it is | 332 | is not an issue (or, more accurately, to the extent that it is |
360 | an issue, the memory-allocator locking handles it). However, | 333 | an issue, the memory-allocator locking handles it). However, |
@@ -370,7 +343,7 @@ over a rather long period of time, but improvements are always welcome! | |||
370 | not the case, a self-spawning RCU callback would prevent the | 343 | not the case, a self-spawning RCU callback would prevent the |
371 | victim CPU from ever going offline.) | 344 | victim CPU from ever going offline.) |
372 | 345 | ||
373 | 14. Unlike other forms of RCU, it -is- permissible to block in an | 346 | 13. Unlike other forms of RCU, it -is- permissible to block in an |
374 | SRCU read-side critical section (demarked by srcu_read_lock() | 347 | SRCU read-side critical section (demarked by srcu_read_lock() |
375 | and srcu_read_unlock()), hence the "SRCU": "sleepable RCU". | 348 | and srcu_read_unlock()), hence the "SRCU": "sleepable RCU". |
376 | Please note that if you don't need to sleep in read-side critical | 349 | Please note that if you don't need to sleep in read-side critical |
@@ -414,7 +387,7 @@ over a rather long period of time, but improvements are always welcome! | |||
414 | Note that rcu_dereference() and rcu_assign_pointer() relate to | 387 | Note that rcu_dereference() and rcu_assign_pointer() relate to |
415 | SRCU just as they do to other forms of RCU. | 388 | SRCU just as they do to other forms of RCU. |
416 | 389 | ||
417 | 15. The whole point of call_rcu(), synchronize_rcu(), and friends | 390 | 14. The whole point of call_rcu(), synchronize_rcu(), and friends |
418 | is to wait until all pre-existing readers have finished before | 391 | is to wait until all pre-existing readers have finished before |
419 | carrying out some otherwise-destructive operation. It is | 392 | carrying out some otherwise-destructive operation. It is |
420 | therefore critically important to -first- remove any path | 393 | therefore critically important to -first- remove any path |
@@ -426,13 +399,13 @@ over a rather long period of time, but improvements are always welcome! | |||
426 | is the caller's responsibility to guarantee that any subsequent | 399 | is the caller's responsibility to guarantee that any subsequent |
427 | readers will execute safely. | 400 | readers will execute safely. |
428 | 401 | ||
429 | 16. The various RCU read-side primitives do -not- necessarily contain | 402 | 15. The various RCU read-side primitives do -not- necessarily contain |
430 | memory barriers. You should therefore plan for the CPU | 403 | memory barriers. You should therefore plan for the CPU |
431 | and the compiler to freely reorder code into and out of RCU | 404 | and the compiler to freely reorder code into and out of RCU |
432 | read-side critical sections. It is the responsibility of the | 405 | read-side critical sections. It is the responsibility of the |
433 | RCU update-side primitives to deal with this. | 406 | RCU update-side primitives to deal with this. |
434 | 407 | ||
435 | 17. Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the | 408 | 16. Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the |
436 | __rcu sparse checks to validate your RCU code. These can help | 409 | __rcu sparse checks to validate your RCU code. These can help |
437 | find problems as follows: | 410 | find problems as follows: |
438 | 411 | ||
@@ -455,7 +428,7 @@ over a rather long period of time, but improvements are always welcome! | |||
455 | These debugging aids can help you find problems that are | 428 | These debugging aids can help you find problems that are |
456 | otherwise extremely difficult to spot. | 429 | otherwise extremely difficult to spot. |
457 | 430 | ||
458 | 18. If you register a callback using call_rcu(), call_rcu_bh(), | 431 | 17. If you register a callback using call_rcu(), call_rcu_bh(), |
459 | call_rcu_sched(), or call_srcu(), and pass in a function defined | 432 | call_rcu_sched(), or call_srcu(), and pass in a function defined |
460 | within a loadable module, then it is necessary to wait for | 433 | within a loadable module, then it is necessary to wait for |
461 | all pending callbacks to be invoked after the last invocation | 434 | all pending callbacks to be invoked after the last invocation |
@@ -469,8 +442,8 @@ over a rather long period of time, but improvements are always welcome! | |||
469 | You instead need to use one of the barrier functions: | 442 | You instead need to use one of the barrier functions: |
470 | 443 | ||
471 | o call_rcu() -> rcu_barrier() | 444 | o call_rcu() -> rcu_barrier() |
472 | o call_rcu_bh() -> rcu_barrier_bh() | 445 | o call_rcu_bh() -> rcu_barrier() |
473 | o call_rcu_sched() -> rcu_barrier_sched() | 446 | o call_rcu_sched() -> rcu_barrier() |
474 | o call_srcu() -> srcu_barrier() | 447 | o call_srcu() -> srcu_barrier() |
475 | 448 | ||
476 | However, these barrier functions are absolutely -not- guaranteed | 449 | However, these barrier functions are absolutely -not- guaranteed |
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt index 491043fd976f..073dbc12d1ea 100644 --- a/Documentation/RCU/stallwarn.txt +++ b/Documentation/RCU/stallwarn.txt | |||
@@ -176,9 +176,8 @@ causing stalls, and that the stall was affecting RCU-sched. This message | |||
176 | will normally be followed by stack dumps for each CPU. Please note that | 176 | will normally be followed by stack dumps for each CPU. Please note that |
177 | PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, and that | 177 | PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, and that |
178 | the tasks will be indicated by PID, for example, "P3421". It is even | 178 | the tasks will be indicated by PID, for example, "P3421". It is even |
179 | possible for a rcu_preempt_state stall to be caused by both CPUs -and- | 179 | possible for an rcu_state stall to be caused by both CPUs -and- tasks, |
180 | tasks, in which case the offending CPUs and tasks will all be called | 180 | in which case the offending CPUs and tasks will all be called out in the list. |
181 | out in the list. | ||
182 | 181 | ||
183 | CPU 2's "(3 GPs behind)" indicates that this CPU has not interacted with | 182 | CPU 2's "(3 GPs behind)" indicates that this CPU has not interacted with |
184 | the RCU core for the past three grace periods. In contrast, CPU 16's "(0 | 183 | the RCU core for the past three grace periods. In contrast, CPU 16's "(0 |
@@ -206,7 +205,7 @@ handlers are no longer able to execute on this CPU. This can happen if | |||
206 | the stalled CPU is spinning with interrupts disabled, or, in -rt | 205 | the stalled CPU is spinning with interrupts disabled, or, in -rt |
207 | kernels, if a high-priority process is starving RCU's softirq handler. | 206 | kernels, if a high-priority process is starving RCU's softirq handler. |
208 | 207 | ||
209 | The "fps=" shows the number of force-quiescent-state idle/offline | 208 | The "fqs=" shows the number of force-quiescent-state idle/offline |
210 | detection passes that the grace-period kthread has made across this | 209 | detection passes that the grace-period kthread has made across this |
211 | CPU since the last time that this CPU noted the beginning of a grace | 210 | CPU since the last time that this CPU noted the beginning of a grace |
212 | period. | 211 | period. |
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index 86d82f7f3500..4a6854318b17 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt | |||
@@ -266,7 +266,7 @@ rcu_dereference() | |||
266 | unnecessary overhead on Alpha CPUs. | 266 | unnecessary overhead on Alpha CPUs. |
267 | 267 | ||
268 | Note that the value returned by rcu_dereference() is valid | 268 | Note that the value returned by rcu_dereference() is valid |
269 | only within the enclosing RCU read-side critical section. | 269 | only within the enclosing RCU read-side critical section [1]. |
270 | For example, the following is -not- legal: | 270 | For example, the following is -not- legal: |
271 | 271 | ||
272 | rcu_read_lock(); | 272 | rcu_read_lock(); |
@@ -292,6 +292,19 @@ rcu_dereference() | |||
292 | typically used indirectly, via the _rcu list-manipulation | 292 | typically used indirectly, via the _rcu list-manipulation |
293 | primitives, such as list_for_each_entry_rcu(). | 293 | primitives, such as list_for_each_entry_rcu(). |
294 | 294 | ||
295 | [1] The variant rcu_dereference_protected() can be used outside | ||
296 | of an RCU read-side critical section as long as the usage is | ||
297 | protected by locks acquired by the update-side code. This variant | ||
298 | avoids the lockdep warning that would happen when using (for | ||
299 | example) rcu_dereference() without rcu_read_lock() protection. | ||
300 | Using rcu_dereference_protected() also has the advantage | ||
301 | of permitting compiler optimizations that rcu_dereference() | ||
302 | must prohibit. The rcu_dereference_protected() variant takes | ||
303 | a lockdep expression to indicate which locks must be acquired | ||
304 | by the caller. If the indicated protection is not provided, | ||
305 | a lockdep splat is emitted. See RCU/Design/Requirements.html | ||
306 | and the API's code comments for more details and example usage. | ||
307 | |||
295 | The following diagram shows how each API communicates among the | 308 | The following diagram shows how each API communicates among the |
296 | reader, updater, and reclaimer. | 309 | reader, updater, and reclaimer. |
297 | 310 | ||
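For example, an update-side sketch in the style of the update-side examples elsewhere in this document (the names gbl_foo, foo_mutex, old_fp, and new_fp are assumed from those examples; illustrative only):

    spin_lock(&foo_mutex);
    old_fp = rcu_dereference_protected(gbl_foo,
                                       lockdep_is_held(&foo_mutex));
    rcu_assign_pointer(gbl_foo, new_fp);
    spin_unlock(&foo_mutex);
    synchronize_rcu();
    kfree(old_fp);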
@@ -322,28 +335,27 @@ to their callers and (2) call_rcu() callbacks may be invoked. Efficient | |||
322 | implementations of the RCU infrastructure make heavy use of batching in | 335 | implementations of the RCU infrastructure make heavy use of batching in |
323 | order to amortize their overhead over many uses of the corresponding APIs. | 336 | order to amortize their overhead over many uses of the corresponding APIs. |
324 | 337 | ||
325 | There are no fewer than three RCU mechanisms in the Linux kernel; the | 338 | There are at least three flavors of RCU usage in the Linux kernel. The diagram |
326 | diagram above shows the first one, which is by far the most commonly used. | 339 | above shows the most common one. On the updater side, the rcu_assign_pointer(), |
327 | The rcu_dereference() and rcu_assign_pointer() primitives are used for | 340 | synchronize_rcu() and call_rcu() primitives used are the same for all three |
328 | all three mechanisms, but different defer and protect primitives are | 341 | flavors. However, for protection (on the reader side), the primitives used vary |
329 | used as follows: | 342 | depending on the flavor: |
330 | |||
331 | Defer Protect | ||
332 | 343 | ||
333 | a. synchronize_rcu() rcu_read_lock() / rcu_read_unlock() | 344 | a. rcu_read_lock() / rcu_read_unlock() |
334 | call_rcu() rcu_dereference() | 345 | rcu_dereference() |
335 | 346 | ||
336 | b. synchronize_rcu_bh() rcu_read_lock_bh() / rcu_read_unlock_bh() | 347 | b. rcu_read_lock_bh() / rcu_read_unlock_bh() |
337 | call_rcu_bh() rcu_dereference_bh() | 348 | local_bh_disable() / local_bh_enable() |
349 | rcu_dereference_bh() | ||
338 | 350 | ||
339 | c. synchronize_sched() rcu_read_lock_sched() / rcu_read_unlock_sched() | 351 | c. rcu_read_lock_sched() / rcu_read_unlock_sched() |
340 | call_rcu_sched() preempt_disable() / preempt_enable() | 352 | preempt_disable() / preempt_enable() |
341 | local_irq_save() / local_irq_restore() | 353 | local_irq_save() / local_irq_restore() |
342 | hardirq enter / hardirq exit | 354 | hardirq enter / hardirq exit |
343 | NMI enter / NMI exit | 355 | NMI enter / NMI exit |
344 | rcu_dereference_sched() | 356 | rcu_dereference_sched() |
345 | 357 | ||
346 | These three mechanisms are used as follows: | 358 | These three flavors are used as follows: |
347 | 359 | ||
348 | a. RCU applied to normal data structures. | 360 | a. RCU applied to normal data structures. |
349 | 361 | ||
@@ -867,18 +879,20 @@ RCU: Critical sections Grace period Barrier | |||
867 | 879 | ||
868 | bh: Critical sections Grace period Barrier | 880 | bh: Critical sections Grace period Barrier |
869 | 881 | ||
870 | rcu_read_lock_bh call_rcu_bh rcu_barrier_bh | 882 | rcu_read_lock_bh call_rcu rcu_barrier |
871 | rcu_read_unlock_bh synchronize_rcu_bh | 883 | rcu_read_unlock_bh synchronize_rcu |
872 | rcu_dereference_bh synchronize_rcu_bh_expedited | 884 | [local_bh_disable] synchronize_rcu_expedited |
885 | [and friends] | ||
886 | rcu_dereference_bh | ||
873 | rcu_dereference_bh_check | 887 | rcu_dereference_bh_check |
874 | rcu_dereference_bh_protected | 888 | rcu_dereference_bh_protected |
875 | rcu_read_lock_bh_held | 889 | rcu_read_lock_bh_held |
876 | 890 | ||
877 | sched: Critical sections Grace period Barrier | 891 | sched: Critical sections Grace period Barrier |
878 | 892 | ||
879 | rcu_read_lock_sched synchronize_sched rcu_barrier_sched | 893 | rcu_read_lock_sched call_rcu rcu_barrier |
880 | rcu_read_unlock_sched call_rcu_sched | 894 | rcu_read_unlock_sched synchronize_rcu |
881 | [preempt_disable] synchronize_sched_expedited | 895 | [preempt_disable] synchronize_rcu_expedited |
882 | [and friends] | 896 | [and friends] |
883 | rcu_read_lock_sched_notrace | 897 | rcu_read_lock_sched_notrace |
884 | rcu_read_unlock_sched_notrace | 898 | rcu_read_unlock_sched_notrace |
@@ -890,8 +904,8 @@ sched: Critical sections Grace period Barrier | |||
890 | 904 | ||
891 | SRCU: Critical sections Grace period Barrier | 905 | SRCU: Critical sections Grace period Barrier |
892 | 906 | ||
893 | srcu_read_lock synchronize_srcu srcu_barrier | 907 | srcu_read_lock call_srcu srcu_barrier |
894 | srcu_read_unlock call_srcu | 908 | srcu_read_unlock synchronize_srcu |
895 | srcu_dereference synchronize_srcu_expedited | 909 | srcu_dereference synchronize_srcu_expedited |
896 | srcu_dereference_check | 910 | srcu_dereference_check |
897 | srcu_read_lock_held | 911 | srcu_read_lock_held |
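For reference, a minimal SRCU usage sketch (my_srcu, gbl_foo, and the surrounding variables are hypothetical):

    DEFINE_SRCU(my_srcu);

    /* Reader */
    idx = srcu_read_lock(&my_srcu);
    p = srcu_dereference(gbl_foo, &my_srcu);
    /* ... use p ... */
    srcu_read_unlock(&my_srcu, idx);

    /* Updater */
    rcu_assign_pointer(gbl_foo, new_fp);
    synchronize_srcu(&my_srcu);    /* or call_srcu() for the asynchronous form */
    kfree(old_fp);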
@@ -1034,7 +1048,7 @@ Answer: Just as PREEMPT_RT permits preemption of spinlock | |||
1034 | spinlocks blocking while in RCU read-side critical | 1048 | spinlocks blocking while in RCU read-side critical |
1035 | sections. | 1049 | sections. |
1036 | 1050 | ||
1037 | Why the apparent inconsistency? Because it is it | 1051 | Why the apparent inconsistency? Because it is |
1038 | possible to use priority boosting to keep the RCU | 1052 | possible to use priority boosting to keep the RCU |
1039 | grace periods short if need be (for example, if running | 1053 | grace periods short if need be (for example, if running |
1040 | short of memory). In contrast, if blocking waiting | 1054 | short of memory). In contrast, if blocking waiting |
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index aefd358a5ca3..4f7c2367b2b6 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
@@ -3748,24 +3748,6 @@ | |||
3748 | in microseconds. The default of zero says | 3748 | in microseconds. The default of zero says |
3749 | no holdoff. | 3749 | no holdoff. |
3750 | 3750 | ||
3751 | rcutorture.cbflood_inter_holdoff= [KNL] | ||
3752 | Set holdoff time (jiffies) between successive | ||
3753 | callback-flood tests. | ||
3754 | |||
3755 | rcutorture.cbflood_intra_holdoff= [KNL] | ||
3756 | Set holdoff time (jiffies) between successive | ||
3757 | bursts of callbacks within a given callback-flood | ||
3758 | test. | ||
3759 | |||
3760 | rcutorture.cbflood_n_burst= [KNL] | ||
3761 | Set the number of bursts making up a given | ||
3762 | callback-flood test. Set this to zero to | ||
3763 | disable callback-flood testing. | ||
3764 | |||
3765 | rcutorture.cbflood_n_per_burst= [KNL] | ||
3766 | Set the number of callbacks to be registered | ||
3767 | in a given burst of a callback-flood test. | ||
3768 | |||
3769 | rcutorture.fqs_duration= [KNL] | 3751 | rcutorture.fqs_duration= [KNL] |
3770 | Set duration of force_quiescent_state bursts | 3752 | Set duration of force_quiescent_state bursts |
3771 | in microseconds. | 3753 | in microseconds. |
@@ -3778,6 +3760,23 @@ | |||
3778 | Set wait time between force_quiescent_state bursts | 3760 | Set wait time between force_quiescent_state bursts |
3779 | in seconds. | 3761 | in seconds. |
3780 | 3762 | ||
3763 | rcutorture.fwd_progress= [KNL] | ||
3764 | Enable RCU grace-period forward-progress testing | ||
3765 | for the types of RCU supporting this notion. | ||
3766 | |||
3767 | rcutorture.fwd_progress_div= [KNL] | ||
3768 | Specify the fraction of a CPU-stall-warning | ||
3769 | period to do tight-loop forward-progress testing. | ||
3770 | |||
3771 | rcutorture.fwd_progress_holdoff= [KNL] | ||
3772 | Number of seconds to wait between successive | ||
3773 | forward-progress tests. | ||
3774 | |||
3775 | rcutorture.fwd_progress_need_resched= [KNL] | ||
3776 | Enclose cond_resched() calls within checks for | ||
3777 | need_resched() during tight-loop forward-progress | ||
3778 | testing. | ||
3779 | |||
3781 | rcutorture.gp_cond= [KNL] | 3780 | rcutorture.gp_cond= [KNL] |
3782 | Use conditional/asynchronous update-side | 3781 | Use conditional/asynchronous update-side |
3783 | primitives, if available. | 3782 | primitives, if available. |
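For example, a boot command line exercising the new forward-progress parameters might look as follows (the values are illustrative, not recommendations):

    rcutorture.fwd_progress=1 rcutorture.fwd_progress_div=4 rcutorture.fwd_progress_holdoff=60 rcutorture.fwd_progress_need_resched=1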
diff --git a/MAINTAINERS b/MAINTAINERS index 6682420421c1..164da4ff11c1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -4098,7 +4098,7 @@ S: Supported | |||
4098 | F: drivers/net/ethernet/chelsio/cxgb4vf/ | 4098 | F: drivers/net/ethernet/chelsio/cxgb4vf/ |
4099 | 4099 | ||
4100 | CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER | 4100 | CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER |
4101 | M: Frederic Barrat <fbarrat@linux.vnet.ibm.com> | 4101 | M: Frederic Barrat <fbarrat@linux.ibm.com> |
4102 | M: Andrew Donnellan <andrew.donnellan@au1.ibm.com> | 4102 | M: Andrew Donnellan <andrew.donnellan@au1.ibm.com> |
4103 | L: linuxppc-dev@lists.ozlabs.org | 4103 | L: linuxppc-dev@lists.ozlabs.org |
4104 | S: Supported | 4104 | S: Supported |
@@ -4110,9 +4110,9 @@ F: Documentation/powerpc/cxl.txt | |||
4110 | F: Documentation/ABI/testing/sysfs-class-cxl | 4110 | F: Documentation/ABI/testing/sysfs-class-cxl |
4111 | 4111 | ||
4112 | CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER | 4112 | CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER |
4113 | M: Manoj N. Kumar <manoj@linux.vnet.ibm.com> | 4113 | M: Manoj N. Kumar <manoj@linux.ibm.com> |
4114 | M: Matthew R. Ochs <mrochs@linux.vnet.ibm.com> | 4114 | M: Matthew R. Ochs <mrochs@linux.ibm.com> |
4115 | M: Uma Krishnan <ukrishn@linux.vnet.ibm.com> | 4115 | M: Uma Krishnan <ukrishn@linux.ibm.com> |
4116 | L: linux-scsi@vger.kernel.org | 4116 | L: linux-scsi@vger.kernel.org |
4117 | S: Supported | 4117 | S: Supported |
4118 | F: drivers/scsi/cxlflash/ | 4118 | F: drivers/scsi/cxlflash/ |
@@ -5493,7 +5493,7 @@ S: Orphan | |||
5493 | F: fs/efs/ | 5493 | F: fs/efs/ |
5494 | 5494 | ||
5495 | EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER | 5495 | EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER |
5496 | M: Douglas Miller <dougmill@linux.vnet.ibm.com> | 5496 | M: Douglas Miller <dougmill@linux.ibm.com> |
5497 | L: netdev@vger.kernel.org | 5497 | L: netdev@vger.kernel.org |
5498 | S: Maintained | 5498 | S: Maintained |
5499 | F: drivers/net/ethernet/ibm/ehea/ | 5499 | F: drivers/net/ethernet/ibm/ehea/ |
@@ -5631,7 +5631,7 @@ F: Documentation/filesystems/ext4/ext4.rst | |||
5631 | F: fs/ext4/ | 5631 | F: fs/ext4/ |
5632 | 5632 | ||
5633 | Extended Verification Module (EVM) | 5633 | Extended Verification Module (EVM) |
5634 | M: Mimi Zohar <zohar@linux.vnet.ibm.com> | 5634 | M: Mimi Zohar <zohar@linux.ibm.com> |
5635 | L: linux-integrity@vger.kernel.org | 5635 | L: linux-integrity@vger.kernel.org |
5636 | S: Supported | 5636 | S: Supported |
5637 | F: security/integrity/evm/ | 5637 | F: security/integrity/evm/ |
@@ -5841,7 +5841,7 @@ F: include/linux/firmware.h | |||
5841 | 5841 | ||
5842 | FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card) | 5842 | FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card) |
5843 | M: Joshua Morris <josh.h.morris@us.ibm.com> | 5843 | M: Joshua Morris <josh.h.morris@us.ibm.com> |
5844 | M: Philip Kelleher <pjk1939@linux.vnet.ibm.com> | 5844 | M: Philip Kelleher <pjk1939@linux.ibm.com> |
5845 | S: Maintained | 5845 | S: Maintained |
5846 | F: drivers/block/rsxx/ | 5846 | F: drivers/block/rsxx/ |
5847 | 5847 | ||
@@ -6108,7 +6108,7 @@ F: include/linux/fscrypt*.h | |||
6108 | F: Documentation/filesystems/fscrypt.rst | 6108 | F: Documentation/filesystems/fscrypt.rst |
6109 | 6109 | ||
6110 | FSI-ATTACHED I2C DRIVER | 6110 | FSI-ATTACHED I2C DRIVER |
6111 | M: Eddie James <eajames@linux.vnet.ibm.com> | 6111 | M: Eddie James <eajames@linux.ibm.com> |
6112 | L: linux-i2c@vger.kernel.org | 6112 | L: linux-i2c@vger.kernel.org |
6113 | L: openbmc@lists.ozlabs.org (moderated for non-subscribers) | 6113 | L: openbmc@lists.ozlabs.org (moderated for non-subscribers) |
6114 | S: Maintained | 6114 | S: Maintained |
@@ -6284,8 +6284,7 @@ S: Supported | |||
6284 | F: drivers/uio/uio_pci_generic.c | 6284 | F: drivers/uio/uio_pci_generic.c |
6285 | 6285 | ||
6286 | GENWQE (IBM Generic Workqueue Card) | 6286 | GENWQE (IBM Generic Workqueue Card) |
6287 | M: Frank Haverkamp <haver@linux.vnet.ibm.com> | 6287 | M: Frank Haverkamp <haver@linux.ibm.com> |
6288 | M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com> | ||
6289 | S: Supported | 6288 | S: Supported |
6290 | F: drivers/misc/genwqe/ | 6289 | F: drivers/misc/genwqe/ |
6291 | 6290 | ||
@@ -7075,8 +7074,7 @@ F: crypto/842.c | |||
7075 | F: lib/842/ | 7074 | F: lib/842/ |
7076 | 7075 | ||
7077 | IBM Power in-Nest Crypto Acceleration | 7076 | IBM Power in-Nest Crypto Acceleration |
7078 | M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> | 7077 | M: Paulo Flabiano Smorigo <pfsmorigo@linux.ibm.com> |
7079 | M: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com> | ||
7080 | L: linux-crypto@vger.kernel.org | 7078 | L: linux-crypto@vger.kernel.org |
7081 | S: Supported | 7079 | S: Supported |
7082 | F: drivers/crypto/nx/Makefile | 7080 | F: drivers/crypto/nx/Makefile |
@@ -7093,8 +7091,8 @@ S: Supported | |||
7093 | F: drivers/scsi/ipr.* | 7091 | F: drivers/scsi/ipr.* |
7094 | 7092 | ||
7095 | IBM Power SRIOV Virtual NIC Device Driver | 7093 | IBM Power SRIOV Virtual NIC Device Driver |
7096 | M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com> | 7094 | M: Thomas Falcon <tlfalcon@linux.ibm.com> |
7097 | M: John Allen <jallen@linux.vnet.ibm.com> | 7095 | M: John Allen <jallen@linux.ibm.com> |
7098 | L: netdev@vger.kernel.org | 7096 | L: netdev@vger.kernel.org |
7099 | S: Supported | 7097 | S: Supported |
7100 | F: drivers/net/ethernet/ibm/ibmvnic.* | 7098 | F: drivers/net/ethernet/ibm/ibmvnic.* |
@@ -7109,41 +7107,38 @@ F: arch/powerpc/include/asm/vas.h | |||
7109 | F: arch/powerpc/include/uapi/asm/vas.h | 7107 | F: arch/powerpc/include/uapi/asm/vas.h |
7110 | 7108 | ||
7111 | IBM Power Virtual Ethernet Device Driver | 7109 | IBM Power Virtual Ethernet Device Driver |
7112 | M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com> | 7110 | M: Thomas Falcon <tlfalcon@linux.ibm.com> |
7113 | L: netdev@vger.kernel.org | 7111 | L: netdev@vger.kernel.org |
7114 | S: Supported | 7112 | S: Supported |
7115 | F: drivers/net/ethernet/ibm/ibmveth.* | 7113 | F: drivers/net/ethernet/ibm/ibmveth.* |
7116 | 7114 | ||
7117 | IBM Power Virtual FC Device Drivers | 7115 | IBM Power Virtual FC Device Drivers |
7118 | M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com> | 7116 | M: Tyrel Datwyler <tyreld@linux.ibm.com> |
7119 | L: linux-scsi@vger.kernel.org | 7117 | L: linux-scsi@vger.kernel.org |
7120 | S: Supported | 7118 | S: Supported |
7121 | F: drivers/scsi/ibmvscsi/ibmvfc* | 7119 | F: drivers/scsi/ibmvscsi/ibmvfc* |
7122 | 7120 | ||
7123 | IBM Power Virtual Management Channel Driver | 7121 | IBM Power Virtual Management Channel Driver |
7124 | M: Bryant G. Ly <bryantly@linux.vnet.ibm.com> | 7122 | M: Steven Royer <seroyer@linux.ibm.com> |
7125 | M: Steven Royer <seroyer@linux.vnet.ibm.com> | ||
7126 | S: Supported | 7123 | S: Supported |
7127 | F: drivers/misc/ibmvmc.* | 7124 | F: drivers/misc/ibmvmc.* |
7128 | 7125 | ||
7129 | IBM Power Virtual SCSI Device Drivers | 7126 | IBM Power Virtual SCSI Device Drivers |
7130 | M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com> | 7127 | M: Tyrel Datwyler <tyreld@linux.ibm.com> |
7131 | L: linux-scsi@vger.kernel.org | 7128 | L: linux-scsi@vger.kernel.org |
7132 | S: Supported | 7129 | S: Supported |
7133 | F: drivers/scsi/ibmvscsi/ibmvscsi* | 7130 | F: drivers/scsi/ibmvscsi/ibmvscsi* |
7134 | F: include/scsi/viosrp.h | 7131 | F: include/scsi/viosrp.h |
7135 | 7132 | ||
7136 | IBM Power Virtual SCSI Device Target Driver | 7133 | IBM Power Virtual SCSI Device Target Driver |
7137 | M: Bryant G. Ly <bryantly@linux.vnet.ibm.com> | 7134 | M: Michael Cyr <mikecyr@linux.ibm.com> |
7138 | M: Michael Cyr <mikecyr@linux.vnet.ibm.com> | ||
7139 | L: linux-scsi@vger.kernel.org | 7135 | L: linux-scsi@vger.kernel.org |
7140 | L: target-devel@vger.kernel.org | 7136 | L: target-devel@vger.kernel.org |
7141 | S: Supported | 7137 | S: Supported |
7142 | F: drivers/scsi/ibmvscsi_tgt/ | 7138 | F: drivers/scsi/ibmvscsi_tgt/ |
7143 | 7139 | ||
7144 | IBM Power VMX Cryptographic instructions | 7140 | IBM Power VMX Cryptographic instructions |
7145 | M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> | 7141 | M: Paulo Flabiano Smorigo <pfsmorigo@linux.ibm.com> |
7146 | M: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com> | ||
7147 | L: linux-crypto@vger.kernel.org | 7142 | L: linux-crypto@vger.kernel.org |
7148 | S: Supported | 7143 | S: Supported |
7149 | F: drivers/crypto/vmx/Makefile | 7144 | F: drivers/crypto/vmx/Makefile |
@@ -7420,7 +7415,7 @@ S: Maintained | |||
7420 | L: linux-crypto@vger.kernel.org | 7415 | L: linux-crypto@vger.kernel.org |
7421 | 7416 | ||
7422 | INTEGRITY MEASUREMENT ARCHITECTURE (IMA) | 7417 | INTEGRITY MEASUREMENT ARCHITECTURE (IMA) |
7423 | M: Mimi Zohar <zohar@linux.vnet.ibm.com> | 7418 | M: Mimi Zohar <zohar@linux.ibm.com> |
7424 | M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com> | 7419 | M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com> |
7425 | L: linux-integrity@vger.kernel.org | 7420 | L: linux-integrity@vger.kernel.org |
7426 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git | 7421 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git |
@@ -8021,9 +8016,8 @@ S: Maintained | |||
8021 | F: drivers/media/platform/rcar_jpu.c | 8016 | F: drivers/media/platform/rcar_jpu.c |
8022 | 8017 | ||
8023 | JSM Neo PCI based serial card | 8018 | JSM Neo PCI based serial card |
8024 | M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com> | ||
8025 | L: linux-serial@vger.kernel.org | 8019 | L: linux-serial@vger.kernel.org |
8026 | S: Maintained | 8020 | S: Orphan |
8027 | F: drivers/tty/serial/jsm/ | 8021 | F: drivers/tty/serial/jsm/ |
8028 | 8022 | ||
8029 | K10TEMP HARDWARE MONITORING DRIVER | 8023 | K10TEMP HARDWARE MONITORING DRIVER |
@@ -8253,7 +8247,7 @@ F: include/uapi/linux/kexec.h | |||
8253 | F: kernel/kexec* | 8247 | F: kernel/kexec* |
8254 | 8248 | ||
8255 | KEYS-ENCRYPTED | 8249 | KEYS-ENCRYPTED |
8256 | M: Mimi Zohar <zohar@linux.vnet.ibm.com> | 8250 | M: Mimi Zohar <zohar@linux.ibm.com> |
8257 | L: linux-integrity@vger.kernel.org | 8251 | L: linux-integrity@vger.kernel.org |
8258 | L: keyrings@vger.kernel.org | 8252 | L: keyrings@vger.kernel.org |
8259 | S: Supported | 8253 | S: Supported |
@@ -8262,9 +8256,9 @@ F: include/keys/encrypted-type.h | |||
8262 | F: security/keys/encrypted-keys/ | 8256 | F: security/keys/encrypted-keys/ |
8263 | 8257 | ||
8264 | KEYS-TRUSTED | 8258 | KEYS-TRUSTED |
8265 | M: James Bottomley <jejb@linux.vnet.ibm.com> | 8259 | M: James Bottomley <jejb@linux.ibm.com> |
8266 | M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com> | 8260 | M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com> |
8267 | M: Mimi Zohar <zohar@linux.vnet.ibm.com> | 8261 | M: Mimi Zohar <zohar@linux.ibm.com> |
8268 | L: linux-integrity@vger.kernel.org | 8262 | L: linux-integrity@vger.kernel.org |
8269 | L: keyrings@vger.kernel.org | 8263 | L: keyrings@vger.kernel.org |
8270 | S: Supported | 8264 | S: Supported |
@@ -8317,7 +8311,7 @@ F: lib/test_kmod.c | |||
8317 | F: tools/testing/selftests/kmod/ | 8311 | F: tools/testing/selftests/kmod/ |
8318 | 8312 | ||
8319 | KPROBES | 8313 | KPROBES |
8320 | M: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> | 8314 | M: Naveen N. Rao <naveen.n.rao@linux.ibm.com> |
8321 | M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | 8315 | M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
8322 | M: "David S. Miller" <davem@davemloft.net> | 8316 | M: "David S. Miller" <davem@davemloft.net> |
8323 | M: Masami Hiramatsu <mhiramat@kernel.org> | 8317 | M: Masami Hiramatsu <mhiramat@kernel.org> |
@@ -8673,7 +8667,7 @@ M: Nicholas Piggin <npiggin@gmail.com> | |||
8673 | M: David Howells <dhowells@redhat.com> | 8667 | M: David Howells <dhowells@redhat.com> |
8674 | M: Jade Alglave <j.alglave@ucl.ac.uk> | 8668 | M: Jade Alglave <j.alglave@ucl.ac.uk> |
8675 | M: Luc Maranget <luc.maranget@inria.fr> | 8669 | M: Luc Maranget <luc.maranget@inria.fr> |
8676 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 8670 | M: "Paul E. McKenney" <paulmck@linux.ibm.com> |
8677 | R: Akira Yokosawa <akiyks@gmail.com> | 8671 | R: Akira Yokosawa <akiyks@gmail.com> |
8678 | R: Daniel Lustig <dlustig@nvidia.com> | 8672 | R: Daniel Lustig <dlustig@nvidia.com> |
8679 | L: linux-kernel@vger.kernel.org | 8673 | L: linux-kernel@vger.kernel.org |
@@ -9631,7 +9625,7 @@ F: drivers/platform/x86/mlx-platform.c | |||
9631 | 9625 | ||
9632 | MEMBARRIER SUPPORT | 9626 | MEMBARRIER SUPPORT |
9633 | M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | 9627 | M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
9634 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 9628 | M: "Paul E. McKenney" <paulmck@linux.ibm.com> |
9635 | L: linux-kernel@vger.kernel.org | 9629 | L: linux-kernel@vger.kernel.org |
9636 | S: Supported | 9630 | S: Supported |
9637 | F: kernel/sched/membarrier.c | 9631 | F: kernel/sched/membarrier.c |
@@ -10769,7 +10763,7 @@ S: Supported | |||
10769 | F: tools/objtool/ | 10763 | F: tools/objtool/ |
10770 | 10764 | ||
10771 | OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER | 10765 | OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER |
10772 | M: Frederic Barrat <fbarrat@linux.vnet.ibm.com> | 10766 | M: Frederic Barrat <fbarrat@linux.ibm.com> |
10773 | M: Andrew Donnellan <andrew.donnellan@au1.ibm.com> | 10767 | M: Andrew Donnellan <andrew.donnellan@au1.ibm.com> |
10774 | L: linuxppc-dev@lists.ozlabs.org | 10768 | L: linuxppc-dev@lists.ozlabs.org |
10775 | S: Supported | 10769 | S: Supported |
@@ -12580,7 +12574,7 @@ S: Orphan | |||
12580 | F: drivers/net/wireless/ray* | 12574 | F: drivers/net/wireless/ray* |
12581 | 12575 | ||
12582 | RCUTORTURE TEST FRAMEWORK | 12576 | RCUTORTURE TEST FRAMEWORK |
12583 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 12577 | M: "Paul E. McKenney" <paulmck@linux.ibm.com> |
12584 | M: Josh Triplett <josh@joshtriplett.org> | 12578 | M: Josh Triplett <josh@joshtriplett.org> |
12585 | R: Steven Rostedt <rostedt@goodmis.org> | 12579 | R: Steven Rostedt <rostedt@goodmis.org> |
12586 | R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | 12580 | R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
@@ -12627,11 +12621,12 @@ F: arch/x86/include/asm/intel_rdt_sched.h | |||
12627 | F: Documentation/x86/intel_rdt* | 12621 | F: Documentation/x86/intel_rdt* |
12628 | 12622 | ||
12629 | READ-COPY UPDATE (RCU) | 12623 | READ-COPY UPDATE (RCU) |
12630 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 12624 | M: "Paul E. McKenney" <paulmck@linux.ibm.com> |
12631 | M: Josh Triplett <josh@joshtriplett.org> | 12625 | M: Josh Triplett <josh@joshtriplett.org> |
12632 | R: Steven Rostedt <rostedt@goodmis.org> | 12626 | R: Steven Rostedt <rostedt@goodmis.org> |
12633 | R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | 12627 | R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
12634 | R: Lai Jiangshan <jiangshanlai@gmail.com> | 12628 | R: Lai Jiangshan <jiangshanlai@gmail.com> |
12629 | R: Joel Fernandes <joel@joelfernandes.org> | ||
12635 | L: linux-kernel@vger.kernel.org | 12630 | L: linux-kernel@vger.kernel.org |
12636 | W: http://www.rdrop.com/users/paulmck/RCU/ | 12631 | W: http://www.rdrop.com/users/paulmck/RCU/ |
12637 | S: Supported | 12632 | S: Supported |
@@ -12767,7 +12762,7 @@ F: include/linux/reset-controller.h | |||
12767 | RESTARTABLE SEQUENCES SUPPORT | 12762 | RESTARTABLE SEQUENCES SUPPORT |
12768 | M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | 12763 | M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
12769 | M: Peter Zijlstra <peterz@infradead.org> | 12764 | M: Peter Zijlstra <peterz@infradead.org> |
12770 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 12765 | M: "Paul E. McKenney" <paulmck@linux.ibm.com> |
12771 | M: Boqun Feng <boqun.feng@gmail.com> | 12766 | M: Boqun Feng <boqun.feng@gmail.com> |
12772 | L: linux-kernel@vger.kernel.org | 12767 | L: linux-kernel@vger.kernel.org |
12773 | S: Supported | 12768 | S: Supported |
@@ -13292,7 +13287,7 @@ F: drivers/scsi/sg.c | |||
13292 | F: include/scsi/sg.h | 13287 | F: include/scsi/sg.h |
13293 | 13288 | ||
13294 | SCSI SUBSYSTEM | 13289 | SCSI SUBSYSTEM |
13295 | M: "James E.J. Bottomley" <jejb@linux.vnet.ibm.com> | 13290 | M: "James E.J. Bottomley" <jejb@linux.ibm.com> |
13296 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git | 13291 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git |
13297 | M: "Martin K. Petersen" <martin.petersen@oracle.com> | 13292 | M: "Martin K. Petersen" <martin.petersen@oracle.com> |
13298 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git | 13293 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git |
@@ -13727,7 +13722,7 @@ F: mm/sl?b* | |||
13727 | 13722 | ||
13728 | SLEEPABLE READ-COPY UPDATE (SRCU) | 13723 | SLEEPABLE READ-COPY UPDATE (SRCU) |
13729 | M: Lai Jiangshan <jiangshanlai@gmail.com> | 13724 | M: Lai Jiangshan <jiangshanlai@gmail.com> |
13730 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 13725 | M: "Paul E. McKenney" <paulmck@linux.ibm.com> |
13731 | M: Josh Triplett <josh@joshtriplett.org> | 13726 | M: Josh Triplett <josh@joshtriplett.org> |
13732 | R: Steven Rostedt <rostedt@goodmis.org> | 13727 | R: Steven Rostedt <rostedt@goodmis.org> |
13733 | R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | 13728 | R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
@@ -15151,7 +15146,7 @@ F: drivers/platform/x86/topstar-laptop.c | |||
15151 | 15146 | ||
15152 | TORTURE-TEST MODULES | 15147 | TORTURE-TEST MODULES |
15153 | M: Davidlohr Bueso <dave@stgolabs.net> | 15148 | M: Davidlohr Bueso <dave@stgolabs.net> |
15154 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> | 15149 | M: "Paul E. McKenney" <paulmck@linux.ibm.com> |
15155 | M: Josh Triplett <josh@joshtriplett.org> | 15150 | M: Josh Triplett <josh@joshtriplett.org> |
15156 | L: linux-kernel@vger.kernel.org | 15151 | L: linux-kernel@vger.kernel.org |
15157 | S: Supported | 15152 | S: Supported |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 8cf035e68378..4c01e9a01a74 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -289,7 +289,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) | |||
289 | 289 | ||
290 | (*batchp)->ptes[(*batchp)->index++] = hugepte; | 290 | (*batchp)->ptes[(*batchp)->index++] = hugepte; |
291 | if ((*batchp)->index == HUGEPD_FREELIST_SIZE) { | 291 | if ((*batchp)->index == HUGEPD_FREELIST_SIZE) { |
292 | call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback); | 292 | call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback); |
293 | *batchp = NULL; | 293 | *batchp = NULL; |
294 | } | 294 | } |
295 | put_cpu_var(hugepd_freelist_cur); | 295 | put_cpu_var(hugepd_freelist_cur); |
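With the rcu, rcu_bh and rcu_sched flavors consolidated, call_rcu() now provides the guarantee that call_rcu_sched() used to, which is why the batch free above can switch over directly. A minimal sketch of the deferred-free pattern, using hypothetical names (struct demo_batch, demo_batch_free_rcu) rather than the hugepd helpers:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct demo_batch {
            struct rcu_head rcu;
            unsigned int index;
            void *ptes[16];
    };

    static void demo_batch_free_rcu(struct rcu_head *head)
    {
            struct demo_batch *batch = container_of(head, struct demo_batch, rcu);

            kfree(batch);           /* safe: all pre-existing readers have finished */
    }

    static void demo_batch_retire(struct demo_batch *batch)
    {
            /*
             * One call now covers readers under rcu_read_lock(),
             * rcu_read_lock_bh() and preempt_disable() alike.
             */
            call_rcu(&batch->rcu, demo_batch_free_rcu);
    }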
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 6791562779ee..db6bb2f97a2c 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c | |||
@@ -352,7 +352,7 @@ void tlb_table_flush(struct mmu_gather *tlb) | |||
352 | struct mmu_table_batch **batch = &tlb->batch; | 352 | struct mmu_table_batch **batch = &tlb->batch; |
353 | 353 | ||
354 | if (*batch) { | 354 | if (*batch) { |
355 | call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); | 355 | call_rcu(&(*batch)->rcu, tlb_remove_table_rcu); |
356 | *batch = NULL; | 356 | *batch = NULL; |
357 | } | 357 | } |
358 | } | 358 | } |
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c index f9024bccff16..43730c9b1c86 100644 --- a/arch/sparc/oprofile/init.c +++ b/arch/sparc/oprofile/init.c | |||
@@ -53,7 +53,7 @@ static void timer_stop(void) | |||
53 | { | 53 | { |
54 | nmi_adjust_hz(1); | 54 | nmi_adjust_hz(1); |
55 | unregister_die_notifier(&profile_timer_exceptions_nb); | 55 | unregister_die_notifier(&profile_timer_exceptions_nb); |
56 | synchronize_sched(); /* Allow already-started NMIs to complete. */ | 56 | synchronize_rcu(); /* Allow already-started NMIs to complete. */ |
57 | } | 57 | } |
58 | 58 | ||
59 | static int op_nmi_timer_init(struct oprofile_operations *ops) | 59 | static int op_nmi_timer_init(struct oprofile_operations *ops) |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 8cd66152cdb0..9df652d3d927 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -59,7 +59,7 @@ static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev) | |||
59 | { | 59 | { |
60 | struct pcibios_fwaddrmap *map; | 60 | struct pcibios_fwaddrmap *map; |
61 | 61 | ||
62 | WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock)); | 62 | lockdep_assert_held(&pcibios_fwaddrmap_lock); |
63 | 63 | ||
64 | list_for_each_entry(map, &pcibios_fwaddrmappings, list) | 64 | list_for_each_entry(map, &pcibios_fwaddrmappings, list) |
65 | if (map->dev == dev) | 65 | if (map->dev == dev) |
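Several hunks in this series replace spin_is_locked()-based assertions with lockdep_assert_held(), which documents the caller's locking requirement and compiles to nothing when lockdep is disabled (spin_is_locked() is also meaningless on !SMP). A small sketch with made-up names, not the pcibios code itself:

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/lockdep.h>

    static DEFINE_SPINLOCK(demo_lock);
    static LIST_HEAD(demo_list);

    struct demo_entry {
            struct list_head list;
            int key;
    };

    /* Caller must hold demo_lock. */
    static struct demo_entry *demo_lookup(int key)
    {
            struct demo_entry *e;

            lockdep_assert_held(&demo_lock);        /* splats if the caller forgot the lock */
            list_for_each_entry(e, &demo_list, list)
                    if (e->key == key)
                            return e;
            return NULL;
    }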
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index f8ec3d4ba4a8..8eb3c4c9ff67 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c | |||
@@ -382,7 +382,7 @@ static int pcrypt_cpumask_change_notify(struct notifier_block *self, | |||
382 | 382 | ||
383 | cpumask_copy(new_mask->mask, cpumask->cbcpu); | 383 | cpumask_copy(new_mask->mask, cpumask->cbcpu); |
384 | rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); | 384 | rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); |
385 | synchronize_rcu_bh(); | 385 | synchronize_rcu(); |
386 | 386 | ||
387 | free_cpumask_var(old_mask->mask); | 387 | free_cpumask_var(old_mask->mask); |
388 | kfree(old_mask); | 388 | kfree(old_mask); |
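The pcrypt hunk is the standard RCU update-side sequence: publish the new cpumask with rcu_assign_pointer(), wait one (now flavor-agnostic) grace period, then free the old copy. A generic sketch of that publish-wait-free pattern, with hypothetical struct cfg / cfg_ptr names:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/mutex.h>

    struct cfg {
            int value;
    };

    static struct cfg __rcu *cfg_ptr;
    static DEFINE_MUTEX(cfg_mutex);

    static int cfg_update(int value)
    {
            struct cfg *newc, *oldc;

            newc = kmalloc(sizeof(*newc), GFP_KERNEL);
            if (!newc)
                    return -ENOMEM;
            newc->value = value;

            mutex_lock(&cfg_mutex);
            oldc = rcu_dereference_protected(cfg_ptr, lockdep_is_held(&cfg_mutex));
            rcu_assign_pointer(cfg_ptr, newc);
            mutex_unlock(&cfg_mutex);

            synchronize_rcu();      /* wait out readers that may still hold oldc */
            kfree(oldc);
            return 0;
    }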
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 677618e6f1f7..dc8603d34320 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -2187,7 +2187,7 @@ static void shutdown_smi(void *send_info) | |||
2187 | * handlers might have been running before we freed the | 2187 | * handlers might have been running before we freed the |
2188 | * interrupt. | 2188 | * interrupt. |
2189 | */ | 2189 | */ |
2190 | synchronize_sched(); | 2190 | synchronize_rcu(); |
2191 | 2191 | ||
2192 | /* | 2192 | /* |
2193 | * Timeouts are stopped, now make sure the interrupts are off | 2193 | * Timeouts are stopped, now make sure the interrupts are off |
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 6d53f7d9fc7a..ffa9adeaba31 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c | |||
@@ -346,7 +346,7 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy) | |||
346 | for_each_cpu(i, policy->cpus) | 346 | for_each_cpu(i, policy->cpus) |
347 | cpufreq_remove_update_util_hook(i); | 347 | cpufreq_remove_update_util_hook(i); |
348 | 348 | ||
349 | synchronize_sched(); | 349 | synchronize_rcu(); |
350 | } | 350 | } |
351 | 351 | ||
352 | static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy, | 352 | static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy, |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 9578312e43f2..ed124d72db76 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -1930,7 +1930,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) | |||
1930 | 1930 | ||
1931 | cpufreq_remove_update_util_hook(cpu); | 1931 | cpufreq_remove_update_util_hook(cpu); |
1932 | cpu_data->update_util_set = false; | 1932 | cpu_data->update_util_set = false; |
1933 | synchronize_sched(); | 1933 | synchronize_rcu(); |
1934 | } | 1934 | } |
1935 | 1935 | ||
1936 | static int intel_pstate_get_max_freq(struct cpudata *cpu) | 1936 | static int intel_pstate_get_max_freq(struct cpudata *cpu) |
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index ffd68a7bc9e1..69d752f0b621 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c | |||
@@ -1661,7 +1661,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) | |||
1661 | 1661 | ||
1662 | napi_disable(&tp->napi); | 1662 | napi_disable(&tp->napi); |
1663 | netif_stop_queue(dev); | 1663 | netif_stop_queue(dev); |
1664 | synchronize_sched(); | 1664 | synchronize_rcu(); |
1665 | 1665 | ||
1666 | netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n", | 1666 | netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n", |
1667 | RTL_R8(ChipCmd), RTL_R16(IntrStatus), | 1667 | RTL_R8(ChipCmd), RTL_R16(IntrStatus), |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 1fd01688d37b..4f1d89f0dc24 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -5866,7 +5866,7 @@ static void rtl_reset_work(struct rtl8169_private *tp) | |||
5866 | 5866 | ||
5867 | napi_disable(&tp->napi); | 5867 | napi_disable(&tp->napi); |
5868 | netif_stop_queue(dev); | 5868 | netif_stop_queue(dev); |
5869 | synchronize_sched(); | 5869 | synchronize_rcu(); |
5870 | 5870 | ||
5871 | rtl8169_hw_reset(tp); | 5871 | rtl8169_hw_reset(tp); |
5872 | 5872 | ||
@@ -6609,7 +6609,7 @@ static void rtl8169_down(struct net_device *dev) | |||
6609 | rtl8169_rx_missed(dev); | 6609 | rtl8169_rx_missed(dev); |
6610 | 6610 | ||
6611 | /* Give a racing hard_start_xmit a few cycles to complete. */ | 6611 | /* Give a racing hard_start_xmit a few cycles to complete. */ |
6612 | synchronize_sched(); | 6612 | synchronize_rcu(); |
6613 | 6613 | ||
6614 | rtl8169_tx_clear(tp); | 6614 | rtl8169_tx_clear(tp); |
6615 | 6615 | ||
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 98fe7e762e17..3643015a55cf 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -3167,7 +3167,7 @@ struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, | |||
3167 | { | 3167 | { |
3168 | u32 hash = efx_filter_spec_hash(spec); | 3168 | u32 hash = efx_filter_spec_hash(spec); |
3169 | 3169 | ||
3170 | WARN_ON(!spin_is_locked(&efx->rps_hash_lock)); | 3170 | lockdep_assert_held(&efx->rps_hash_lock); |
3171 | if (!efx->rps_hash_table) | 3171 | if (!efx->rps_hash_table) |
3172 | return NULL; | 3172 | return NULL; |
3173 | return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; | 3173 | return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; |
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index c2c50522b96d..808cf9816673 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c | |||
@@ -1142,7 +1142,7 @@ static void sis190_down(struct net_device *dev) | |||
1142 | if (!poll_locked) | 1142 | if (!poll_locked) |
1143 | poll_locked++; | 1143 | poll_locked++; |
1144 | 1144 | ||
1145 | synchronize_sched(); | 1145 | synchronize_rcu(); |
1146 | 1146 | ||
1147 | } while (SIS_R32(IntrMask)); | 1147 | } while (SIS_R32(IntrMask)); |
1148 | 1148 | ||
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h index 8d75508acd2b..51b2fc1a395f 100644 --- a/drivers/net/ethernet/smsc/smsc911x.h +++ b/drivers/net/ethernet/smsc/smsc911x.h | |||
@@ -67,7 +67,7 @@ | |||
67 | 67 | ||
68 | #ifdef CONFIG_DEBUG_SPINLOCK | 68 | #ifdef CONFIG_DEBUG_SPINLOCK |
69 | #define SMSC_ASSERT_MAC_LOCK(pdata) \ | 69 | #define SMSC_ASSERT_MAC_LOCK(pdata) \ |
70 | WARN_ON_SMP(!spin_is_locked(&pdata->mac_lock)) | 70 | lockdep_assert_held(&pdata->mac_lock) |
71 | #else | 71 | #else |
72 | #define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0) | 72 | #define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0) |
73 | #endif /* CONFIG_DEBUG_SPINLOCK */ | 73 | #endif /* CONFIG_DEBUG_SPINLOCK */ |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index ab11b2bee273..564ead864028 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -1359,7 +1359,7 @@ static int vhost_net_release(struct inode *inode, struct file *f) | |||
1359 | if (rx_sock) | 1359 | if (rx_sock) |
1360 | sockfd_put(rx_sock); | 1360 | sockfd_put(rx_sock); |
1361 | /* Make sure no callbacks are outstanding */ | 1361 | /* Make sure no callbacks are outstanding */ |
1362 | synchronize_rcu_bh(); | 1362 | synchronize_rcu(); |
1363 | /* We do an extra flush before freeing memory, | 1363 | /* We do an extra flush before freeing memory, |
1364 | * since jobs can re-queue themselves. */ | 1364 | * since jobs can re-queue themselves. */ |
1365 | vhost_net_flush(n); | 1365 | vhost_net_flush(n); |
@@ -158,7 +158,7 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr) | |||
158 | * or have finished their rcu_read_lock_sched() section. | 158 | * or have finished their rcu_read_lock_sched() section. |
159 | */ | 159 | */ |
160 | if (atomic_read(&files->count) > 1) | 160 | if (atomic_read(&files->count) > 1) |
161 | synchronize_sched(); | 161 | synchronize_rcu(); |
162 | 162 | ||
163 | spin_lock(&files->file_lock); | 163 | spin_lock(&files->file_lock); |
164 | if (!new_fdt) | 164 | if (!new_fdt) |
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index cd58939dc977..fe41a3302d00 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -926,7 +926,7 @@ static inline struct userfaultfd_wait_queue *find_userfault_in( | |||
926 | wait_queue_entry_t *wq; | 926 | wait_queue_entry_t *wq; |
927 | struct userfaultfd_wait_queue *uwq; | 927 | struct userfaultfd_wait_queue *uwq; |
928 | 928 | ||
929 | VM_BUG_ON(!spin_is_locked(&wqh->lock)); | 929 | lockdep_assert_held(&wqh->lock); |
930 | 930 | ||
931 | uwq = NULL; | 931 | uwq = NULL; |
932 | if (!waitqueue_active(wqh)) | 932 | if (!waitqueue_active(wqh)) |
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 79b99d653e03..71b75643c432 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h | |||
@@ -41,7 +41,7 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore * | |||
41 | * cannot both change sem->state from readers_fast and start checking | 41 | * cannot both change sem->state from readers_fast and start checking |
42 | * counters while we are here. So if we see !sem->state, we know that | 42 | * counters while we are here. So if we see !sem->state, we know that |
43 | * the writer won't be checking until we're past the preempt_enable() | 43 | * the writer won't be checking until we're past the preempt_enable() |
44 | * and that one the synchronize_sched() is done, the writer will see | 44 | * and that once the synchronize_rcu() is done, the writer will see |
45 | * anything we did within this RCU-sched read-size critical section. | 45 | * anything we did within this RCU-sched read-size critical section. |
46 | */ | 46 | */ |
47 | __this_cpu_inc(*sem->read_count); | 47 | __this_cpu_inc(*sem->read_count); |
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index 8a16c3eb3dd0..c0578ba23c1a 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h | |||
@@ -31,21 +31,4 @@ do { \ | |||
31 | 31 | ||
32 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) | 32 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) |
33 | 33 | ||
34 | /** | ||
35 | * synchronize_rcu_mult - Wait concurrently for multiple grace periods | ||
36 | * @...: List of call_rcu() functions for different grace periods to wait on | ||
37 | * | ||
38 | * This macro waits concurrently for multiple types of RCU grace periods. | ||
39 | * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait | ||
40 | * on concurrent RCU and RCU-tasks grace periods. Waiting on a give SRCU | ||
41 | * domain requires you to write a wrapper function for that SRCU domain's | ||
42 | * call_srcu() function, supplying the corresponding srcu_struct. | ||
43 | * | ||
44 | * If Tiny RCU, tell _wait_rcu_gp() does not bother waiting for RCU, | ||
45 | * given that anywhere synchronize_rcu_mult() can be called is automatically | ||
46 | * a grace period. | ||
47 | */ | ||
48 | #define synchronize_rcu_mult(...) \ | ||
49 | _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) | ||
50 | |||
51 | #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ | 34 | #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ |
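synchronize_rcu_mult() existed to wait on several grace-period types at once; with rcu, rcu_bh and rcu_sched merged, a caller that was waiting on those flavors needs only a single synchronize_rcu(). Illustrative only; callers that also wait on RCU-tasks or SRCU still need their own mechanism:

    static void demo_wait_for_readers(void)
    {
            /* Formerly something like: synchronize_rcu_mult(call_rcu, call_rcu_sched); */
            synchronize_rcu();      /* now covers rcu, rcu_bh and rcu_sched readers */
    }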
diff --git a/include/linux/sched.h b/include/linux/sched.h index 291a9bd5b97f..4f8fc5294291 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -572,8 +572,10 @@ union rcu_special { | |||
572 | struct { | 572 | struct { |
573 | u8 blocked; | 573 | u8 blocked; |
574 | u8 need_qs; | 574 | u8 need_qs; |
575 | u8 exp_hint; /* Hint for performance. */ | ||
576 | u8 pad; /* No garbage from compiler! */ | ||
575 | } b; /* Bits. */ | 577 | } b; /* Bits. */ |
576 | u16 s; /* Set of bits. */ | 578 | u32 s; /* Set of bits. */ |
577 | }; | 579 | }; |
578 | 580 | ||
579 | enum perf_event_task_context { | 581 | enum perf_event_task_context { |
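The rcu_special change adds an expedited-grace-period hint byte plus explicit padding, and widens the aggregate member so that ->s again spans every byte of ->b. The point of the union is that per-flag updates and whole-word accesses stay in sync; roughly (illustrative, field semantics simplified):

    #include <linux/types.h>
    #include <linux/compiler.h>

    union demo_special {
            struct {
                    u8 blocked;
                    u8 need_qs;
                    u8 exp_hint;    /* hint for expedited grace periods */
                    u8 pad;         /* no garbage bytes beside the flags */
            } b;                    /* individual flags */
            u32 s;                  /* all flags as one word */
    };

    /* Clear every flag in one store, e.g. when a task leaves its reader section. */
    static inline void demo_special_clear(union demo_special *sp)
    {
            WRITE_ONCE(sp->s, 0);
    }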
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 67135d4a8a30..c614375cd264 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
@@ -38,20 +38,20 @@ struct srcu_struct; | |||
38 | 38 | ||
39 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 39 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
40 | 40 | ||
41 | int __init_srcu_struct(struct srcu_struct *sp, const char *name, | 41 | int __init_srcu_struct(struct srcu_struct *ssp, const char *name, |
42 | struct lock_class_key *key); | 42 | struct lock_class_key *key); |
43 | 43 | ||
44 | #define init_srcu_struct(sp) \ | 44 | #define init_srcu_struct(ssp) \ |
45 | ({ \ | 45 | ({ \ |
46 | static struct lock_class_key __srcu_key; \ | 46 | static struct lock_class_key __srcu_key; \ |
47 | \ | 47 | \ |
48 | __init_srcu_struct((sp), #sp, &__srcu_key); \ | 48 | __init_srcu_struct((ssp), #ssp, &__srcu_key); \ |
49 | }) | 49 | }) |
50 | 50 | ||
51 | #define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name }, | 51 | #define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name }, |
52 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 52 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
53 | 53 | ||
54 | int init_srcu_struct(struct srcu_struct *sp); | 54 | int init_srcu_struct(struct srcu_struct *ssp); |
55 | 55 | ||
56 | #define __SRCU_DEP_MAP_INIT(srcu_name) | 56 | #define __SRCU_DEP_MAP_INIT(srcu_name) |
57 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 57 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
@@ -67,28 +67,28 @@ int init_srcu_struct(struct srcu_struct *sp); | |||
67 | struct srcu_struct { }; | 67 | struct srcu_struct { }; |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | void call_srcu(struct srcu_struct *sp, struct rcu_head *head, | 70 | void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, |
71 | void (*func)(struct rcu_head *head)); | 71 | void (*func)(struct rcu_head *head)); |
72 | void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced); | 72 | void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced); |
73 | int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp); | 73 | int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); |
74 | void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); | 74 | void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); |
75 | void synchronize_srcu(struct srcu_struct *sp); | 75 | void synchronize_srcu(struct srcu_struct *ssp); |
76 | 76 | ||
77 | /** | 77 | /** |
78 | * cleanup_srcu_struct - deconstruct a sleep-RCU structure | 78 | * cleanup_srcu_struct - deconstruct a sleep-RCU structure |
79 | * @sp: structure to clean up. | 79 | * @ssp: structure to clean up. |
80 | * | 80 | * |
81 | * Must invoke this after you are finished using a given srcu_struct that | 81 | * Must invoke this after you are finished using a given srcu_struct that |
82 | * was initialized via init_srcu_struct(), else you leak memory. | 82 | * was initialized via init_srcu_struct(), else you leak memory. |
83 | */ | 83 | */ |
84 | static inline void cleanup_srcu_struct(struct srcu_struct *sp) | 84 | static inline void cleanup_srcu_struct(struct srcu_struct *ssp) |
85 | { | 85 | { |
86 | _cleanup_srcu_struct(sp, false); | 86 | _cleanup_srcu_struct(ssp, false); |
87 | } | 87 | } |
88 | 88 | ||
89 | /** | 89 | /** |
90 | * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure | 90 | * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure |
91 | * @sp: structure to clean up. | 91 | * @ssp: structure to clean up. |
92 | * | 92 | * |
93 | * Must invoke this after you are finished using a given srcu_struct that | 93 | * Must invoke this after you are finished using a given srcu_struct that |
94 | * was initialized via init_srcu_struct(), else you leak memory. Also, | 94 | * was initialized via init_srcu_struct(), else you leak memory. Also, |
@@ -103,16 +103,16 @@ static inline void cleanup_srcu_struct(struct srcu_struct *sp) | |||
103 | * (with high probability, anyway), and will also cause the srcu_struct | 103 | * (with high probability, anyway), and will also cause the srcu_struct |
104 | * to be leaked. | 104 | * to be leaked. |
105 | */ | 105 | */ |
106 | static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp) | 106 | static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp) |
107 | { | 107 | { |
108 | _cleanup_srcu_struct(sp, true); | 108 | _cleanup_srcu_struct(ssp, true); |
109 | } | 109 | } |
110 | 110 | ||
111 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 111 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
112 | 112 | ||
113 | /** | 113 | /** |
114 | * srcu_read_lock_held - might we be in SRCU read-side critical section? | 114 | * srcu_read_lock_held - might we be in SRCU read-side critical section? |
115 | * @sp: The srcu_struct structure to check | 115 | * @ssp: The srcu_struct structure to check |
116 | * | 116 | * |
117 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU | 117 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU |
118 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, | 118 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
@@ -126,16 +126,16 @@ static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp) | |||
126 | * relies on normal RCU, it can be called from the CPU which | 126 | * relies on normal RCU, it can be called from the CPU which |
127 | * is in the idle loop from an RCU point of view or offline. | 127 | * is in the idle loop from an RCU point of view or offline. |
128 | */ | 128 | */ |
129 | static inline int srcu_read_lock_held(const struct srcu_struct *sp) | 129 | static inline int srcu_read_lock_held(const struct srcu_struct *ssp) |
130 | { | 130 | { |
131 | if (!debug_lockdep_rcu_enabled()) | 131 | if (!debug_lockdep_rcu_enabled()) |
132 | return 1; | 132 | return 1; |
133 | return lock_is_held(&sp->dep_map); | 133 | return lock_is_held(&ssp->dep_map); |
134 | } | 134 | } |
135 | 135 | ||
136 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 136 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
137 | 137 | ||
138 | static inline int srcu_read_lock_held(const struct srcu_struct *sp) | 138 | static inline int srcu_read_lock_held(const struct srcu_struct *ssp) |
139 | { | 139 | { |
140 | return 1; | 140 | return 1; |
141 | } | 141 | } |
@@ -145,7 +145,7 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp) | |||
145 | /** | 145 | /** |
146 | * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing | 146 | * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing |
147 | * @p: the pointer to fetch and protect for later dereferencing | 147 | * @p: the pointer to fetch and protect for later dereferencing |
148 | * @sp: pointer to the srcu_struct, which is used to check that we | 148 | * @ssp: pointer to the srcu_struct, which is used to check that we |
149 | * really are in an SRCU read-side critical section. | 149 | * really are in an SRCU read-side critical section. |
150 | * @c: condition to check for update-side use | 150 | * @c: condition to check for update-side use |
151 | * | 151 | * |
@@ -154,29 +154,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp) | |||
154 | * to 1. The @c argument will normally be a logical expression containing | 154 | * to 1. The @c argument will normally be a logical expression containing |
155 | * lockdep_is_held() calls. | 155 | * lockdep_is_held() calls. |
156 | */ | 156 | */ |
157 | #define srcu_dereference_check(p, sp, c) \ | 157 | #define srcu_dereference_check(p, ssp, c) \ |
158 | __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu) | 158 | __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu) |
159 | 159 | ||
160 | /** | 160 | /** |
161 | * srcu_dereference - fetch SRCU-protected pointer for later dereferencing | 161 | * srcu_dereference - fetch SRCU-protected pointer for later dereferencing |
162 | * @p: the pointer to fetch and protect for later dereferencing | 162 | * @p: the pointer to fetch and protect for later dereferencing |
163 | * @sp: pointer to the srcu_struct, which is used to check that we | 163 | * @ssp: pointer to the srcu_struct, which is used to check that we |
164 | * really are in an SRCU read-side critical section. | 164 | * really are in an SRCU read-side critical section. |
165 | * | 165 | * |
166 | * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU | 166 | * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU |
167 | * is enabled, invoking this outside of an RCU read-side critical | 167 | * is enabled, invoking this outside of an RCU read-side critical |
168 | * section will result in an RCU-lockdep splat. | 168 | * section will result in an RCU-lockdep splat. |
169 | */ | 169 | */ |
170 | #define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) | 170 | #define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0) |
171 | 171 | ||
172 | /** | 172 | /** |
173 | * srcu_dereference_notrace - no tracing and no lockdep calls from here | 173 | * srcu_dereference_notrace - no tracing and no lockdep calls from here |
174 | * @p: the pointer to fetch and protect for later dereferencing | ||
175 | * @ssp: pointer to the srcu_struct, which is used to check that we | ||
176 | * really are in an SRCU read-side critical section. | ||
174 | */ | 177 | */ |
175 | #define srcu_dereference_notrace(p, sp) srcu_dereference_check((p), (sp), 1) | 178 | #define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1) |
176 | 179 | ||
177 | /** | 180 | /** |
178 | * srcu_read_lock - register a new reader for an SRCU-protected structure. | 181 | * srcu_read_lock - register a new reader for an SRCU-protected structure. |
179 | * @sp: srcu_struct in which to register the new reader. | 182 | * @ssp: srcu_struct in which to register the new reader. |
180 | * | 183 | * |
181 | * Enter an SRCU read-side critical section. Note that SRCU read-side | 184 | * Enter an SRCU read-side critical section. Note that SRCU read-side |
182 | * critical sections may be nested. However, it is illegal to | 185 | * critical sections may be nested. However, it is illegal to |
@@ -191,44 +194,44 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp) | |||
191 | * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() | 194 | * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() |
192 | * was invoked in process context. | 195 | * was invoked in process context. |
193 | */ | 196 | */ |
194 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) | 197 | static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp) |
195 | { | 198 | { |
196 | int retval; | 199 | int retval; |
197 | 200 | ||
198 | retval = __srcu_read_lock(sp); | 201 | retval = __srcu_read_lock(ssp); |
199 | rcu_lock_acquire(&(sp)->dep_map); | 202 | rcu_lock_acquire(&(ssp)->dep_map); |
200 | return retval; | 203 | return retval; |
201 | } | 204 | } |
202 | 205 | ||
203 | /* Used by tracing, cannot be traced and cannot invoke lockdep. */ | 206 | /* Used by tracing, cannot be traced and cannot invoke lockdep. */ |
204 | static inline notrace int | 207 | static inline notrace int |
205 | srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp) | 208 | srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) |
206 | { | 209 | { |
207 | int retval; | 210 | int retval; |
208 | 211 | ||
209 | retval = __srcu_read_lock(sp); | 212 | retval = __srcu_read_lock(ssp); |
210 | return retval; | 213 | return retval; |
211 | } | 214 | } |
212 | 215 | ||
213 | /** | 216 | /** |
214 | * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. | 217 | * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. |
215 | * @sp: srcu_struct in which to unregister the old reader. | 218 | * @ssp: srcu_struct in which to unregister the old reader. |
216 | * @idx: return value from corresponding srcu_read_lock(). | 219 | * @idx: return value from corresponding srcu_read_lock(). |
217 | * | 220 | * |
218 | * Exit an SRCU read-side critical section. | 221 | * Exit an SRCU read-side critical section. |
219 | */ | 222 | */ |
220 | static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) | 223 | static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) |
221 | __releases(sp) | 224 | __releases(ssp) |
222 | { | 225 | { |
223 | rcu_lock_release(&(sp)->dep_map); | 226 | rcu_lock_release(&(ssp)->dep_map); |
224 | __srcu_read_unlock(sp, idx); | 227 | __srcu_read_unlock(ssp, idx); |
225 | } | 228 | } |
226 | 229 | ||
227 | /* Used by tracing, cannot be traced and cannot call lockdep. */ | 230 | /* Used by tracing, cannot be traced and cannot call lockdep. */ |
228 | static inline notrace void | 231 | static inline notrace void |
229 | srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp) | 232 | srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp) |
230 | { | 233 | { |
231 | __srcu_read_unlock(sp, idx); | 234 | __srcu_read_unlock(ssp, idx); |
232 | } | 235 | } |
233 | 236 | ||
234 | /** | 237 | /** |
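The srcu.h hunk is a mechanical rename of the srcu_struct parameter from sp to ssp; the read-side API itself is unchanged. For reference, a reader section still looks like this, with hypothetical my_srcu / my_data / do_something names:

    #include <linux/srcu.h>

    DEFINE_SRCU(my_srcu);

    struct my_data;
    void do_something(struct my_data *p);       /* assumed consumer, defined elsewhere */

    static struct my_data __rcu *my_ptr;

    static void my_reader(void)
    {
            struct my_data *p;
            int idx;

            idx = srcu_read_lock(&my_srcu);
            p = srcu_dereference(my_ptr, &my_srcu);
            if (p)
                    do_something(p);            /* may sleep: SRCU readers can block */
            srcu_read_unlock(&my_srcu, idx);
    }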
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index f41d2fb09f87..b19216aaaef2 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h | |||
@@ -60,7 +60,7 @@ void srcu_drive_gp(struct work_struct *wp); | |||
60 | #define DEFINE_STATIC_SRCU(name) \ | 60 | #define DEFINE_STATIC_SRCU(name) \ |
61 | static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name) | 61 | static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name) |
62 | 62 | ||
63 | void synchronize_srcu(struct srcu_struct *sp); | 63 | void synchronize_srcu(struct srcu_struct *ssp); |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * Counts the new reader in the appropriate per-CPU element of the | 66 | * Counts the new reader in the appropriate per-CPU element of the |
@@ -68,36 +68,36 @@ void synchronize_srcu(struct srcu_struct *sp); | |||
68 | * __srcu_read_unlock() must be in the same handler instance. Returns an | 68 | * __srcu_read_unlock() must be in the same handler instance. Returns an |
69 | * index that must be passed to the matching srcu_read_unlock(). | 69 | * index that must be passed to the matching srcu_read_unlock(). |
70 | */ | 70 | */ |
71 | static inline int __srcu_read_lock(struct srcu_struct *sp) | 71 | static inline int __srcu_read_lock(struct srcu_struct *ssp) |
72 | { | 72 | { |
73 | int idx; | 73 | int idx; |
74 | 74 | ||
75 | idx = READ_ONCE(sp->srcu_idx); | 75 | idx = READ_ONCE(ssp->srcu_idx); |
76 | WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1); | 76 | WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1); |
77 | return idx; | 77 | return idx; |
78 | } | 78 | } |
79 | 79 | ||
80 | static inline void synchronize_srcu_expedited(struct srcu_struct *sp) | 80 | static inline void synchronize_srcu_expedited(struct srcu_struct *ssp) |
81 | { | 81 | { |
82 | synchronize_srcu(sp); | 82 | synchronize_srcu(ssp); |
83 | } | 83 | } |
84 | 84 | ||
85 | static inline void srcu_barrier(struct srcu_struct *sp) | 85 | static inline void srcu_barrier(struct srcu_struct *ssp) |
86 | { | 86 | { |
87 | synchronize_srcu(sp); | 87 | synchronize_srcu(ssp); |
88 | } | 88 | } |
89 | 89 | ||
90 | /* Defined here to avoid size increase for non-torture kernels. */ | 90 | /* Defined here to avoid size increase for non-torture kernels. */ |
91 | static inline void srcu_torture_stats_print(struct srcu_struct *sp, | 91 | static inline void srcu_torture_stats_print(struct srcu_struct *ssp, |
92 | char *tt, char *tf) | 92 | char *tt, char *tf) |
93 | { | 93 | { |
94 | int idx; | 94 | int idx; |
95 | 95 | ||
96 | idx = READ_ONCE(sp->srcu_idx) & 0x1; | 96 | idx = READ_ONCE(ssp->srcu_idx) & 0x1; |
97 | pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", | 97 | pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", |
98 | tt, tf, idx, | 98 | tt, tf, idx, |
99 | READ_ONCE(sp->srcu_lock_nesting[!idx]), | 99 | READ_ONCE(ssp->srcu_lock_nesting[!idx]), |
100 | READ_ONCE(sp->srcu_lock_nesting[idx])); | 100 | READ_ONCE(ssp->srcu_lock_nesting[idx])); |
101 | } | 101 | } |
102 | 102 | ||
103 | #endif | 103 | #endif |
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 0ae91b3a7406..6f292bd3e7db 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h | |||
@@ -51,7 +51,7 @@ struct srcu_data { | |||
51 | unsigned long grpmask; /* Mask for leaf srcu_node */ | 51 | unsigned long grpmask; /* Mask for leaf srcu_node */ |
52 | /* ->srcu_data_have_cbs[]. */ | 52 | /* ->srcu_data_have_cbs[]. */ |
53 | int cpu; | 53 | int cpu; |
54 | struct srcu_struct *sp; | 54 | struct srcu_struct *ssp; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | /* | 57 | /* |
@@ -138,8 +138,8 @@ struct srcu_struct { | |||
138 | #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) | 138 | #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) |
139 | #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) | 139 | #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) |
140 | 140 | ||
141 | void synchronize_srcu_expedited(struct srcu_struct *sp); | 141 | void synchronize_srcu_expedited(struct srcu_struct *ssp); |
142 | void srcu_barrier(struct srcu_struct *sp); | 142 | void srcu_barrier(struct srcu_struct *ssp); |
143 | void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf); | 143 | void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf); |
144 | 144 | ||
145 | #endif | 145 | #endif |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index e9de8ad0bad7..9c3186578ce0 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -82,7 +82,7 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb) | |||
82 | static inline void tracepoint_synchronize_unregister(void) | 82 | static inline void tracepoint_synchronize_unregister(void) |
83 | { | 83 | { |
84 | synchronize_srcu(&tracepoint_srcu); | 84 | synchronize_srcu(&tracepoint_srcu); |
85 | synchronize_sched(); | 85 | synchronize_rcu(); |
86 | } | 86 | } |
87 | #else | 87 | #else |
88 | static inline void tracepoint_synchronize_unregister(void) | 88 | static inline void tracepoint_synchronize_unregister(void) |
diff --git a/include/linux/types.h b/include/linux/types.h index 9834e90aa010..c2615d6a019e 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -212,8 +212,8 @@ struct ustat { | |||
212 | * weird ABI and we need to ask it explicitly. | 212 | * weird ABI and we need to ask it explicitly. |
213 | * | 213 | * |
214 | * The alignment is required to guarantee that bit 0 of @next will be | 214 | * The alignment is required to guarantee that bit 0 of @next will be |
215 | * clear under normal conditions -- as long as we use call_rcu(), | 215 | * clear under normal conditions -- as long as we use call_rcu() or |
216 | * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. | 216 | * call_srcu() to queue the callback. |
217 | * | 217 | * |
218 | * This guarantee is important for few reasons: | 218 | * This guarantee is important for few reasons: |
219 | * - future call_rcu_lazy() will make use of lower bits in the pointer; | 219 | * - future call_rcu_lazy() will make use of lower bits in the pointer; |
diff --git a/init/main.c b/init/main.c index ee147103ba1b..a45486330243 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -1046,12 +1046,12 @@ static void mark_readonly(void) | |||
1046 | { | 1046 | { |
1047 | if (rodata_enabled) { | 1047 | if (rodata_enabled) { |
1048 | /* | 1048 | /* |
1049 | * load_module() results in W+X mappings, which are cleaned up | 1049 | * load_module() results in W+X mappings, which are cleaned |
1050 | * with call_rcu_sched(). Let's make sure that queued work is | 1050 | * up with call_rcu(). Let's make sure that queued work is |
1051 | * flushed so that we don't hit false positives looking for | 1051 | * flushed so that we don't hit false positives looking for |
1052 | * insecure pages which are W+X. | 1052 | * insecure pages which are W+X. |
1053 | */ | 1053 | */ |
1054 | rcu_barrier_sched(); | 1054 | rcu_barrier(); |
1055 | mark_rodata_ro(); | 1055 | mark_rodata_ro(); |
1056 | rodata_test(); | 1056 | rodata_test(); |
1057 | } else | 1057 | } else |
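Worth noting for the hunk above: rcu_barrier() is not just a renamed synchronize_rcu(). synchronize_rcu() waits for a grace period, while rcu_barrier() additionally waits for callbacks already queued with call_rcu() to finish executing, which is what mark_readonly() needs before checking for stray W+X mappings. A sketch of the distinction in a teardown path (hypothetical):

    static void demo_teardown(void)
    {
            /* Wait for readers that might still be traversing our data. */
            synchronize_rcu();

            /*
             * Wait for every callback we queued with call_rcu() to have run,
             * so the memory (or module text) they touch can safely go away.
             */
            rcu_barrier();
    }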
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 6aaf5dd5383b..7a8429f8e280 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c | |||
@@ -5343,7 +5343,7 @@ int __init cgroup_init(void) | |||
5343 | cgroup_rstat_boot(); | 5343 | cgroup_rstat_boot(); |
5344 | 5344 | ||
5345 | /* | 5345 | /* |
5346 | * The latency of the synchronize_sched() is too high for cgroups, | 5346 | * The latency of the synchronize_rcu() is too high for cgroups, |
5347 | * avoid it at the cost of forcing all readers into the slow path. | 5347 | * avoid it at the cost of forcing all readers into the slow path. |
5348 | */ | 5348 | */ |
5349 | rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss); | 5349 | rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss); |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 84530ab358c3..c4b90cf7734a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -9918,7 +9918,7 @@ static void account_event(struct perf_event *event) | |||
9918 | * call the perf scheduling hooks before proceeding to | 9918 | * call the perf scheduling hooks before proceeding to |
9919 | * install events that need them. | 9919 | * install events that need them. |
9920 | */ | 9920 | */ |
9921 | synchronize_sched(); | 9921 | synchronize_rcu(); |
9922 | } | 9922 | } |
9923 | /* | 9923 | /* |
9924 | * Now that we have waited for the sync_sched(), allow further | 9924 | * Now that we have waited for the sync_sched(), allow further |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 90e98e233647..08e31d863191 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -229,7 +229,7 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c) | |||
229 | struct kprobe_insn_page *kip, *next; | 229 | struct kprobe_insn_page *kip, *next; |
230 | 230 | ||
231 | /* Ensure no-one is interrupted on the garbages */ | 231 | /* Ensure no-one is interrupted on the garbages */ |
232 | synchronize_sched(); | 232 | synchronize_rcu(); |
233 | 233 | ||
234 | list_for_each_entry_safe(kip, next, &c->pages, list) { | 234 | list_for_each_entry_safe(kip, next, &c->pages, list) { |
235 | int i; | 235 | int i; |
@@ -1382,7 +1382,7 @@ out: | |||
1382 | if (ret) { | 1382 | if (ret) { |
1383 | ap->flags |= KPROBE_FLAG_DISABLED; | 1383 | ap->flags |= KPROBE_FLAG_DISABLED; |
1384 | list_del_rcu(&p->list); | 1384 | list_del_rcu(&p->list); |
1385 | synchronize_sched(); | 1385 | synchronize_rcu(); |
1386 | } | 1386 | } |
1387 | } | 1387 | } |
1388 | } | 1388 | } |
@@ -1597,7 +1597,7 @@ int register_kprobe(struct kprobe *p) | |||
1597 | ret = arm_kprobe(p); | 1597 | ret = arm_kprobe(p); |
1598 | if (ret) { | 1598 | if (ret) { |
1599 | hlist_del_rcu(&p->hlist); | 1599 | hlist_del_rcu(&p->hlist); |
1600 | synchronize_sched(); | 1600 | synchronize_rcu(); |
1601 | goto out; | 1601 | goto out; |
1602 | } | 1602 | } |
1603 | } | 1603 | } |
@@ -1776,7 +1776,7 @@ void unregister_kprobes(struct kprobe **kps, int num) | |||
1776 | kps[i]->addr = NULL; | 1776 | kps[i]->addr = NULL; |
1777 | mutex_unlock(&kprobe_mutex); | 1777 | mutex_unlock(&kprobe_mutex); |
1778 | 1778 | ||
1779 | synchronize_sched(); | 1779 | synchronize_rcu(); |
1780 | for (i = 0; i < num; i++) | 1780 | for (i = 0; i < num; i++) |
1781 | if (kps[i]->addr) | 1781 | if (kps[i]->addr) |
1782 | __unregister_kprobe_bottom(kps[i]); | 1782 | __unregister_kprobe_bottom(kps[i]); |
@@ -1966,7 +1966,7 @@ void unregister_kretprobes(struct kretprobe **rps, int num) | |||
1966 | rps[i]->kp.addr = NULL; | 1966 | rps[i]->kp.addr = NULL; |
1967 | mutex_unlock(&kprobe_mutex); | 1967 | mutex_unlock(&kprobe_mutex); |
1968 | 1968 | ||
1969 | synchronize_sched(); | 1969 | synchronize_rcu(); |
1970 | for (i = 0; i < num; i++) { | 1970 | for (i = 0; i < num; i++) { |
1971 | if (rps[i]->kp.addr) { | 1971 | if (rps[i]->kp.addr) { |
1972 | __unregister_kprobe_bottom(&rps[i]->kp); | 1972 | __unregister_kprobe_bottom(&rps[i]->kp); |
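The kprobes conversions all follow the unpublish-then-wait idiom: drop the entry from its RCU-protected list, wait for a grace period so no CPU can still be traversing it, and only then free or reuse it. A generic sketch with a hypothetical struct demo_node:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct demo_node {
            struct list_head list;
            int key;
    };

    static LIST_HEAD(demo_nodes);               /* readers walk this under rcu_read_lock() */
    static DEFINE_MUTEX(demo_mutex);

    static void demo_node_remove(struct demo_node *n)
    {
            mutex_lock(&demo_mutex);
            list_del_rcu(&n->list);
            mutex_unlock(&demo_mutex);

            synchronize_rcu();                  /* no reader can still see n after this */
            kfree(n);
    }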
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index 82d584225dc6..7702cb4064fc 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c | |||
@@ -61,7 +61,7 @@ static void notrace klp_ftrace_handler(unsigned long ip, | |||
61 | ops = container_of(fops, struct klp_ops, fops); | 61 | ops = container_of(fops, struct klp_ops, fops); |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * A variant of synchronize_sched() is used to allow patching functions | 64 | * A variant of synchronize_rcu() is used to allow patching functions |
65 | * where RCU is not watching, see klp_synchronize_transition(). | 65 | * where RCU is not watching, see klp_synchronize_transition(). |
66 | */ | 66 | */ |
67 | preempt_disable_notrace(); | 67 | preempt_disable_notrace(); |
@@ -72,7 +72,7 @@ static void notrace klp_ftrace_handler(unsigned long ip, | |||
72 | /* | 72 | /* |
73 | * func should never be NULL because preemption should be disabled here | 73 | * func should never be NULL because preemption should be disabled here |
74 | * and unregister_ftrace_function() does the equivalent of a | 74 | * and unregister_ftrace_function() does the equivalent of a |
75 | * synchronize_sched() before the func_stack removal. | 75 | * synchronize_rcu() before the func_stack removal. |
76 | */ | 76 | */ |
77 | if (WARN_ON_ONCE(!func)) | 77 | if (WARN_ON_ONCE(!func)) |
78 | goto unlock; | 78 | goto unlock; |
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index 5bc349805e03..304d5eb8a98c 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c | |||
@@ -52,7 +52,7 @@ static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); | |||
52 | 52 | ||
53 | /* | 53 | /* |
54 | * This function is just a stub to implement a hard force | 54 | * This function is just a stub to implement a hard force |
55 | * of synchronize_sched(). This requires synchronizing | 55 | * of synchronize_rcu(). This requires synchronizing |
56 | * tasks even in userspace and idle. | 56 | * tasks even in userspace and idle. |
57 | */ | 57 | */ |
58 | static void klp_sync(struct work_struct *work) | 58 | static void klp_sync(struct work_struct *work) |
@@ -175,7 +175,7 @@ void klp_cancel_transition(void) | |||
175 | void klp_update_patch_state(struct task_struct *task) | 175 | void klp_update_patch_state(struct task_struct *task) |
176 | { | 176 | { |
177 | /* | 177 | /* |
178 | * A variant of synchronize_sched() is used to allow patching functions | 178 | * A variant of synchronize_rcu() is used to allow patching functions |
179 | * where RCU is not watching, see klp_synchronize_transition(). | 179 | * where RCU is not watching, see klp_synchronize_transition(). |
180 | */ | 180 | */ |
181 | preempt_disable_notrace(); | 181 | preempt_disable_notrace(); |
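For reference, klp_synchronize_transition() forces the "hard" grace period the comments above describe by running an empty work item on every CPU. Roughly, as a simplified sketch based on the surrounding transition.c code:

    #include <linux/workqueue.h>

    /* The work function does nothing; the context switch is the point. */
    static void klp_sync(struct work_struct *work)
    {
    }

    /*
     * schedule_on_each_cpu() queues klp_sync() on every CPU and waits for
     * completion, so every CPU is known to have passed through the
     * scheduler, including CPUs that were idle or running in userspace.
     */
    static void klp_synchronize_transition(void)
    {
            schedule_on_each_cpu(klp_sync);
    }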
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 1efada2dd9dd..ef27f98714c0 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -4195,7 +4195,7 @@ void lockdep_free_key_range(void *start, unsigned long size) | |||
4195 | * | 4195 | * |
4196 | * sync_sched() is sufficient because the read-side is IRQ disable. | 4196 | * sync_sched() is sufficient because the read-side is IRQ disable. |
4197 | */ | 4197 | */ |
4198 | synchronize_sched(); | 4198 | synchronize_rcu(); |
4199 | 4199 | ||
4200 | /* | 4200 | /* |
4201 | * XXX at this point we could return the resources to the pool; | 4201 | * XXX at this point we could return the resources to the pool; |
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c index 9aa713629387..771d4ca96dda 100644 --- a/kernel/locking/mutex-debug.c +++ b/kernel/locking/mutex-debug.c | |||
@@ -36,7 +36,7 @@ void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) | |||
36 | 36 | ||
37 | void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) | 37 | void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) |
38 | { | 38 | { |
39 | SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); | 39 | lockdep_assert_held(&lock->wait_lock); |
40 | DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list)); | 40 | DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list)); |
41 | DEBUG_LOCKS_WARN_ON(waiter->magic != waiter); | 41 | DEBUG_LOCKS_WARN_ON(waiter->magic != waiter); |
42 | DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); | 42 | DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); |
@@ -51,7 +51,7 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter) | |||
51 | void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, | 51 | void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, |
52 | struct task_struct *task) | 52 | struct task_struct *task) |
53 | { | 53 | { |
54 | SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); | 54 | lockdep_assert_held(&lock->wait_lock); |
55 | 55 | ||
56 | /* Mark the current thread as blocked on the lock: */ | 56 | /* Mark the current thread as blocked on the lock: */ |
57 | task->blocked_on = waiter; | 57 | task->blocked_on = waiter; |
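The mutex-debug change asserts the right thing: spin_is_locked() only reports that some context holds the lock (and may be meaningless on !SMP builds), whereas lockdep_assert_held() checks that the current context holds it according to lockdep. A minimal sketch of the idiom (my_lock and my_counter are hypothetical):

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);
    static int my_counter;

    /* Documents and enforces the caller's locking contract. */
    static void my_counter_bump(void)
    {
            lockdep_assert_held(&my_lock);  /* Must be called with my_lock held. */
            my_counter++;
    }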
diff --git a/kernel/module.c b/kernel/module.c index 49a405891587..99b46c32d579 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -2159,7 +2159,7 @@ static void free_module(struct module *mod) | |||
2159 | /* Remove this module from bug list, this uses list_del_rcu */ | 2159 | /* Remove this module from bug list, this uses list_del_rcu */ |
2160 | module_bug_cleanup(mod); | 2160 | module_bug_cleanup(mod); |
2161 | /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ | 2161 | /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ |
2162 | synchronize_sched(); | 2162 | synchronize_rcu(); |
2163 | mutex_unlock(&module_mutex); | 2163 | mutex_unlock(&module_mutex); |
2164 | 2164 | ||
2165 | /* This may be empty, but that's OK */ | 2165 | /* This may be empty, but that's OK */ |
@@ -3507,15 +3507,15 @@ static noinline int do_init_module(struct module *mod) | |||
3507 | /* | 3507 | /* |
3508 | * We want to free module_init, but be aware that kallsyms may be | 3508 | * We want to free module_init, but be aware that kallsyms may be |
3509 | * walking this with preempt disabled. In all the failure paths, we | 3509 | * walking this with preempt disabled. In all the failure paths, we |
3510 | * call synchronize_sched(), but we don't want to slow down the success | 3510 | * call synchronize_rcu(), but we don't want to slow down the success |
3511 | * path, so use actual RCU here. | 3511 | * path, so use actual RCU here. |
3512 | * Note that module_alloc() on most architectures creates W+X page | 3512 | * Note that module_alloc() on most architectures creates W+X page |
3513 | * mappings which won't be cleaned up until do_free_init() runs. Any | 3513 | * mappings which won't be cleaned up until do_free_init() runs. Any |
3514 | * code such as mark_rodata_ro() which depends on those mappings to | 3514 | * code such as mark_rodata_ro() which depends on those mappings to |
3515 | * be cleaned up needs to sync with the queued work - ie | 3515 | * be cleaned up needs to sync with the queued work - ie |
3516 | * rcu_barrier_sched() | 3516 | * rcu_barrier() |
3517 | */ | 3517 | */ |
3518 | call_rcu_sched(&freeinit->rcu, do_free_init); | 3518 | call_rcu(&freeinit->rcu, do_free_init); |
3519 | mutex_unlock(&module_mutex); | 3519 | mutex_unlock(&module_mutex); |
3520 | wake_up_all(&module_wq); | 3520 | wake_up_all(&module_wq); |
3521 | 3521 | ||
@@ -3526,7 +3526,7 @@ fail_free_freeinit: | |||
3526 | fail: | 3526 | fail: |
3527 | /* Try to protect us from buggy refcounters. */ | 3527 | /* Try to protect us from buggy refcounters. */ |
3528 | mod->state = MODULE_STATE_GOING; | 3528 | mod->state = MODULE_STATE_GOING; |
3529 | synchronize_sched(); | 3529 | synchronize_rcu(); |
3530 | module_put(mod); | 3530 | module_put(mod); |
3531 | blocking_notifier_call_chain(&module_notify_list, | 3531 | blocking_notifier_call_chain(&module_notify_list, |
3532 | MODULE_STATE_GOING, mod); | 3532 | MODULE_STATE_GOING, mod); |
@@ -3819,7 +3819,7 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3819 | ddebug_cleanup: | 3819 | ddebug_cleanup: |
3820 | ftrace_release_mod(mod); | 3820 | ftrace_release_mod(mod); |
3821 | dynamic_debug_remove(mod, info->debug); | 3821 | dynamic_debug_remove(mod, info->debug); |
3822 | synchronize_sched(); | 3822 | synchronize_rcu(); |
3823 | kfree(mod->args); | 3823 | kfree(mod->args); |
3824 | free_arch_cleanup: | 3824 | free_arch_cleanup: |
3825 | module_arch_cleanup(mod); | 3825 | module_arch_cleanup(mod); |
@@ -3834,7 +3834,7 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3834 | mod_tree_remove(mod); | 3834 | mod_tree_remove(mod); |
3835 | wake_up_all(&module_wq); | 3835 | wake_up_all(&module_wq); |
3836 | /* Wait for RCU-sched synchronizing before releasing mod->list. */ | 3836 | /* Wait for RCU-sched synchronizing before releasing mod->list. */ |
3837 | synchronize_sched(); | 3837 | synchronize_rcu(); |
3838 | mutex_unlock(&module_mutex); | 3838 | mutex_unlock(&module_mutex); |
3839 | free_module: | 3839 | free_module: |
3840 | /* Free lock-classes; relies on the preceding sync_rcu() */ | 3840 | /* Free lock-classes; relies on the preceding sync_rcu() */ |
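The do_init_module() hunk keeps the asynchronous flavor of the old call_rcu_sched() by switching to call_rcu(): the object embeds an rcu_head, and the free runs from the callback after a full grace period. A minimal sketch of that deferred-free pattern (deferred_buf is a hypothetical type):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct deferred_buf {
            struct rcu_head rcu;
            void *payload;
    };

    static void deferred_buf_free(struct rcu_head *head)
    {
            struct deferred_buf *b = container_of(head, struct deferred_buf, rcu);

            kfree(b->payload);
            kfree(b);
    }

    static void retire_deferred_buf(struct deferred_buf *b)
    {
            /* Free only after all pre-existing readers have finished. */
            call_rcu(&b->rcu, deferred_buf_free);
    }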
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 2866166863f0..a393e24a9195 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h | |||
@@ -526,12 +526,14 @@ srcu_batches_completed(struct srcu_struct *sp) { return 0; } | |||
526 | static inline void rcu_force_quiescent_state(void) { } | 526 | static inline void rcu_force_quiescent_state(void) { } |
527 | static inline void show_rcu_gp_kthreads(void) { } | 527 | static inline void show_rcu_gp_kthreads(void) { } |
528 | static inline int rcu_get_gp_kthreads_prio(void) { return 0; } | 528 | static inline int rcu_get_gp_kthreads_prio(void) { return 0; } |
529 | static inline void rcu_fwd_progress_check(unsigned long j) { } | ||
529 | #else /* #ifdef CONFIG_TINY_RCU */ | 530 | #else /* #ifdef CONFIG_TINY_RCU */ |
530 | unsigned long rcu_get_gp_seq(void); | 531 | unsigned long rcu_get_gp_seq(void); |
531 | unsigned long rcu_exp_batches_completed(void); | 532 | unsigned long rcu_exp_batches_completed(void); |
532 | unsigned long srcu_batches_completed(struct srcu_struct *sp); | 533 | unsigned long srcu_batches_completed(struct srcu_struct *sp); |
533 | void show_rcu_gp_kthreads(void); | 534 | void show_rcu_gp_kthreads(void); |
534 | int rcu_get_gp_kthreads_prio(void); | 535 | int rcu_get_gp_kthreads_prio(void); |
536 | void rcu_fwd_progress_check(unsigned long j); | ||
535 | void rcu_force_quiescent_state(void); | 537 | void rcu_force_quiescent_state(void); |
536 | extern struct workqueue_struct *rcu_gp_wq; | 538 | extern struct workqueue_struct *rcu_gp_wq; |
537 | extern struct workqueue_struct *rcu_par_gp_wq; | 539 | extern struct workqueue_struct *rcu_par_gp_wq; |
@@ -539,8 +541,10 @@ extern struct workqueue_struct *rcu_par_gp_wq; | |||
539 | 541 | ||
540 | #ifdef CONFIG_RCU_NOCB_CPU | 542 | #ifdef CONFIG_RCU_NOCB_CPU |
541 | bool rcu_is_nocb_cpu(int cpu); | 543 | bool rcu_is_nocb_cpu(int cpu); |
544 | void rcu_bind_current_to_nocb(void); | ||
542 | #else | 545 | #else |
543 | static inline bool rcu_is_nocb_cpu(int cpu) { return false; } | 546 | static inline bool rcu_is_nocb_cpu(int cpu) { return false; } |
547 | static inline void rcu_bind_current_to_nocb(void) { } | ||
544 | #endif | 548 | #endif |
545 | 549 | ||
546 | #endif /* __LINUX_RCU_H */ | 550 | #endif /* __LINUX_RCU_H */ |
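The rcu.h hunks follow the usual Tiny/Tree split: Tree builds get a real declaration, Tiny builds get a static-inline no-op, so callers such as rcutorture need no #ifdefs. The general shape, with my_diag_check standing in as a hypothetical name:

    #ifdef CONFIG_TINY_RCU
    static inline void my_diag_check(unsigned long j) { }  /* No-op on Tiny RCU. */
    #else
    void my_diag_check(unsigned long j);                    /* Implemented in Tree RCU. */
    #endif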
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 210c77460365..f6e85faa4ff4 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/vmalloc.h> | 56 | #include <linux/vmalloc.h> |
57 | #include <linux/sched/debug.h> | 57 | #include <linux/sched/debug.h> |
58 | #include <linux/sched/sysctl.h> | 58 | #include <linux/sched/sysctl.h> |
59 | #include <linux/oom.h> | ||
59 | 60 | ||
60 | #include "rcu.h" | 61 | #include "rcu.h" |
61 | 62 | ||
@@ -80,13 +81,6 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@jos | |||
80 | /* Must be power of two minus one. */ | 81 | /* Must be power of two minus one. */ |
81 | #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3) | 82 | #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3) |
82 | 83 | ||
83 | torture_param(int, cbflood_inter_holdoff, HZ, | ||
84 | "Holdoff between floods (jiffies)"); | ||
85 | torture_param(int, cbflood_intra_holdoff, 1, | ||
86 | "Holdoff between bursts (jiffies)"); | ||
87 | torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable"); | ||
88 | torture_param(int, cbflood_n_per_burst, 20000, | ||
89 | "# callbacks per burst in flood"); | ||
90 | torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, | 84 | torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, |
91 | "Extend readers by disabling bh (1), irqs (2), or preempt (4)"); | 85 | "Extend readers by disabling bh (1), irqs (2), or preempt (4)"); |
92 | torture_param(int, fqs_duration, 0, | 86 | torture_param(int, fqs_duration, 0, |
@@ -138,12 +132,10 @@ module_param(torture_type, charp, 0444); | |||
138 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); | 132 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); |
139 | 133 | ||
140 | static int nrealreaders; | 134 | static int nrealreaders; |
141 | static int ncbflooders; | ||
142 | static struct task_struct *writer_task; | 135 | static struct task_struct *writer_task; |
143 | static struct task_struct **fakewriter_tasks; | 136 | static struct task_struct **fakewriter_tasks; |
144 | static struct task_struct **reader_tasks; | 137 | static struct task_struct **reader_tasks; |
145 | static struct task_struct *stats_task; | 138 | static struct task_struct *stats_task; |
146 | static struct task_struct **cbflood_task; | ||
147 | static struct task_struct *fqs_task; | 139 | static struct task_struct *fqs_task; |
148 | static struct task_struct *boost_tasks[NR_CPUS]; | 140 | static struct task_struct *boost_tasks[NR_CPUS]; |
149 | static struct task_struct *stall_task; | 141 | static struct task_struct *stall_task; |
@@ -181,7 +173,6 @@ static long n_rcu_torture_boosts; | |||
181 | static atomic_long_t n_rcu_torture_timers; | 173 | static atomic_long_t n_rcu_torture_timers; |
182 | static long n_barrier_attempts; | 174 | static long n_barrier_attempts; |
183 | static long n_barrier_successes; /* did rcu_barrier test succeed? */ | 175 | static long n_barrier_successes; /* did rcu_barrier test succeed? */ |
184 | static atomic_long_t n_cbfloods; | ||
185 | static struct list_head rcu_torture_removed; | 176 | static struct list_head rcu_torture_removed; |
186 | 177 | ||
187 | static int rcu_torture_writer_state; | 178 | static int rcu_torture_writer_state; |
@@ -259,6 +250,8 @@ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ | |||
259 | static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ | 250 | static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ |
260 | static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); | 251 | static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); |
261 | 252 | ||
253 | static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ | ||
254 | |||
262 | /* | 255 | /* |
263 | * Allocate an element from the rcu_tortures pool. | 256 | * Allocate an element from the rcu_tortures pool. |
264 | */ | 257 | */ |
@@ -348,7 +341,8 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) | |||
348 | * period, and we want a long delay occasionally to trigger | 341 | * period, and we want a long delay occasionally to trigger |
349 | * force_quiescent_state. */ | 342 | * force_quiescent_state. */ |
350 | 343 | ||
351 | if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { | 344 | if (!rcu_fwd_cb_nodelay && |
345 | !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { | ||
352 | started = cur_ops->get_gp_seq(); | 346 | started = cur_ops->get_gp_seq(); |
353 | ts = rcu_trace_clock_local(); | 347 | ts = rcu_trace_clock_local(); |
354 | if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) | 348 | if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) |
@@ -870,59 +864,6 @@ checkwait: stutter_wait("rcu_torture_boost"); | |||
870 | return 0; | 864 | return 0; |
871 | } | 865 | } |
872 | 866 | ||
873 | static void rcu_torture_cbflood_cb(struct rcu_head *rhp) | ||
874 | { | ||
875 | } | ||
876 | |||
877 | /* | ||
878 | * RCU torture callback-flood kthread. Repeatedly induces bursts of calls | ||
879 | * to call_rcu() or analogous, increasing the probability of occurrence | ||
880 | * of callback-overflow corner cases. | ||
881 | */ | ||
882 | static int | ||
883 | rcu_torture_cbflood(void *arg) | ||
884 | { | ||
885 | int err = 1; | ||
886 | int i; | ||
887 | int j; | ||
888 | struct rcu_head *rhp; | ||
889 | |||
890 | if (cbflood_n_per_burst > 0 && | ||
891 | cbflood_inter_holdoff > 0 && | ||
892 | cbflood_intra_holdoff > 0 && | ||
893 | cur_ops->call && | ||
894 | cur_ops->cb_barrier) { | ||
895 | rhp = vmalloc(array3_size(cbflood_n_burst, | ||
896 | cbflood_n_per_burst, | ||
897 | sizeof(*rhp))); | ||
898 | err = !rhp; | ||
899 | } | ||
900 | if (err) { | ||
901 | VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM"); | ||
902 | goto wait_for_stop; | ||
903 | } | ||
904 | VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started"); | ||
905 | do { | ||
906 | schedule_timeout_interruptible(cbflood_inter_holdoff); | ||
907 | atomic_long_inc(&n_cbfloods); | ||
908 | WARN_ON(signal_pending(current)); | ||
909 | for (i = 0; i < cbflood_n_burst; i++) { | ||
910 | for (j = 0; j < cbflood_n_per_burst; j++) { | ||
911 | cur_ops->call(&rhp[i * cbflood_n_per_burst + j], | ||
912 | rcu_torture_cbflood_cb); | ||
913 | } | ||
914 | schedule_timeout_interruptible(cbflood_intra_holdoff); | ||
915 | WARN_ON(signal_pending(current)); | ||
916 | } | ||
917 | cur_ops->cb_barrier(); | ||
918 | stutter_wait("rcu_torture_cbflood"); | ||
919 | } while (!torture_must_stop()); | ||
920 | vfree(rhp); | ||
921 | wait_for_stop: | ||
922 | torture_kthread_stopping("rcu_torture_cbflood"); | ||
923 | return 0; | ||
924 | } | ||
925 | |||
926 | /* | 867 | /* |
927 | * RCU torture force-quiescent-state kthread. Repeatedly induces | 868 | * RCU torture force-quiescent-state kthread. Repeatedly induces |
928 | * bursts of calls to force_quiescent_state(), increasing the probability | 869 | * bursts of calls to force_quiescent_state(), increasing the probability |
@@ -1457,11 +1398,10 @@ rcu_torture_stats_print(void) | |||
1457 | n_rcu_torture_boosts, | 1398 | n_rcu_torture_boosts, |
1458 | atomic_long_read(&n_rcu_torture_timers)); | 1399 | atomic_long_read(&n_rcu_torture_timers)); |
1459 | torture_onoff_stats(); | 1400 | torture_onoff_stats(); |
1460 | pr_cont("barrier: %ld/%ld:%ld ", | 1401 | pr_cont("barrier: %ld/%ld:%ld\n", |
1461 | n_barrier_successes, | 1402 | n_barrier_successes, |
1462 | n_barrier_attempts, | 1403 | n_barrier_attempts, |
1463 | n_rcu_torture_barrier_error); | 1404 | n_rcu_torture_barrier_error); |
1464 | pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods)); | ||
1465 | 1405 | ||
1466 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); | 1406 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
1467 | if (atomic_read(&n_rcu_torture_mberror) != 0 || | 1407 | if (atomic_read(&n_rcu_torture_mberror) != 0 || |
@@ -1674,8 +1614,90 @@ static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) | |||
1674 | cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); | 1614 | cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); |
1675 | } | 1615 | } |
1676 | 1616 | ||
1677 | /* Carry out grace-period forward-progress testing. */ | 1617 | /* State for continuous-flood RCU callbacks. */ |
1678 | static int rcu_torture_fwd_prog(void *args) | 1618 | struct rcu_fwd_cb { |
1619 | struct rcu_head rh; | ||
1620 | struct rcu_fwd_cb *rfc_next; | ||
1621 | int rfc_gps; | ||
1622 | }; | ||
1623 | static DEFINE_SPINLOCK(rcu_fwd_lock); | ||
1624 | static struct rcu_fwd_cb *rcu_fwd_cb_head; | ||
1625 | static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head; | ||
1626 | static long n_launders_cb; | ||
1627 | static unsigned long rcu_fwd_startat; | ||
1628 | static bool rcu_fwd_emergency_stop; | ||
1629 | #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ | ||
1630 | #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ | ||
1631 | #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ | ||
1632 | #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ | ||
1633 | static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)]; | ||
1634 | |||
1635 | static void rcu_torture_fwd_cb_hist(void) | ||
1636 | { | ||
1637 | int i; | ||
1638 | int j; | ||
1639 | |||
1640 | for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--) | ||
1641 | if (n_launders_hist[i] > 0) | ||
1642 | break; | ||
1643 | pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", | ||
1644 | __func__, jiffies - rcu_fwd_startat); | ||
1645 | for (j = 0; j <= i; j++) | ||
1646 | pr_cont(" %ds/%d: %ld", | ||
1647 | j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j]); | ||
1648 | pr_cont("\n"); | ||
1649 | } | ||
1650 | |||
1651 | /* Callback function for continuous-flood RCU callbacks. */ | ||
1652 | static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) | ||
1653 | { | ||
1654 | unsigned long flags; | ||
1655 | int i; | ||
1656 | struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); | ||
1657 | struct rcu_fwd_cb **rfcpp; | ||
1658 | |||
1659 | rfcp->rfc_next = NULL; | ||
1660 | rfcp->rfc_gps++; | ||
1661 | spin_lock_irqsave(&rcu_fwd_lock, flags); | ||
1662 | rfcpp = rcu_fwd_cb_tail; | ||
1663 | rcu_fwd_cb_tail = &rfcp->rfc_next; | ||
1664 | WRITE_ONCE(*rfcpp, rfcp); | ||
1665 | WRITE_ONCE(n_launders_cb, n_launders_cb + 1); | ||
1666 | i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); | ||
1667 | if (i >= ARRAY_SIZE(n_launders_hist)) | ||
1668 | i = ARRAY_SIZE(n_launders_hist) - 1; | ||
1669 | n_launders_hist[i]++; | ||
1670 | spin_unlock_irqrestore(&rcu_fwd_lock, flags); | ||
1671 | } | ||
1672 | |||
1673 | /* | ||
1674 | * Free all callbacks on the rcu_fwd_cb_head list, either because the | ||
1675 | * test is over or because we hit an OOM event. | ||
1676 | */ | ||
1677 | static unsigned long rcu_torture_fwd_prog_cbfree(void) | ||
1678 | { | ||
1679 | unsigned long flags; | ||
1680 | unsigned long freed = 0; | ||
1681 | struct rcu_fwd_cb *rfcp; | ||
1682 | |||
1683 | for (;;) { | ||
1684 | spin_lock_irqsave(&rcu_fwd_lock, flags); | ||
1685 | rfcp = rcu_fwd_cb_head; | ||
1686 | if (!rfcp) | ||
1687 | break; | ||
1688 | rcu_fwd_cb_head = rfcp->rfc_next; | ||
1689 | if (!rcu_fwd_cb_head) | ||
1690 | rcu_fwd_cb_tail = &rcu_fwd_cb_head; | ||
1691 | spin_unlock_irqrestore(&rcu_fwd_lock, flags); | ||
1692 | kfree(rfcp); | ||
1693 | freed++; | ||
1694 | } | ||
1695 | spin_unlock_irqrestore(&rcu_fwd_lock, flags); | ||
1696 | return freed; | ||
1697 | } | ||
1698 | |||
1699 | /* Carry out need_resched()/cond_resched() forward-progress testing. */ | ||
1700 | static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries) | ||
1679 | { | 1701 | { |
1680 | unsigned long cver; | 1702 | unsigned long cver; |
1681 | unsigned long dur; | 1703 | unsigned long dur; |
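The new callback-flood state above chains rcu_fwd_cb structures through a head pointer plus a tail pointer-to-pointer, so rcu_torture_fwd_cb_cr() can append in O(1) while the posting loop consumes from the head. A minimal sketch of that queue discipline with the locking and histogram bookkeeping stripped out (qelem is a hypothetical stand-in for rcu_fwd_cb):

    struct qelem {
            struct qelem *next;
    };

    static struct qelem *q_head;
    static struct qelem **q_tail = &q_head;

    static void q_enqueue(struct qelem *e)
    {
            e->next = NULL;
            *q_tail = e;            /* Link in at the current tail... */
            q_tail = &e->next;      /* ...and advance the tail pointer. */
    }

    static struct qelem *q_dequeue(void)
    {
            struct qelem *e = q_head;

            if (!e)
                    return NULL;
            q_head = e->next;
            if (!q_head)
                    q_tail = &q_head;       /* List now empty; reset tail. */
            return e;
    }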
@@ -1686,56 +1708,186 @@ static int rcu_torture_fwd_prog(void *args) | |||
1686 | int sd4; | 1708 | int sd4; |
1687 | bool selfpropcb = false; | 1709 | bool selfpropcb = false; |
1688 | unsigned long stopat; | 1710 | unsigned long stopat; |
1689 | int tested = 0; | ||
1690 | int tested_tries = 0; | ||
1691 | static DEFINE_TORTURE_RANDOM(trs); | 1711 | static DEFINE_TORTURE_RANDOM(trs); |
1692 | 1712 | ||
1693 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); | ||
1694 | if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) | ||
1695 | set_user_nice(current, MAX_NICE); | ||
1696 | if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { | 1713 | if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { |
1697 | init_rcu_head_on_stack(&fcs.rh); | 1714 | init_rcu_head_on_stack(&fcs.rh); |
1698 | selfpropcb = true; | 1715 | selfpropcb = true; |
1699 | } | 1716 | } |
1717 | |||
1718 | /* Tight loop containing cond_resched(). */ | ||
1719 | if (selfpropcb) { | ||
1720 | WRITE_ONCE(fcs.stop, 0); | ||
1721 | cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); | ||
1722 | } | ||
1723 | cver = READ_ONCE(rcu_torture_current_version); | ||
1724 | gps = cur_ops->get_gp_seq(); | ||
1725 | sd = cur_ops->stall_dur() + 1; | ||
1726 | sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; | ||
1727 | dur = sd4 + torture_random(&trs) % (sd - sd4); | ||
1728 | WRITE_ONCE(rcu_fwd_startat, jiffies); | ||
1729 | stopat = rcu_fwd_startat + dur; | ||
1730 | while (time_before(jiffies, stopat) && | ||
1731 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { | ||
1732 | idx = cur_ops->readlock(); | ||
1733 | udelay(10); | ||
1734 | cur_ops->readunlock(idx); | ||
1735 | if (!fwd_progress_need_resched || need_resched()) | ||
1736 | cond_resched(); | ||
1737 | } | ||
1738 | (*tested_tries)++; | ||
1739 | if (!time_before(jiffies, stopat) && | ||
1740 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { | ||
1741 | (*tested)++; | ||
1742 | cver = READ_ONCE(rcu_torture_current_version) - cver; | ||
1743 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); | ||
1744 | WARN_ON(!cver && gps < 2); | ||
1745 | pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); | ||
1746 | } | ||
1747 | if (selfpropcb) { | ||
1748 | WRITE_ONCE(fcs.stop, 1); | ||
1749 | cur_ops->sync(); /* Wait for running CB to complete. */ | ||
1750 | cur_ops->cb_barrier(); /* Wait for queued callbacks. */ | ||
1751 | } | ||
1752 | |||
1753 | if (selfpropcb) { | ||
1754 | WARN_ON(READ_ONCE(fcs.stop) != 2); | ||
1755 | destroy_rcu_head_on_stack(&fcs.rh); | ||
1756 | } | ||
1757 | } | ||
1758 | |||
1759 | /* Carry out call_rcu() forward-progress testing. */ | ||
1760 | static void rcu_torture_fwd_prog_cr(void) | ||
1761 | { | ||
1762 | unsigned long cver; | ||
1763 | unsigned long gps; | ||
1764 | int i; | ||
1765 | long n_launders; | ||
1766 | long n_launders_cb_snap; | ||
1767 | long n_launders_sa; | ||
1768 | long n_max_cbs; | ||
1769 | long n_max_gps; | ||
1770 | struct rcu_fwd_cb *rfcp; | ||
1771 | struct rcu_fwd_cb *rfcpn; | ||
1772 | unsigned long stopat; | ||
1773 | unsigned long stoppedat; | ||
1774 | |||
1775 | if (READ_ONCE(rcu_fwd_emergency_stop)) | ||
1776 | return; /* Get out of the way quickly, no GP wait! */ | ||
1777 | |||
1778 | /* Loop continuously posting RCU callbacks. */ | ||
1779 | WRITE_ONCE(rcu_fwd_cb_nodelay, true); | ||
1780 | cur_ops->sync(); /* Later readers see above write. */ | ||
1781 | WRITE_ONCE(rcu_fwd_startat, jiffies); | ||
1782 | stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES; | ||
1783 | n_launders = 0; | ||
1784 | n_launders_cb = 0; | ||
1785 | n_launders_sa = 0; | ||
1786 | n_max_cbs = 0; | ||
1787 | n_max_gps = 0; | ||
1788 | for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++) | ||
1789 | n_launders_hist[i] = 0; | ||
1790 | cver = READ_ONCE(rcu_torture_current_version); | ||
1791 | gps = cur_ops->get_gp_seq(); | ||
1792 | while (time_before(jiffies, stopat) && | ||
1793 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { | ||
1794 | rfcp = READ_ONCE(rcu_fwd_cb_head); | ||
1795 | rfcpn = NULL; | ||
1796 | if (rfcp) | ||
1797 | rfcpn = READ_ONCE(rfcp->rfc_next); | ||
1798 | if (rfcpn) { | ||
1799 | if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && | ||
1800 | ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) | ||
1801 | break; | ||
1802 | rcu_fwd_cb_head = rfcpn; | ||
1803 | n_launders++; | ||
1804 | n_launders_sa++; | ||
1805 | } else { | ||
1806 | rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); | ||
1807 | if (WARN_ON_ONCE(!rfcp)) { | ||
1808 | schedule_timeout_interruptible(1); | ||
1809 | continue; | ||
1810 | } | ||
1811 | n_max_cbs++; | ||
1812 | n_launders_sa = 0; | ||
1813 | rfcp->rfc_gps = 0; | ||
1814 | } | ||
1815 | cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); | ||
1816 | cond_resched(); | ||
1817 | } | ||
1818 | stoppedat = jiffies; | ||
1819 | n_launders_cb_snap = READ_ONCE(n_launders_cb); | ||
1820 | cver = READ_ONCE(rcu_torture_current_version) - cver; | ||
1821 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); | ||
1822 | cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ | ||
1823 | (void)rcu_torture_fwd_prog_cbfree(); | ||
1824 | |||
1825 | WRITE_ONCE(rcu_fwd_cb_nodelay, false); | ||
1826 | if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) { | ||
1827 | WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); | ||
1828 | pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", | ||
1829 | __func__, | ||
1830 | stoppedat - rcu_fwd_startat, jiffies - stoppedat, | ||
1831 | n_launders + n_max_cbs - n_launders_cb_snap, | ||
1832 | n_launders, n_launders_sa, | ||
1833 | n_max_gps, n_max_cbs, cver, gps); | ||
1834 | rcu_torture_fwd_cb_hist(); | ||
1835 | } | ||
1836 | } | ||
1837 | |||
1838 | |||
1839 | /* | ||
1840 | * OOM notifier, but this only prints diagnostic information for the | ||
1841 | * current forward-progress test. | ||
1842 | */ | ||
1843 | static int rcutorture_oom_notify(struct notifier_block *self, | ||
1844 | unsigned long notused, void *nfreed) | ||
1845 | { | ||
1846 | WARN(1, "%s invoked upon OOM during forward-progress testing.\n", | ||
1847 | __func__); | ||
1848 | rcu_torture_fwd_cb_hist(); | ||
1849 | rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2); | ||
1850 | WRITE_ONCE(rcu_fwd_emergency_stop, true); | ||
1851 | smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ | ||
1852 | pr_info("%s: Freed %lu RCU callbacks.\n", | ||
1853 | __func__, rcu_torture_fwd_prog_cbfree()); | ||
1854 | rcu_barrier(); | ||
1855 | pr_info("%s: Freed %lu RCU callbacks.\n", | ||
1856 | __func__, rcu_torture_fwd_prog_cbfree()); | ||
1857 | rcu_barrier(); | ||
1858 | pr_info("%s: Freed %lu RCU callbacks.\n", | ||
1859 | __func__, rcu_torture_fwd_prog_cbfree()); | ||
1860 | smp_mb(); /* Frees before return to avoid redoing OOM. */ | ||
1861 | (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ | ||
1862 | pr_info("%s returning after OOM processing.\n", __func__); | ||
1863 | return NOTIFY_OK; | ||
1864 | } | ||
1865 | |||
1866 | static struct notifier_block rcutorture_oom_nb = { | ||
1867 | .notifier_call = rcutorture_oom_notify | ||
1868 | }; | ||
1869 | |||
1870 | /* Carry out grace-period forward-progress testing. */ | ||
1871 | static int rcu_torture_fwd_prog(void *args) | ||
1872 | { | ||
1873 | int tested = 0; | ||
1874 | int tested_tries = 0; | ||
1875 | |||
1876 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); | ||
1877 | rcu_bind_current_to_nocb(); | ||
1878 | if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) | ||
1879 | set_user_nice(current, MAX_NICE); | ||
1700 | do { | 1880 | do { |
1701 | schedule_timeout_interruptible(fwd_progress_holdoff * HZ); | 1881 | schedule_timeout_interruptible(fwd_progress_holdoff * HZ); |
1702 | if (selfpropcb) { | 1882 | WRITE_ONCE(rcu_fwd_emergency_stop, false); |
1703 | WRITE_ONCE(fcs.stop, 0); | 1883 | register_oom_notifier(&rcutorture_oom_nb); |
1704 | cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); | 1884 | rcu_torture_fwd_prog_nr(&tested, &tested_tries); |
1705 | } | 1885 | rcu_torture_fwd_prog_cr(); |
1706 | cver = READ_ONCE(rcu_torture_current_version); | 1886 | unregister_oom_notifier(&rcutorture_oom_nb); |
1707 | gps = cur_ops->get_gp_seq(); | 1887 | |
1708 | sd = cur_ops->stall_dur() + 1; | ||
1709 | sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; | ||
1710 | dur = sd4 + torture_random(&trs) % (sd - sd4); | ||
1711 | stopat = jiffies + dur; | ||
1712 | while (time_before(jiffies, stopat) && !torture_must_stop()) { | ||
1713 | idx = cur_ops->readlock(); | ||
1714 | udelay(10); | ||
1715 | cur_ops->readunlock(idx); | ||
1716 | if (!fwd_progress_need_resched || need_resched()) | ||
1717 | cond_resched(); | ||
1718 | } | ||
1719 | tested_tries++; | ||
1720 | if (!time_before(jiffies, stopat) && !torture_must_stop()) { | ||
1721 | tested++; | ||
1722 | cver = READ_ONCE(rcu_torture_current_version) - cver; | ||
1723 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); | ||
1724 | WARN_ON(!cver && gps < 2); | ||
1725 | pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); | ||
1726 | } | ||
1727 | if (selfpropcb) { | ||
1728 | WRITE_ONCE(fcs.stop, 1); | ||
1729 | cur_ops->sync(); /* Wait for running CB to complete. */ | ||
1730 | cur_ops->cb_barrier(); /* Wait for queued callbacks. */ | ||
1731 | } | ||
1732 | /* Avoid slow periods, better to test when busy. */ | 1888 | /* Avoid slow periods, better to test when busy. */ |
1733 | stutter_wait("rcu_torture_fwd_prog"); | 1889 | stutter_wait("rcu_torture_fwd_prog"); |
1734 | } while (!torture_must_stop()); | 1890 | } while (!torture_must_stop()); |
1735 | if (selfpropcb) { | ||
1736 | WARN_ON(READ_ONCE(fcs.stop) != 2); | ||
1737 | destroy_rcu_head_on_stack(&fcs.rh); | ||
1738 | } | ||
1739 | /* Short runs might not contain a valid forward-progress attempt. */ | 1891 | /* Short runs might not contain a valid forward-progress attempt. */ |
1740 | WARN_ON(!tested && tested_tries >= 5); | 1892 | WARN_ON(!tested && tested_tries >= 5); |
1741 | pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); | 1893 | pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); |
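The forward-progress kthread registers the OOM notifier only around each test pass, so the diagnostics fire only while the callback flood is actually running. A minimal sketch of that scoped-notifier pattern (my_oom_notify and run_hungry_phase are hypothetical):

    #include <linux/notifier.h>
    #include <linux/oom.h>
    #include <linux/printk.h>

    static int my_oom_notify(struct notifier_block *self,
                             unsigned long unused, void *nfreed)
    {
            pr_info("%s: OOM observed during the hungry phase.\n", __func__);
            /* If memory were freed here, *(unsigned long *)nfreed would be bumped. */
            return NOTIFY_OK;
    }

    static struct notifier_block my_oom_nb = {
            .notifier_call = my_oom_notify,
    };

    /* Register the notifier only for the duration of the memory-hungry phase. */
    static void run_hungry_phase(void (*phase)(void))
    {
            register_oom_notifier(&my_oom_nb);
            phase();
            unregister_oom_notifier(&my_oom_nb);
    }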
@@ -1748,7 +1900,8 @@ static int __init rcu_torture_fwd_prog_init(void) | |||
1748 | { | 1900 | { |
1749 | if (!fwd_progress) | 1901 | if (!fwd_progress) |
1750 | return 0; /* Not requested, so don't do it. */ | 1902 | return 0; /* Not requested, so don't do it. */ |
1751 | if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0) { | 1903 | if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || |
1904 | cur_ops == &rcu_busted_ops) { | ||
1752 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); | 1905 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); |
1753 | return 0; | 1906 | return 0; |
1754 | } | 1907 | } |
@@ -1968,8 +2121,6 @@ rcu_torture_cleanup(void) | |||
1968 | cur_ops->name, gp_seq, flags); | 2121 | cur_ops->name, gp_seq, flags); |
1969 | torture_stop_kthread(rcu_torture_stats, stats_task); | 2122 | torture_stop_kthread(rcu_torture_stats, stats_task); |
1970 | torture_stop_kthread(rcu_torture_fqs, fqs_task); | 2123 | torture_stop_kthread(rcu_torture_fqs, fqs_task); |
1971 | for (i = 0; i < ncbflooders; i++) | ||
1972 | torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]); | ||
1973 | if (rcu_torture_can_boost()) | 2124 | if (rcu_torture_can_boost()) |
1974 | cpuhp_remove_state(rcutor_hp); | 2125 | cpuhp_remove_state(rcutor_hp); |
1975 | 2126 | ||
@@ -2252,24 +2403,6 @@ rcu_torture_init(void) | |||
2252 | goto unwind; | 2403 | goto unwind; |
2253 | if (object_debug) | 2404 | if (object_debug) |
2254 | rcu_test_debug_objects(); | 2405 | rcu_test_debug_objects(); |
2255 | if (cbflood_n_burst > 0) { | ||
2256 | /* Create the cbflood threads */ | ||
2257 | ncbflooders = (num_online_cpus() + 3) / 4; | ||
2258 | cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task), | ||
2259 | GFP_KERNEL); | ||
2260 | if (!cbflood_task) { | ||
2261 | VERBOSE_TOROUT_ERRSTRING("out of memory"); | ||
2262 | firsterr = -ENOMEM; | ||
2263 | goto unwind; | ||
2264 | } | ||
2265 | for (i = 0; i < ncbflooders; i++) { | ||
2266 | firsterr = torture_create_kthread(rcu_torture_cbflood, | ||
2267 | NULL, | ||
2268 | cbflood_task[i]); | ||
2269 | if (firsterr) | ||
2270 | goto unwind; | ||
2271 | } | ||
2272 | } | ||
2273 | torture_init_end(); | 2406 | torture_init_end(); |
2274 | return 0; | 2407 | return 0; |
2275 | 2408 | ||
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index b46e6683f8c9..32dfd6522548 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c | |||
@@ -37,30 +37,30 @@ int rcu_scheduler_active __read_mostly; | |||
37 | static LIST_HEAD(srcu_boot_list); | 37 | static LIST_HEAD(srcu_boot_list); |
38 | static bool srcu_init_done; | 38 | static bool srcu_init_done; |
39 | 39 | ||
40 | static int init_srcu_struct_fields(struct srcu_struct *sp) | 40 | static int init_srcu_struct_fields(struct srcu_struct *ssp) |
41 | { | 41 | { |
42 | sp->srcu_lock_nesting[0] = 0; | 42 | ssp->srcu_lock_nesting[0] = 0; |
43 | sp->srcu_lock_nesting[1] = 0; | 43 | ssp->srcu_lock_nesting[1] = 0; |
44 | init_swait_queue_head(&sp->srcu_wq); | 44 | init_swait_queue_head(&ssp->srcu_wq); |
45 | sp->srcu_cb_head = NULL; | 45 | ssp->srcu_cb_head = NULL; |
46 | sp->srcu_cb_tail = &sp->srcu_cb_head; | 46 | ssp->srcu_cb_tail = &ssp->srcu_cb_head; |
47 | sp->srcu_gp_running = false; | 47 | ssp->srcu_gp_running = false; |
48 | sp->srcu_gp_waiting = false; | 48 | ssp->srcu_gp_waiting = false; |
49 | sp->srcu_idx = 0; | 49 | ssp->srcu_idx = 0; |
50 | INIT_WORK(&sp->srcu_work, srcu_drive_gp); | 50 | INIT_WORK(&ssp->srcu_work, srcu_drive_gp); |
51 | INIT_LIST_HEAD(&sp->srcu_work.entry); | 51 | INIT_LIST_HEAD(&ssp->srcu_work.entry); |
52 | return 0; | 52 | return 0; |
53 | } | 53 | } |
54 | 54 | ||
55 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 55 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
56 | 56 | ||
57 | int __init_srcu_struct(struct srcu_struct *sp, const char *name, | 57 | int __init_srcu_struct(struct srcu_struct *ssp, const char *name, |
58 | struct lock_class_key *key) | 58 | struct lock_class_key *key) |
59 | { | 59 | { |
60 | /* Don't re-initialize a lock while it is held. */ | 60 | /* Don't re-initialize a lock while it is held. */ |
61 | debug_check_no_locks_freed((void *)sp, sizeof(*sp)); | 61 | debug_check_no_locks_freed((void *)ssp, sizeof(*ssp)); |
62 | lockdep_init_map(&sp->dep_map, name, key, 0); | 62 | lockdep_init_map(&ssp->dep_map, name, key, 0); |
63 | return init_srcu_struct_fields(sp); | 63 | return init_srcu_struct_fields(ssp); |
64 | } | 64 | } |
65 | EXPORT_SYMBOL_GPL(__init_srcu_struct); | 65 | EXPORT_SYMBOL_GPL(__init_srcu_struct); |
66 | 66 | ||
@@ -68,15 +68,15 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct); | |||
68 | 68 | ||
69 | /* | 69 | /* |
70 | * init_srcu_struct - initialize a sleep-RCU structure | 70 | * init_srcu_struct - initialize a sleep-RCU structure |
71 | * @sp: structure to initialize. | 71 | * @ssp: structure to initialize. |
72 | * | 72 | * |
73 | * Must invoke this on a given srcu_struct before passing that srcu_struct | 73 | * Must invoke this on a given srcu_struct before passing that srcu_struct |
74 | * to any other function. Each srcu_struct represents a separate domain | 74 | * to any other function. Each srcu_struct represents a separate domain |
75 | * of SRCU protection. | 75 | * of SRCU protection. |
76 | */ | 76 | */ |
77 | int init_srcu_struct(struct srcu_struct *sp) | 77 | int init_srcu_struct(struct srcu_struct *ssp) |
78 | { | 78 | { |
79 | return init_srcu_struct_fields(sp); | 79 | return init_srcu_struct_fields(ssp); |
80 | } | 80 | } |
81 | EXPORT_SYMBOL_GPL(init_srcu_struct); | 81 | EXPORT_SYMBOL_GPL(init_srcu_struct); |
82 | 82 | ||
@@ -84,22 +84,22 @@ EXPORT_SYMBOL_GPL(init_srcu_struct); | |||
84 | 84 | ||
85 | /* | 85 | /* |
86 | * cleanup_srcu_struct - deconstruct a sleep-RCU structure | 86 | * cleanup_srcu_struct - deconstruct a sleep-RCU structure |
87 | * @sp: structure to clean up. | 87 | * @ssp: structure to clean up. |
88 | * | 88 | * |
89 | * Must invoke this after you are finished using a given srcu_struct that | 89 | * Must invoke this after you are finished using a given srcu_struct that |
90 | * was initialized via init_srcu_struct(), else you leak memory. | 90 | * was initialized via init_srcu_struct(), else you leak memory. |
91 | */ | 91 | */ |
92 | void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced) | 92 | void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced) |
93 | { | 93 | { |
94 | WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]); | 94 | WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]); |
95 | if (quiesced) | 95 | if (quiesced) |
96 | WARN_ON(work_pending(&sp->srcu_work)); | 96 | WARN_ON(work_pending(&ssp->srcu_work)); |
97 | else | 97 | else |
98 | flush_work(&sp->srcu_work); | 98 | flush_work(&ssp->srcu_work); |
99 | WARN_ON(sp->srcu_gp_running); | 99 | WARN_ON(ssp->srcu_gp_running); |
100 | WARN_ON(sp->srcu_gp_waiting); | 100 | WARN_ON(ssp->srcu_gp_waiting); |
101 | WARN_ON(sp->srcu_cb_head); | 101 | WARN_ON(ssp->srcu_cb_head); |
102 | WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail); | 102 | WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail); |
103 | } | 103 | } |
104 | EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); | 104 | EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); |
105 | 105 | ||
@@ -107,13 +107,13 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); | |||
107 | * Removes the count for the old reader from the appropriate element of | 107 | * Removes the count for the old reader from the appropriate element of |
108 | * the srcu_struct. | 108 | * the srcu_struct. |
109 | */ | 109 | */ |
110 | void __srcu_read_unlock(struct srcu_struct *sp, int idx) | 110 | void __srcu_read_unlock(struct srcu_struct *ssp, int idx) |
111 | { | 111 | { |
112 | int newval = sp->srcu_lock_nesting[idx] - 1; | 112 | int newval = ssp->srcu_lock_nesting[idx] - 1; |
113 | 113 | ||
114 | WRITE_ONCE(sp->srcu_lock_nesting[idx], newval); | 114 | WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval); |
115 | if (!newval && READ_ONCE(sp->srcu_gp_waiting)) | 115 | if (!newval && READ_ONCE(ssp->srcu_gp_waiting)) |
116 | swake_up_one(&sp->srcu_wq); | 116 | swake_up_one(&ssp->srcu_wq); |
117 | } | 117 | } |
118 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); | 118 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); |
119 | 119 | ||
@@ -127,24 +127,24 @@ void srcu_drive_gp(struct work_struct *wp) | |||
127 | int idx; | 127 | int idx; |
128 | struct rcu_head *lh; | 128 | struct rcu_head *lh; |
129 | struct rcu_head *rhp; | 129 | struct rcu_head *rhp; |
130 | struct srcu_struct *sp; | 130 | struct srcu_struct *ssp; |
131 | 131 | ||
132 | sp = container_of(wp, struct srcu_struct, srcu_work); | 132 | ssp = container_of(wp, struct srcu_struct, srcu_work); |
133 | if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head)) | 133 | if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head)) |
134 | return; /* Already running or nothing to do. */ | 134 | return; /* Already running or nothing to do. */ |
135 | 135 | ||
136 | /* Remove recently arrived callbacks and wait for readers. */ | 136 | /* Remove recently arrived callbacks and wait for readers. */ |
137 | WRITE_ONCE(sp->srcu_gp_running, true); | 137 | WRITE_ONCE(ssp->srcu_gp_running, true); |
138 | local_irq_disable(); | 138 | local_irq_disable(); |
139 | lh = sp->srcu_cb_head; | 139 | lh = ssp->srcu_cb_head; |
140 | sp->srcu_cb_head = NULL; | 140 | ssp->srcu_cb_head = NULL; |
141 | sp->srcu_cb_tail = &sp->srcu_cb_head; | 141 | ssp->srcu_cb_tail = &ssp->srcu_cb_head; |
142 | local_irq_enable(); | 142 | local_irq_enable(); |
143 | idx = sp->srcu_idx; | 143 | idx = ssp->srcu_idx; |
144 | WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx); | 144 | WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx); |
145 | WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */ | 145 | WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */ |
146 | swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx])); | 146 | swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx])); |
147 | WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */ | 147 | WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */ |
148 | 148 | ||
149 | /* Invoke the callbacks we removed above. */ | 149 | /* Invoke the callbacks we removed above. */ |
150 | while (lh) { | 150 | while (lh) { |
@@ -161,9 +161,9 @@ void srcu_drive_gp(struct work_struct *wp) | |||
161 | * at interrupt level, but the ->srcu_gp_running checks will | 161 | * at interrupt level, but the ->srcu_gp_running checks will |
162 | * straighten that out. | 162 | * straighten that out. |
163 | */ | 163 | */ |
164 | WRITE_ONCE(sp->srcu_gp_running, false); | 164 | WRITE_ONCE(ssp->srcu_gp_running, false); |
165 | if (READ_ONCE(sp->srcu_cb_head)) | 165 | if (READ_ONCE(ssp->srcu_cb_head)) |
166 | schedule_work(&sp->srcu_work); | 166 | schedule_work(&ssp->srcu_work); |
167 | } | 167 | } |
168 | EXPORT_SYMBOL_GPL(srcu_drive_gp); | 168 | EXPORT_SYMBOL_GPL(srcu_drive_gp); |
169 | 169 | ||
@@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(srcu_drive_gp); | |||
171 | * Enqueue an SRCU callback on the specified srcu_struct structure, | 171 | * Enqueue an SRCU callback on the specified srcu_struct structure, |
172 | * initiating grace-period processing if it is not already running. | 172 | * initiating grace-period processing if it is not already running. |
173 | */ | 173 | */ |
174 | void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, | 174 | void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, |
175 | rcu_callback_t func) | 175 | rcu_callback_t func) |
176 | { | 176 | { |
177 | unsigned long flags; | 177 | unsigned long flags; |
@@ -179,14 +179,14 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, | |||
179 | rhp->func = func; | 179 | rhp->func = func; |
180 | rhp->next = NULL; | 180 | rhp->next = NULL; |
181 | local_irq_save(flags); | 181 | local_irq_save(flags); |
182 | *sp->srcu_cb_tail = rhp; | 182 | *ssp->srcu_cb_tail = rhp; |
183 | sp->srcu_cb_tail = &rhp->next; | 183 | ssp->srcu_cb_tail = &rhp->next; |
184 | local_irq_restore(flags); | 184 | local_irq_restore(flags); |
185 | if (!READ_ONCE(sp->srcu_gp_running)) { | 185 | if (!READ_ONCE(ssp->srcu_gp_running)) { |
186 | if (likely(srcu_init_done)) | 186 | if (likely(srcu_init_done)) |
187 | schedule_work(&sp->srcu_work); | 187 | schedule_work(&ssp->srcu_work); |
188 | else if (list_empty(&sp->srcu_work.entry)) | 188 | else if (list_empty(&ssp->srcu_work.entry)) |
189 | list_add(&sp->srcu_work.entry, &srcu_boot_list); | 189 | list_add(&ssp->srcu_work.entry, &srcu_boot_list); |
190 | } | 190 | } |
191 | } | 191 | } |
192 | EXPORT_SYMBOL_GPL(call_srcu); | 192 | EXPORT_SYMBOL_GPL(call_srcu); |
@@ -194,13 +194,13 @@ EXPORT_SYMBOL_GPL(call_srcu); | |||
194 | /* | 194 | /* |
195 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion | 195 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion |
196 | */ | 196 | */ |
197 | void synchronize_srcu(struct srcu_struct *sp) | 197 | void synchronize_srcu(struct srcu_struct *ssp) |
198 | { | 198 | { |
199 | struct rcu_synchronize rs; | 199 | struct rcu_synchronize rs; |
200 | 200 | ||
201 | init_rcu_head_on_stack(&rs.head); | 201 | init_rcu_head_on_stack(&rs.head); |
202 | init_completion(&rs.completion); | 202 | init_completion(&rs.completion); |
203 | call_srcu(sp, &rs.head, wakeme_after_rcu); | 203 | call_srcu(ssp, &rs.head, wakeme_after_rcu); |
204 | wait_for_completion(&rs.completion); | 204 | wait_for_completion(&rs.completion); |
205 | destroy_rcu_head_on_stack(&rs.head); | 205 | destroy_rcu_head_on_stack(&rs.head); |
206 | } | 206 | } |
@@ -219,13 +219,13 @@ void __init rcu_scheduler_starting(void) | |||
219 | */ | 219 | */ |
220 | void __init srcu_init(void) | 220 | void __init srcu_init(void) |
221 | { | 221 | { |
222 | struct srcu_struct *sp; | 222 | struct srcu_struct *ssp; |
223 | 223 | ||
224 | srcu_init_done = true; | 224 | srcu_init_done = true; |
225 | while (!list_empty(&srcu_boot_list)) { | 225 | while (!list_empty(&srcu_boot_list)) { |
226 | sp = list_first_entry(&srcu_boot_list, | 226 | ssp = list_first_entry(&srcu_boot_list, |
227 | struct srcu_struct, srcu_work.entry); | 227 | struct srcu_struct, srcu_work.entry); |
228 | list_del_init(&sp->srcu_work.entry); | 228 | list_del_init(&ssp->srcu_work.entry); |
229 | schedule_work(&sp->srcu_work); | 229 | schedule_work(&ssp->srcu_work); |
230 | } | 230 | } |
231 | } | 231 | } |
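The srcutiny.c changes are a mechanical rename of the srcu_struct parameter from sp to ssp; the API these functions implement is unchanged. For context, a minimal sketch of typical SRCU usage (my_srcu is a hypothetical domain):

    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(my_srcu);    /* Statically allocated srcu_struct. */

    static void reader(void)
    {
            int idx;

            idx = srcu_read_lock(&my_srcu);
            /* Read-side critical section; sleeping is allowed under SRCU. */
            srcu_read_unlock(&my_srcu, idx);
    }

    static void updater(void)
    {
            /* Wait only for readers of this srcu_struct, then reclaim. */
            synchronize_srcu(&my_srcu);
    }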
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index a8846ed7f352..3600d88d8956 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c | |||
@@ -56,7 +56,7 @@ static LIST_HEAD(srcu_boot_list); | |||
56 | static bool __read_mostly srcu_init_done; | 56 | static bool __read_mostly srcu_init_done; |
57 | 57 | ||
58 | static void srcu_invoke_callbacks(struct work_struct *work); | 58 | static void srcu_invoke_callbacks(struct work_struct *work); |
59 | static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); | 59 | static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay); |
60 | static void process_srcu(struct work_struct *work); | 60 | static void process_srcu(struct work_struct *work); |
61 | 61 | ||
62 | /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */ | 62 | /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */ |
@@ -92,7 +92,7 @@ do { \ | |||
92 | * srcu_read_unlock() running against them. So if the is_static parameter | 92 | * srcu_read_unlock() running against them. So if the is_static parameter |
93 | * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[]. | 93 | * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[]. |
94 | */ | 94 | */ |
95 | static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) | 95 | static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static) |
96 | { | 96 | { |
97 | int cpu; | 97 | int cpu; |
98 | int i; | 98 | int i; |
@@ -103,13 +103,13 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) | |||
103 | struct srcu_node *snp_first; | 103 | struct srcu_node *snp_first; |
104 | 104 | ||
105 | /* Work out the overall tree geometry. */ | 105 | /* Work out the overall tree geometry. */ |
106 | sp->level[0] = &sp->node[0]; | 106 | ssp->level[0] = &ssp->node[0]; |
107 | for (i = 1; i < rcu_num_lvls; i++) | 107 | for (i = 1; i < rcu_num_lvls; i++) |
108 | sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1]; | 108 | ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1]; |
109 | rcu_init_levelspread(levelspread, num_rcu_lvl); | 109 | rcu_init_levelspread(levelspread, num_rcu_lvl); |
110 | 110 | ||
111 | /* Each pass through this loop initializes one srcu_node structure. */ | 111 | /* Each pass through this loop initializes one srcu_node structure. */ |
112 | srcu_for_each_node_breadth_first(sp, snp) { | 112 | srcu_for_each_node_breadth_first(ssp, snp) { |
113 | spin_lock_init(&ACCESS_PRIVATE(snp, lock)); | 113 | spin_lock_init(&ACCESS_PRIVATE(snp, lock)); |
114 | WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) != | 114 | WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) != |
115 | ARRAY_SIZE(snp->srcu_data_have_cbs)); | 115 | ARRAY_SIZE(snp->srcu_data_have_cbs)); |
@@ -120,17 +120,17 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) | |||
120 | snp->srcu_gp_seq_needed_exp = 0; | 120 | snp->srcu_gp_seq_needed_exp = 0; |
121 | snp->grplo = -1; | 121 | snp->grplo = -1; |
122 | snp->grphi = -1; | 122 | snp->grphi = -1; |
123 | if (snp == &sp->node[0]) { | 123 | if (snp == &ssp->node[0]) { |
124 | /* Root node, special case. */ | 124 | /* Root node, special case. */ |
125 | snp->srcu_parent = NULL; | 125 | snp->srcu_parent = NULL; |
126 | continue; | 126 | continue; |
127 | } | 127 | } |
128 | 128 | ||
129 | /* Non-root node. */ | 129 | /* Non-root node. */ |
130 | if (snp == sp->level[level + 1]) | 130 | if (snp == ssp->level[level + 1]) |
131 | level++; | 131 | level++; |
132 | snp->srcu_parent = sp->level[level - 1] + | 132 | snp->srcu_parent = ssp->level[level - 1] + |
133 | (snp - sp->level[level]) / | 133 | (snp - ssp->level[level]) / |
134 | levelspread[level - 1]; | 134 | levelspread[level - 1]; |
135 | } | 135 | } |
136 | 136 | ||
@@ -141,14 +141,14 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) | |||
141 | WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) != | 141 | WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) != |
142 | ARRAY_SIZE(sdp->srcu_unlock_count)); | 142 | ARRAY_SIZE(sdp->srcu_unlock_count)); |
143 | level = rcu_num_lvls - 1; | 143 | level = rcu_num_lvls - 1; |
144 | snp_first = sp->level[level]; | 144 | snp_first = ssp->level[level]; |
145 | for_each_possible_cpu(cpu) { | 145 | for_each_possible_cpu(cpu) { |
146 | sdp = per_cpu_ptr(sp->sda, cpu); | 146 | sdp = per_cpu_ptr(ssp->sda, cpu); |
147 | spin_lock_init(&ACCESS_PRIVATE(sdp, lock)); | 147 | spin_lock_init(&ACCESS_PRIVATE(sdp, lock)); |
148 | rcu_segcblist_init(&sdp->srcu_cblist); | 148 | rcu_segcblist_init(&sdp->srcu_cblist); |
149 | sdp->srcu_cblist_invoking = false; | 149 | sdp->srcu_cblist_invoking = false; |
150 | sdp->srcu_gp_seq_needed = sp->srcu_gp_seq; | 150 | sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq; |
151 | sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq; | 151 | sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq; |
152 | sdp->mynode = &snp_first[cpu / levelspread[level]]; | 152 | sdp->mynode = &snp_first[cpu / levelspread[level]]; |
153 | for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) { | 153 | for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) { |
154 | if (snp->grplo < 0) | 154 | if (snp->grplo < 0) |
@@ -157,7 +157,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) | |||
157 | } | 157 | } |
158 | sdp->cpu = cpu; | 158 | sdp->cpu = cpu; |
159 | INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks); | 159 | INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks); |
160 | sdp->sp = sp; | 160 | sdp->ssp = ssp; |
161 | sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); | 161 | sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); |
162 | if (is_static) | 162 | if (is_static) |
163 | continue; | 163 | continue; |
@@ -176,35 +176,35 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) | |||
176 | * parameter is passed through to init_srcu_struct_nodes(), and | 176 | * parameter is passed through to init_srcu_struct_nodes(), and |
177 | * also tells us that ->sda has already been wired up to srcu_data. | 177 | * also tells us that ->sda has already been wired up to srcu_data. |
178 | */ | 178 | */ |
179 | static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) | 179 | static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static) |
180 | { | 180 | { |
181 | mutex_init(&sp->srcu_cb_mutex); | 181 | mutex_init(&ssp->srcu_cb_mutex); |
182 | mutex_init(&sp->srcu_gp_mutex); | 182 | mutex_init(&ssp->srcu_gp_mutex); |
183 | sp->srcu_idx = 0; | 183 | ssp->srcu_idx = 0; |
184 | sp->srcu_gp_seq = 0; | 184 | ssp->srcu_gp_seq = 0; |
185 | sp->srcu_barrier_seq = 0; | 185 | ssp->srcu_barrier_seq = 0; |
186 | mutex_init(&sp->srcu_barrier_mutex); | 186 | mutex_init(&ssp->srcu_barrier_mutex); |
187 | atomic_set(&sp->srcu_barrier_cpu_cnt, 0); | 187 | atomic_set(&ssp->srcu_barrier_cpu_cnt, 0); |
188 | INIT_DELAYED_WORK(&sp->work, process_srcu); | 188 | INIT_DELAYED_WORK(&ssp->work, process_srcu); |
189 | if (!is_static) | 189 | if (!is_static) |
190 | sp->sda = alloc_percpu(struct srcu_data); | 190 | ssp->sda = alloc_percpu(struct srcu_data); |
191 | init_srcu_struct_nodes(sp, is_static); | 191 | init_srcu_struct_nodes(ssp, is_static); |
192 | sp->srcu_gp_seq_needed_exp = 0; | 192 | ssp->srcu_gp_seq_needed_exp = 0; |
193 | sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); | 193 | ssp->srcu_last_gp_end = ktime_get_mono_fast_ns(); |
194 | smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */ | 194 | smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */ |
195 | return sp->sda ? 0 : -ENOMEM; | 195 | return ssp->sda ? 0 : -ENOMEM; |
196 | } | 196 | } |
197 | 197 | ||
198 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 198 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
199 | 199 | ||
200 | int __init_srcu_struct(struct srcu_struct *sp, const char *name, | 200 | int __init_srcu_struct(struct srcu_struct *ssp, const char *name, |
201 | struct lock_class_key *key) | 201 | struct lock_class_key *key) |
202 | { | 202 | { |
203 | /* Don't re-initialize a lock while it is held. */ | 203 | /* Don't re-initialize a lock while it is held. */ |
204 | debug_check_no_locks_freed((void *)sp, sizeof(*sp)); | 204 | debug_check_no_locks_freed((void *)ssp, sizeof(*ssp)); |
205 | lockdep_init_map(&sp->dep_map, name, key, 0); | 205 | lockdep_init_map(&ssp->dep_map, name, key, 0); |
206 | spin_lock_init(&ACCESS_PRIVATE(sp, lock)); | 206 | spin_lock_init(&ACCESS_PRIVATE(ssp, lock)); |
207 | return init_srcu_struct_fields(sp, false); | 207 | return init_srcu_struct_fields(ssp, false); |
208 | } | 208 | } |
209 | EXPORT_SYMBOL_GPL(__init_srcu_struct); | 209 | EXPORT_SYMBOL_GPL(__init_srcu_struct); |
210 | 210 | ||
@@ -212,16 +212,16 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct); | |||
212 | 212 | ||
213 | /** | 213 | /** |
214 | * init_srcu_struct - initialize a sleep-RCU structure | 214 | * init_srcu_struct - initialize a sleep-RCU structure |
215 | * @sp: structure to initialize. | 215 | * @ssp: structure to initialize. |
216 | * | 216 | * |
217 | * Must invoke this on a given srcu_struct before passing that srcu_struct | 217 | * Must invoke this on a given srcu_struct before passing that srcu_struct |
218 | * to any other function. Each srcu_struct represents a separate domain | 218 | * to any other function. Each srcu_struct represents a separate domain |
219 | * of SRCU protection. | 219 | * of SRCU protection. |
220 | */ | 220 | */ |
221 | int init_srcu_struct(struct srcu_struct *sp) | 221 | int init_srcu_struct(struct srcu_struct *ssp) |
222 | { | 222 | { |
223 | spin_lock_init(&ACCESS_PRIVATE(sp, lock)); | 223 | spin_lock_init(&ACCESS_PRIVATE(ssp, lock)); |
224 | return init_srcu_struct_fields(sp, false); | 224 | return init_srcu_struct_fields(ssp, false); |
225 | } | 225 | } |
226 | EXPORT_SYMBOL_GPL(init_srcu_struct); | 226 | EXPORT_SYMBOL_GPL(init_srcu_struct); |
227 | 227 | ||
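For context, a minimal usage sketch of the two initialization styles handled above; the my_static_srcu and my_dev names are hypothetical illustrations, not part of this patch:

#include <linux/srcu.h>

/* Statically allocated domain: no init_srcu_struct() call is needed. */
DEFINE_STATIC_SRCU(my_static_srcu);

/* Dynamically initialized domain embedded in a driver structure. */
struct my_dev {
	struct srcu_struct srcu;
};

static int my_dev_setup(struct my_dev *dev)
{
	return init_srcu_struct(&dev->srcu);	/* -ENOMEM if ->sda allocation fails */
}

static void my_dev_teardown(struct my_dev *dev)
{
	cleanup_srcu_struct(&dev->srcu);	/* all readers and callbacks must be done */
}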
@@ -231,37 +231,37 @@ EXPORT_SYMBOL_GPL(init_srcu_struct); | |||
231 | * First-use initialization of statically allocated srcu_struct | 231 | * First-use initialization of statically allocated srcu_struct |
232 | * structure. Wiring up the combining tree is more than can be | 232 | * structure. Wiring up the combining tree is more than can be |
233 | * done with compile-time initialization, so this check is added | 233 | * done with compile-time initialization, so this check is added |
234 | * to each update-side SRCU primitive. Use sp->lock, which -is- | 234 | * to each update-side SRCU primitive. Use ssp->lock, which -is- |
235 | * compile-time initialized, to resolve races involving multiple | 235 | * compile-time initialized, to resolve races involving multiple |
236 | * CPUs trying to garner first-use privileges. | 236 | * CPUs trying to garner first-use privileges. |
237 | */ | 237 | */ |
238 | static void check_init_srcu_struct(struct srcu_struct *sp) | 238 | static void check_init_srcu_struct(struct srcu_struct *ssp) |
239 | { | 239 | { |
240 | unsigned long flags; | 240 | unsigned long flags; |
241 | 241 | ||
242 | /* The smp_load_acquire() pairs with the smp_store_release(). */ | 242 | /* The smp_load_acquire() pairs with the smp_store_release(). */ |
243 | if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/ | 243 | if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/ |
244 | return; /* Already initialized. */ | 244 | return; /* Already initialized. */ |
245 | spin_lock_irqsave_rcu_node(sp, flags); | 245 | spin_lock_irqsave_rcu_node(ssp, flags); |
246 | if (!rcu_seq_state(sp->srcu_gp_seq_needed)) { | 246 | if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) { |
247 | spin_unlock_irqrestore_rcu_node(sp, flags); | 247 | spin_unlock_irqrestore_rcu_node(ssp, flags); |
248 | return; | 248 | return; |
249 | } | 249 | } |
250 | init_srcu_struct_fields(sp, true); | 250 | init_srcu_struct_fields(ssp, true); |
251 | spin_unlock_irqrestore_rcu_node(sp, flags); | 251 | spin_unlock_irqrestore_rcu_node(ssp, flags); |
252 | } | 252 | } |
253 | 253 | ||
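check_init_srcu_struct() above is an instance of the usual acquire/release first-use pattern: a lock-free fast path ordered by smp_load_acquire(), backed by a locked slow path that re-checks before initializing. A generic sketch of the same shape, assuming a hypothetical my_obj with an ->initialized flag, an ->lock spinlock, and a do_expensive_init() helper (none of which are in this file):

static void ensure_init(struct my_obj *obj)
{
	/* Fast path: pairs with the smp_store_release() below. */
	if (smp_load_acquire(&obj->initialized))
		return;
	spin_lock(&obj->lock);
	if (!obj->initialized) {
		do_expensive_init(obj);
		/* Publish the fully initialized state to later acquire loads. */
		smp_store_release(&obj->initialized, true);
	}
	spin_unlock(&obj->lock);
}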
254 | /* | 254 | /* |
255 | * Returns approximate total of the readers' ->srcu_lock_count[] values | 255 | * Returns approximate total of the readers' ->srcu_lock_count[] values |
256 | * for the rank of per-CPU counters specified by idx. | 256 | * for the rank of per-CPU counters specified by idx. |
257 | */ | 257 | */ |
258 | static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx) | 258 | static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx) |
259 | { | 259 | { |
260 | int cpu; | 260 | int cpu; |
261 | unsigned long sum = 0; | 261 | unsigned long sum = 0; |
262 | 262 | ||
263 | for_each_possible_cpu(cpu) { | 263 | for_each_possible_cpu(cpu) { |
264 | struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); | 264 | struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); |
265 | 265 | ||
266 | sum += READ_ONCE(cpuc->srcu_lock_count[idx]); | 266 | sum += READ_ONCE(cpuc->srcu_lock_count[idx]); |
267 | } | 267 | } |
@@ -272,13 +272,13 @@ static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx) | |||
272 | * Returns approximate total of the readers' ->srcu_unlock_count[] values | 272 | * Returns approximate total of the readers' ->srcu_unlock_count[] values |
273 | * for the rank of per-CPU counters specified by idx. | 273 | * for the rank of per-CPU counters specified by idx. |
274 | */ | 274 | */ |
275 | static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx) | 275 | static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx) |
276 | { | 276 | { |
277 | int cpu; | 277 | int cpu; |
278 | unsigned long sum = 0; | 278 | unsigned long sum = 0; |
279 | 279 | ||
280 | for_each_possible_cpu(cpu) { | 280 | for_each_possible_cpu(cpu) { |
281 | struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); | 281 | struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); |
282 | 282 | ||
283 | sum += READ_ONCE(cpuc->srcu_unlock_count[idx]); | 283 | sum += READ_ONCE(cpuc->srcu_unlock_count[idx]); |
284 | } | 284 | } |
@@ -289,11 +289,11 @@ static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx) | |||
289 | * Return true if the number of pre-existing readers is determined to | 289 | * Return true if the number of pre-existing readers is determined to |
290 | * be zero. | 290 | * be zero. |
291 | */ | 291 | */ |
292 | static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx) | 292 | static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx) |
293 | { | 293 | { |
294 | unsigned long unlocks; | 294 | unsigned long unlocks; |
295 | 295 | ||
296 | unlocks = srcu_readers_unlock_idx(sp, idx); | 296 | unlocks = srcu_readers_unlock_idx(ssp, idx); |
297 | 297 | ||
298 | /* | 298 | /* |
299 | * Make sure that a lock is always counted if the corresponding | 299 | * Make sure that a lock is always counted if the corresponding |
@@ -329,25 +329,25 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx) | |||
329 | * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient, | 329 | * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient, |
330 | * especially on 64-bit systems. | 330 | * especially on 64-bit systems. |
331 | */ | 331 | */ |
332 | return srcu_readers_lock_idx(sp, idx) == unlocks; | 332 | return srcu_readers_lock_idx(ssp, idx) == unlocks; |
333 | } | 333 | } |
334 | 334 | ||
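To make the wrap bound quoted above concrete (illustrative arithmetic only, assuming a 64-bit unsigned long and NR_CPUS = 4096):

	ULONG_MAX / NR_CPUS / 2 = (2^64 - 1) / 4096 / 2 ≈ 2.25 * 10^15

so a reader on any one CPU would need an SRCU nesting depth in the quadrillions before the summed per-CPU counters could alias across this check.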
335 | /** | 335 | /** |
336 | * srcu_readers_active - returns true if there are readers, and false | 336 | * srcu_readers_active - returns true if there are readers, and false |
337 | * otherwise | 337 | * otherwise |
338 | * @sp: which srcu_struct to count active readers (holding srcu_read_lock). | 338 | * @ssp: which srcu_struct to count active readers (holding srcu_read_lock). |
339 | * | 339 | * |
340 | * Note that this is not an atomic primitive, and can therefore suffer | 340 | * Note that this is not an atomic primitive, and can therefore suffer |
341 | * severe errors when invoked on an active srcu_struct. That said, it | 341 | * severe errors when invoked on an active srcu_struct. That said, it |
342 | * can be useful as an error check at cleanup time. | 342 | * can be useful as an error check at cleanup time. |
343 | */ | 343 | */ |
344 | static bool srcu_readers_active(struct srcu_struct *sp) | 344 | static bool srcu_readers_active(struct srcu_struct *ssp) |
345 | { | 345 | { |
346 | int cpu; | 346 | int cpu; |
347 | unsigned long sum = 0; | 347 | unsigned long sum = 0; |
348 | 348 | ||
349 | for_each_possible_cpu(cpu) { | 349 | for_each_possible_cpu(cpu) { |
350 | struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); | 350 | struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); |
351 | 351 | ||
352 | sum += READ_ONCE(cpuc->srcu_lock_count[0]); | 352 | sum += READ_ONCE(cpuc->srcu_lock_count[0]); |
353 | sum += READ_ONCE(cpuc->srcu_lock_count[1]); | 353 | sum += READ_ONCE(cpuc->srcu_lock_count[1]); |
@@ -363,44 +363,44 @@ static bool srcu_readers_active(struct srcu_struct *sp) | |||
363 | * Return grace-period delay, zero if there are expedited grace | 363 | * Return grace-period delay, zero if there are expedited grace |
364 | * periods pending, SRCU_INTERVAL otherwise. | 364 | * periods pending, SRCU_INTERVAL otherwise. |
365 | */ | 365 | */ |
366 | static unsigned long srcu_get_delay(struct srcu_struct *sp) | 366 | static unsigned long srcu_get_delay(struct srcu_struct *ssp) |
367 | { | 367 | { |
368 | if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq), | 368 | if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), |
369 | READ_ONCE(sp->srcu_gp_seq_needed_exp))) | 369 | READ_ONCE(ssp->srcu_gp_seq_needed_exp))) |
370 | return 0; | 370 | return 0; |
371 | return SRCU_INTERVAL; | 371 | return SRCU_INTERVAL; |
372 | } | 372 | } |
373 | 373 | ||
374 | /* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */ | 374 | /* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */ |
375 | void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced) | 375 | void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced) |
376 | { | 376 | { |
377 | int cpu; | 377 | int cpu; |
378 | 378 | ||
379 | if (WARN_ON(!srcu_get_delay(sp))) | 379 | if (WARN_ON(!srcu_get_delay(ssp))) |
380 | return; /* Just leak it! */ | 380 | return; /* Just leak it! */ |
381 | if (WARN_ON(srcu_readers_active(sp))) | 381 | if (WARN_ON(srcu_readers_active(ssp))) |
382 | return; /* Just leak it! */ | 382 | return; /* Just leak it! */ |
383 | if (quiesced) { | 383 | if (quiesced) { |
384 | if (WARN_ON(delayed_work_pending(&sp->work))) | 384 | if (WARN_ON(delayed_work_pending(&ssp->work))) |
385 | return; /* Just leak it! */ | 385 | return; /* Just leak it! */ |
386 | } else { | 386 | } else { |
387 | flush_delayed_work(&sp->work); | 387 | flush_delayed_work(&ssp->work); |
388 | } | 388 | } |
389 | for_each_possible_cpu(cpu) | 389 | for_each_possible_cpu(cpu) |
390 | if (quiesced) { | 390 | if (quiesced) { |
391 | if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work))) | 391 | if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work))) |
392 | return; /* Just leak it! */ | 392 | return; /* Just leak it! */ |
393 | } else { | 393 | } else { |
394 | flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work); | 394 | flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work); |
395 | } | 395 | } |
396 | if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) || | 396 | if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) || |
397 | WARN_ON(srcu_readers_active(sp))) { | 397 | WARN_ON(srcu_readers_active(ssp))) { |
398 | pr_info("%s: Active srcu_struct %p state: %d\n", | 398 | pr_info("%s: Active srcu_struct %p state: %d\n", |
399 | __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); | 399 | __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))); |
400 | return; /* Caller forgot to stop doing call_srcu()? */ | 400 | return; /* Caller forgot to stop doing call_srcu()? */ |
401 | } | 401 | } |
402 | free_percpu(sp->sda); | 402 | free_percpu(ssp->sda); |
403 | sp->sda = NULL; | 403 | ssp->sda = NULL; |
404 | } | 404 | } |
405 | EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); | 405 | EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); |
406 | 406 | ||
@@ -409,12 +409,12 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); | |||
409 | * srcu_struct. | 409 | * srcu_struct. |
410 | * Returns an index that must be passed to the matching srcu_read_unlock(). | 410 | * Returns an index that must be passed to the matching srcu_read_unlock(). |
411 | */ | 411 | */ |
412 | int __srcu_read_lock(struct srcu_struct *sp) | 412 | int __srcu_read_lock(struct srcu_struct *ssp) |
413 | { | 413 | { |
414 | int idx; | 414 | int idx; |
415 | 415 | ||
416 | idx = READ_ONCE(sp->srcu_idx) & 0x1; | 416 | idx = READ_ONCE(ssp->srcu_idx) & 0x1; |
417 | this_cpu_inc(sp->sda->srcu_lock_count[idx]); | 417 | this_cpu_inc(ssp->sda->srcu_lock_count[idx]); |
418 | smp_mb(); /* B */ /* Avoid leaking the critical section. */ | 418 | smp_mb(); /* B */ /* Avoid leaking the critical section. */ |
419 | return idx; | 419 | return idx; |
420 | } | 420 | } |
@@ -425,10 +425,10 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock); | |||
425 | * element of the srcu_struct. Note that this may well be a different | 425 | * element of the srcu_struct. Note that this may well be a different |
426 | * CPU than that which was incremented by the corresponding srcu_read_lock(). | 426 | * CPU than that which was incremented by the corresponding srcu_read_lock(). |
427 | */ | 427 | */ |
428 | void __srcu_read_unlock(struct srcu_struct *sp, int idx) | 428 | void __srcu_read_unlock(struct srcu_struct *ssp, int idx) |
429 | { | 429 | { |
430 | smp_mb(); /* C */ /* Avoid leaking the critical section. */ | 430 | smp_mb(); /* C */ /* Avoid leaking the critical section. */ |
431 | this_cpu_inc(sp->sda->srcu_unlock_count[idx]); | 431 | this_cpu_inc(ssp->sda->srcu_unlock_count[idx]); |
432 | } | 432 | } |
433 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); | 433 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); |
434 | 434 | ||
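The index handled by these two functions is what the srcu_read_lock()/srcu_read_unlock() wrappers thread through a read-side critical section. A reader-side sketch, using hypothetical my_srcu, my_shared_ptr, my_data, and process() names for illustration:

	int idx;
	struct my_data *p;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(my_shared_ptr, &my_srcu);
	if (p)
		process(p);			/* may block: this is sleepable RCU */
	srcu_read_unlock(&my_srcu, idx);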
@@ -444,20 +444,22 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock); | |||
444 | /* | 444 | /* |
445 | * Start an SRCU grace period. | 445 | * Start an SRCU grace period. |
446 | */ | 446 | */ |
447 | static void srcu_gp_start(struct srcu_struct *sp) | 447 | static void srcu_gp_start(struct srcu_struct *ssp) |
448 | { | 448 | { |
449 | struct srcu_data *sdp = this_cpu_ptr(sp->sda); | 449 | struct srcu_data *sdp = this_cpu_ptr(ssp->sda); |
450 | int state; | 450 | int state; |
451 | 451 | ||
452 | lockdep_assert_held(&ACCESS_PRIVATE(sp, lock)); | 452 | lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock)); |
453 | WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); | 453 | WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)); |
454 | spin_lock_rcu_node(sdp); /* Interrupts already disabled. */ | ||
454 | rcu_segcblist_advance(&sdp->srcu_cblist, | 455 | rcu_segcblist_advance(&sdp->srcu_cblist, |
455 | rcu_seq_current(&sp->srcu_gp_seq)); | 456 | rcu_seq_current(&ssp->srcu_gp_seq)); |
456 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, | 457 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
457 | rcu_seq_snap(&sp->srcu_gp_seq)); | 458 | rcu_seq_snap(&ssp->srcu_gp_seq)); |
459 | spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */ | ||
458 | smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */ | 460 | smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */ |
459 | rcu_seq_start(&sp->srcu_gp_seq); | 461 | rcu_seq_start(&ssp->srcu_gp_seq); |
460 | state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); | 462 | state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); |
461 | WARN_ON_ONCE(state != SRCU_STATE_SCAN1); | 463 | WARN_ON_ONCE(state != SRCU_STATE_SCAN1); |
462 | } | 464 | } |
463 | 465 | ||
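The ->srcu_gp_seq being started here packs the grace-period phase into its two low-order bits, with the grace-period count above them (the rcu_seq_*() helpers from kernel/rcu/rcu.h). A stand-alone sketch of that encoding, using local names rather than the kernel helpers:

#define SEQ_CTR_SHIFT	2
#define SEQ_STATE_MASK	((1UL << SEQ_CTR_SHIFT) - 1)

static inline unsigned long seq_ctr(unsigned long s)
{
	return s >> SEQ_CTR_SHIFT;	/* grace-period number */
}

static inline unsigned long seq_state(unsigned long s)
{
	return s & SEQ_STATE_MASK;	/* 0=IDLE, 1=SCAN1, 2=SCAN2 */
}

Under this encoding, rcu_seq_start() above takes the state from idle to SRCU_STATE_SCAN1 without touching the counter, and rcu_seq_end() later clears the state bits while advancing the counter.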
@@ -511,7 +513,7 @@ static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) | |||
511 | * just-completed grace period, the one corresponding to idx. If possible, | 513 | * just-completed grace period, the one corresponding to idx. If possible, |
512 | * schedule this invocation on the corresponding CPUs. | 514 | * schedule this invocation on the corresponding CPUs. |
513 | */ | 515 | */ |
514 | static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, | 516 | static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp, |
515 | unsigned long mask, unsigned long delay) | 517 | unsigned long mask, unsigned long delay) |
516 | { | 518 | { |
517 | int cpu; | 519 | int cpu; |
@@ -519,7 +521,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, | |||
519 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { | 521 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { |
520 | if (!(mask & (1 << (cpu - snp->grplo)))) | 522 | if (!(mask & (1 << (cpu - snp->grplo)))) |
521 | continue; | 523 | continue; |
522 | srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay); | 524 | srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay); |
523 | } | 525 | } |
524 | } | 526 | } |
525 | 527 | ||
@@ -532,7 +534,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, | |||
532 | * are initiating callback invocation. This allows the ->srcu_have_cbs[] | 534 | * are initiating callback invocation. This allows the ->srcu_have_cbs[] |
533 | * array to have a finite number of elements. | 535 | * array to have a finite number of elements. |
534 | */ | 536 | */ |
535 | static void srcu_gp_end(struct srcu_struct *sp) | 537 | static void srcu_gp_end(struct srcu_struct *ssp) |
536 | { | 538 | { |
537 | unsigned long cbdelay; | 539 | unsigned long cbdelay; |
538 | bool cbs; | 540 | bool cbs; |
@@ -546,28 +548,28 @@ static void srcu_gp_end(struct srcu_struct *sp) | |||
546 | struct srcu_node *snp; | 548 | struct srcu_node *snp; |
547 | 549 | ||
548 | /* Prevent more than one additional grace period. */ | 550 | /* Prevent more than one additional grace period. */ |
549 | mutex_lock(&sp->srcu_cb_mutex); | 551 | mutex_lock(&ssp->srcu_cb_mutex); |
550 | 552 | ||
551 | /* End the current grace period. */ | 553 | /* End the current grace period. */ |
552 | spin_lock_irq_rcu_node(sp); | 554 | spin_lock_irq_rcu_node(ssp); |
553 | idx = rcu_seq_state(sp->srcu_gp_seq); | 555 | idx = rcu_seq_state(ssp->srcu_gp_seq); |
554 | WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); | 556 | WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); |
555 | cbdelay = srcu_get_delay(sp); | 557 | cbdelay = srcu_get_delay(ssp); |
556 | sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); | 558 | ssp->srcu_last_gp_end = ktime_get_mono_fast_ns(); |
557 | rcu_seq_end(&sp->srcu_gp_seq); | 559 | rcu_seq_end(&ssp->srcu_gp_seq); |
558 | gpseq = rcu_seq_current(&sp->srcu_gp_seq); | 560 | gpseq = rcu_seq_current(&ssp->srcu_gp_seq); |
559 | if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq)) | 561 | if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq)) |
560 | sp->srcu_gp_seq_needed_exp = gpseq; | 562 | ssp->srcu_gp_seq_needed_exp = gpseq; |
561 | spin_unlock_irq_rcu_node(sp); | 563 | spin_unlock_irq_rcu_node(ssp); |
562 | mutex_unlock(&sp->srcu_gp_mutex); | 564 | mutex_unlock(&ssp->srcu_gp_mutex); |
563 | /* A new grace period can start at this point. But only one. */ | 565 | /* A new grace period can start at this point. But only one. */ |
564 | 566 | ||
565 | /* Initiate callback invocation as needed. */ | 567 | /* Initiate callback invocation as needed. */ |
566 | idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs); | 568 | idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs); |
567 | srcu_for_each_node_breadth_first(sp, snp) { | 569 | srcu_for_each_node_breadth_first(ssp, snp) { |
568 | spin_lock_irq_rcu_node(snp); | 570 | spin_lock_irq_rcu_node(snp); |
569 | cbs = false; | 571 | cbs = false; |
570 | last_lvl = snp >= sp->level[rcu_num_lvls - 1]; | 572 | last_lvl = snp >= ssp->level[rcu_num_lvls - 1]; |
571 | if (last_lvl) | 573 | if (last_lvl) |
572 | cbs = snp->srcu_have_cbs[idx] == gpseq; | 574 | cbs = snp->srcu_have_cbs[idx] == gpseq; |
573 | snp->srcu_have_cbs[idx] = gpseq; | 575 | snp->srcu_have_cbs[idx] = gpseq; |
@@ -578,12 +580,12 @@ static void srcu_gp_end(struct srcu_struct *sp) | |||
578 | snp->srcu_data_have_cbs[idx] = 0; | 580 | snp->srcu_data_have_cbs[idx] = 0; |
579 | spin_unlock_irq_rcu_node(snp); | 581 | spin_unlock_irq_rcu_node(snp); |
580 | if (cbs) | 582 | if (cbs) |
581 | srcu_schedule_cbs_snp(sp, snp, mask, cbdelay); | 583 | srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay); |
582 | 584 | ||
583 | /* Occasionally prevent srcu_data counter wrap. */ | 585 | /* Occasionally prevent srcu_data counter wrap. */ |
584 | if (!(gpseq & counter_wrap_check) && last_lvl) | 586 | if (!(gpseq & counter_wrap_check) && last_lvl) |
585 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { | 587 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { |
586 | sdp = per_cpu_ptr(sp->sda, cpu); | 588 | sdp = per_cpu_ptr(ssp->sda, cpu); |
587 | spin_lock_irqsave_rcu_node(sdp, flags); | 589 | spin_lock_irqsave_rcu_node(sdp, flags); |
588 | if (ULONG_CMP_GE(gpseq, | 590 | if (ULONG_CMP_GE(gpseq, |
589 | sdp->srcu_gp_seq_needed + 100)) | 591 | sdp->srcu_gp_seq_needed + 100)) |
@@ -596,18 +598,18 @@ static void srcu_gp_end(struct srcu_struct *sp) | |||
596 | } | 598 | } |
597 | 599 | ||
598 | /* Callback initiation done, allow grace periods after next. */ | 600 | /* Callback initiation done, allow grace periods after next. */ |
599 | mutex_unlock(&sp->srcu_cb_mutex); | 601 | mutex_unlock(&ssp->srcu_cb_mutex); |
600 | 602 | ||
601 | /* Start a new grace period if needed. */ | 603 | /* Start a new grace period if needed. */ |
602 | spin_lock_irq_rcu_node(sp); | 604 | spin_lock_irq_rcu_node(ssp); |
603 | gpseq = rcu_seq_current(&sp->srcu_gp_seq); | 605 | gpseq = rcu_seq_current(&ssp->srcu_gp_seq); |
604 | if (!rcu_seq_state(gpseq) && | 606 | if (!rcu_seq_state(gpseq) && |
605 | ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) { | 607 | ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) { |
606 | srcu_gp_start(sp); | 608 | srcu_gp_start(ssp); |
607 | spin_unlock_irq_rcu_node(sp); | 609 | spin_unlock_irq_rcu_node(ssp); |
608 | srcu_reschedule(sp, 0); | 610 | srcu_reschedule(ssp, 0); |
609 | } else { | 611 | } else { |
610 | spin_unlock_irq_rcu_node(sp); | 612 | spin_unlock_irq_rcu_node(ssp); |
611 | } | 613 | } |
612 | } | 614 | } |
613 | 615 | ||
@@ -618,13 +620,13 @@ static void srcu_gp_end(struct srcu_struct *sp) | |||
618 | * but without expediting. To start a completely new grace period, | 620 | * but without expediting. To start a completely new grace period, |
619 | * whether expedited or not, use srcu_funnel_gp_start() instead. | 621 | * whether expedited or not, use srcu_funnel_gp_start() instead. |
620 | */ | 622 | */ |
621 | static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp, | 623 | static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp, |
622 | unsigned long s) | 624 | unsigned long s) |
623 | { | 625 | { |
624 | unsigned long flags; | 626 | unsigned long flags; |
625 | 627 | ||
626 | for (; snp != NULL; snp = snp->srcu_parent) { | 628 | for (; snp != NULL; snp = snp->srcu_parent) { |
627 | if (rcu_seq_done(&sp->srcu_gp_seq, s) || | 629 | if (rcu_seq_done(&ssp->srcu_gp_seq, s) || |
628 | ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s)) | 630 | ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s)) |
629 | return; | 631 | return; |
630 | spin_lock_irqsave_rcu_node(snp, flags); | 632 | spin_lock_irqsave_rcu_node(snp, flags); |
@@ -635,10 +637,10 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp, | |||
635 | WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); | 637 | WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); |
636 | spin_unlock_irqrestore_rcu_node(snp, flags); | 638 | spin_unlock_irqrestore_rcu_node(snp, flags); |
637 | } | 639 | } |
638 | spin_lock_irqsave_rcu_node(sp, flags); | 640 | spin_lock_irqsave_rcu_node(ssp, flags); |
639 | if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) | 641 | if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s)) |
640 | sp->srcu_gp_seq_needed_exp = s; | 642 | ssp->srcu_gp_seq_needed_exp = s; |
641 | spin_unlock_irqrestore_rcu_node(sp, flags); | 643 | spin_unlock_irqrestore_rcu_node(ssp, flags); |
642 | } | 644 | } |
643 | 645 | ||
644 | /* | 646 | /* |
@@ -651,7 +653,7 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp, | |||
651 | * Note that this function also does the work of srcu_funnel_exp_start(), | 653 | * Note that this function also does the work of srcu_funnel_exp_start(), |
652 | * in some cases by directly invoking it. | 654 | * in some cases by directly invoking it. |
653 | */ | 655 | */ |
654 | static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, | 656 | static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp, |
655 | unsigned long s, bool do_norm) | 657 | unsigned long s, bool do_norm) |
656 | { | 658 | { |
657 | unsigned long flags; | 659 | unsigned long flags; |
@@ -661,7 +663,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, | |||
661 | 663 | ||
662 | /* Each pass through the loop does one level of the srcu_node tree. */ | 664 | /* Each pass through the loop does one level of the srcu_node tree. */ |
663 | for (; snp != NULL; snp = snp->srcu_parent) { | 665 | for (; snp != NULL; snp = snp->srcu_parent) { |
664 | if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode) | 666 | if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode) |
665 | return; /* GP already done and CBs recorded. */ | 667 | return; /* GP already done and CBs recorded. */ |
666 | spin_lock_irqsave_rcu_node(snp, flags); | 668 | spin_lock_irqsave_rcu_node(snp, flags); |
667 | if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) { | 669 | if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) { |
@@ -676,7 +678,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, | |||
676 | return; | 678 | return; |
677 | } | 679 | } |
678 | if (!do_norm) | 680 | if (!do_norm) |
679 | srcu_funnel_exp_start(sp, snp, s); | 681 | srcu_funnel_exp_start(ssp, snp, s); |
680 | return; | 682 | return; |
681 | } | 683 | } |
682 | snp->srcu_have_cbs[idx] = s; | 684 | snp->srcu_have_cbs[idx] = s; |
@@ -688,29 +690,29 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, | |||
688 | } | 690 | } |
689 | 691 | ||
690 | /* Top of tree, must ensure the grace period will be started. */ | 692 | /* Top of tree, must ensure the grace period will be started. */ |
691 | spin_lock_irqsave_rcu_node(sp, flags); | 693 | spin_lock_irqsave_rcu_node(ssp, flags); |
692 | if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) { | 694 | if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) { |
693 | /* | 695 | /* |
694 | * Record need for grace period s. Pair with load | 696 | * Record need for grace period s. Pair with load |
695 | * acquire setting up for initialization. | 697 | * acquire setting up for initialization. |
696 | */ | 698 | */ |
697 | smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/ | 699 | smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/ |
698 | } | 700 | } |
699 | if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) | 701 | if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s)) |
700 | sp->srcu_gp_seq_needed_exp = s; | 702 | ssp->srcu_gp_seq_needed_exp = s; |
701 | 703 | ||
702 | /* If grace period not already done and none in progress, start it. */ | 704 | /* If grace period not already done and none in progress, start it. */ |
703 | if (!rcu_seq_done(&sp->srcu_gp_seq, s) && | 705 | if (!rcu_seq_done(&ssp->srcu_gp_seq, s) && |
704 | rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) { | 706 | rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) { |
705 | WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); | 707 | WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)); |
706 | srcu_gp_start(sp); | 708 | srcu_gp_start(ssp); |
707 | if (likely(srcu_init_done)) | 709 | if (likely(srcu_init_done)) |
708 | queue_delayed_work(rcu_gp_wq, &sp->work, | 710 | queue_delayed_work(rcu_gp_wq, &ssp->work, |
709 | srcu_get_delay(sp)); | 711 | srcu_get_delay(ssp)); |
710 | else if (list_empty(&sp->work.work.entry)) | 712 | else if (list_empty(&ssp->work.work.entry)) |
711 | list_add(&sp->work.work.entry, &srcu_boot_list); | 713 | list_add(&ssp->work.work.entry, &srcu_boot_list); |
712 | } | 714 | } |
713 | spin_unlock_irqrestore_rcu_node(sp, flags); | 715 | spin_unlock_irqrestore_rcu_node(ssp, flags); |
714 | } | 716 | } |
715 | 717 | ||
716 | /* | 718 | /* |
@@ -718,12 +720,12 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, | |||
718 | * loop an additional time if there is an expedited grace period pending. | 720 | * loop an additional time if there is an expedited grace period pending. |
719 | * The caller must ensure that ->srcu_idx is not changed while checking. | 721 | * The caller must ensure that ->srcu_idx is not changed while checking. |
720 | */ | 722 | */ |
721 | static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) | 723 | static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount) |
722 | { | 724 | { |
723 | for (;;) { | 725 | for (;;) { |
724 | if (srcu_readers_active_idx_check(sp, idx)) | 726 | if (srcu_readers_active_idx_check(ssp, idx)) |
725 | return true; | 727 | return true; |
726 | if (--trycount + !srcu_get_delay(sp) <= 0) | 728 | if (--trycount + !srcu_get_delay(ssp) <= 0) |
727 | return false; | 729 | return false; |
728 | udelay(SRCU_RETRY_CHECK_DELAY); | 730 | udelay(SRCU_RETRY_CHECK_DELAY); |
729 | } | 731 | } |
@@ -734,7 +736,7 @@ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) | |||
734 | * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows | 736 | * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows |
735 | * us to wait for pre-existing readers in a starvation-free manner. | 737 | * us to wait for pre-existing readers in a starvation-free manner. |
736 | */ | 738 | */ |
737 | static void srcu_flip(struct srcu_struct *sp) | 739 | static void srcu_flip(struct srcu_struct *ssp) |
738 | { | 740 | { |
739 | /* | 741 | /* |
740 | * Ensure that if this updater saw a given reader's increment | 742 | * Ensure that if this updater saw a given reader's increment |
@@ -746,7 +748,7 @@ static void srcu_flip(struct srcu_struct *sp) | |||
746 | */ | 748 | */ |
747 | smp_mb(); /* E */ /* Pairs with B and C. */ | 749 | smp_mb(); /* E */ /* Pairs with B and C. */ |
748 | 750 | ||
749 | WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1); | 751 | WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); |
750 | 752 | ||
751 | /* | 753 | /* |
752 | * Ensure that if the updater misses an __srcu_read_unlock() | 754 | * Ensure that if the updater misses an __srcu_read_unlock() |
@@ -779,7 +781,7 @@ static void srcu_flip(struct srcu_struct *sp) | |||
779 | * negligible when amoritized over that time period, and the extra latency | 781 | * negligible when amoritized over that time period, and the extra latency |
780 | * of a needlessly non-expedited grace period is similarly negligible. | 782 | * of a needlessly non-expedited grace period is similarly negligible. |
781 | */ | 783 | */ |
782 | static bool srcu_might_be_idle(struct srcu_struct *sp) | 784 | static bool srcu_might_be_idle(struct srcu_struct *ssp) |
783 | { | 785 | { |
784 | unsigned long curseq; | 786 | unsigned long curseq; |
785 | unsigned long flags; | 787 | unsigned long flags; |
@@ -788,7 +790,7 @@ static bool srcu_might_be_idle(struct srcu_struct *sp) | |||
788 | 790 | ||
789 | /* If the local srcu_data structure has callbacks, not idle. */ | 791 | /* If the local srcu_data structure has callbacks, not idle. */ |
790 | local_irq_save(flags); | 792 | local_irq_save(flags); |
791 | sdp = this_cpu_ptr(sp->sda); | 793 | sdp = this_cpu_ptr(ssp->sda); |
792 | if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { | 794 | if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { |
793 | local_irq_restore(flags); | 795 | local_irq_restore(flags); |
794 | return false; /* Callbacks already present, so not idle. */ | 796 | return false; /* Callbacks already present, so not idle. */ |
@@ -804,17 +806,17 @@ static bool srcu_might_be_idle(struct srcu_struct *sp) | |||
804 | /* First, see if enough time has passed since the last GP. */ | 806 | /* First, see if enough time has passed since the last GP. */ |
805 | t = ktime_get_mono_fast_ns(); | 807 | t = ktime_get_mono_fast_ns(); |
806 | if (exp_holdoff == 0 || | 808 | if (exp_holdoff == 0 || |
807 | time_in_range_open(t, sp->srcu_last_gp_end, | 809 | time_in_range_open(t, ssp->srcu_last_gp_end, |
808 | sp->srcu_last_gp_end + exp_holdoff)) | 810 | ssp->srcu_last_gp_end + exp_holdoff)) |
809 | return false; /* Too soon after last GP. */ | 811 | return false; /* Too soon after last GP. */ |
810 | 812 | ||
811 | /* Next, check for probable idleness. */ | 813 | /* Next, check for probable idleness. */ |
812 | curseq = rcu_seq_current(&sp->srcu_gp_seq); | 814 | curseq = rcu_seq_current(&ssp->srcu_gp_seq); |
813 | smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ | 815 | smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ |
814 | if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed))) | 816 | if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed))) |
815 | return false; /* Grace period in progress, so not idle. */ | 817 | return false; /* Grace period in progress, so not idle. */ |
816 | smp_mb(); /* Order ->srcu_gp_seq with prior access. */ | 818 | smp_mb(); /* Order ->srcu_gp_seq with prior access. */ |
817 | if (curseq != rcu_seq_current(&sp->srcu_gp_seq)) | 819 | if (curseq != rcu_seq_current(&ssp->srcu_gp_seq)) |
818 | return false; /* GP # changed, so not idle. */ | 820 | return false; /* GP # changed, so not idle. */ |
819 | return true; /* With reasonable probability, idle! */ | 821 | return true; /* With reasonable probability, idle! */ |
820 | } | 822 | } |
@@ -854,16 +856,17 @@ static void srcu_leak_callback(struct rcu_head *rhp) | |||
854 | * srcu_read_lock(), and srcu_read_unlock() that are all passed the same | 856 | * srcu_read_lock(), and srcu_read_unlock() that are all passed the same |
855 | * srcu_struct structure. | 857 | * srcu_struct structure. |
856 | */ | 858 | */ |
857 | void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, | 859 | void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, |
858 | rcu_callback_t func, bool do_norm) | 860 | rcu_callback_t func, bool do_norm) |
859 | { | 861 | { |
860 | unsigned long flags; | 862 | unsigned long flags; |
863 | int idx; | ||
861 | bool needexp = false; | 864 | bool needexp = false; |
862 | bool needgp = false; | 865 | bool needgp = false; |
863 | unsigned long s; | 866 | unsigned long s; |
864 | struct srcu_data *sdp; | 867 | struct srcu_data *sdp; |
865 | 868 | ||
866 | check_init_srcu_struct(sp); | 869 | check_init_srcu_struct(ssp); |
867 | if (debug_rcu_head_queue(rhp)) { | 870 | if (debug_rcu_head_queue(rhp)) { |
868 | /* Probable double call_srcu(), so leak the callback. */ | 871 | /* Probable double call_srcu(), so leak the callback. */ |
869 | WRITE_ONCE(rhp->func, srcu_leak_callback); | 872 | WRITE_ONCE(rhp->func, srcu_leak_callback); |
@@ -871,13 +874,14 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, | |||
871 | return; | 874 | return; |
872 | } | 875 | } |
873 | rhp->func = func; | 876 | rhp->func = func; |
877 | idx = srcu_read_lock(ssp); | ||
874 | local_irq_save(flags); | 878 | local_irq_save(flags); |
875 | sdp = this_cpu_ptr(sp->sda); | 879 | sdp = this_cpu_ptr(ssp->sda); |
876 | spin_lock_rcu_node(sdp); | 880 | spin_lock_rcu_node(sdp); |
877 | rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); | 881 | rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); |
878 | rcu_segcblist_advance(&sdp->srcu_cblist, | 882 | rcu_segcblist_advance(&sdp->srcu_cblist, |
879 | rcu_seq_current(&sp->srcu_gp_seq)); | 883 | rcu_seq_current(&ssp->srcu_gp_seq)); |
880 | s = rcu_seq_snap(&sp->srcu_gp_seq); | 884 | s = rcu_seq_snap(&ssp->srcu_gp_seq); |
881 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); | 885 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); |
882 | if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { | 886 | if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { |
883 | sdp->srcu_gp_seq_needed = s; | 887 | sdp->srcu_gp_seq_needed = s; |
@@ -889,14 +893,15 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, | |||
889 | } | 893 | } |
890 | spin_unlock_irqrestore_rcu_node(sdp, flags); | 894 | spin_unlock_irqrestore_rcu_node(sdp, flags); |
891 | if (needgp) | 895 | if (needgp) |
892 | srcu_funnel_gp_start(sp, sdp, s, do_norm); | 896 | srcu_funnel_gp_start(ssp, sdp, s, do_norm); |
893 | else if (needexp) | 897 | else if (needexp) |
894 | srcu_funnel_exp_start(sp, sdp->mynode, s); | 898 | srcu_funnel_exp_start(ssp, sdp->mynode, s); |
899 | srcu_read_unlock(ssp, idx); | ||
895 | } | 900 | } |
896 | 901 | ||
897 | /** | 902 | /** |
898 | * call_srcu() - Queue a callback for invocation after an SRCU grace period | 903 | * call_srcu() - Queue a callback for invocation after an SRCU grace period |
899 | * @sp: srcu_struct on which to queue the callback | 904 | * @ssp: srcu_struct on which to queue the callback |
900 | * @rhp: structure to be used for queueing the SRCU callback. | 905 | * @rhp: structure to be used for queueing the SRCU callback. |
901 | * @func: function to be invoked after the SRCU grace period | 906 | * @func: function to be invoked after the SRCU grace period |
902 | * | 907 | * |
@@ -911,21 +916,21 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, | |||
911 | * The callback will be invoked from process context, but must nevertheless | 916 | * The callback will be invoked from process context, but must nevertheless |
912 | * be fast and must not block. | 917 | * be fast and must not block. |
913 | */ | 918 | */ |
914 | void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, | 919 | void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, |
915 | rcu_callback_t func) | 920 | rcu_callback_t func) |
916 | { | 921 | { |
917 | __call_srcu(sp, rhp, func, true); | 922 | __call_srcu(ssp, rhp, func, true); |
918 | } | 923 | } |
919 | EXPORT_SYMBOL_GPL(call_srcu); | 924 | EXPORT_SYMBOL_GPL(call_srcu); |
920 | 925 | ||
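A typical call_srcu() user embeds the rcu_head in the protected object and frees the object from the callback. A sketch reusing the hypothetical my_srcu/my_shared_ptr/my_data names from the reader example above, and assuming my_data carries a struct rcu_head rh member:

#include <linux/slab.h>

static void my_data_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_data, rh));
}

/* Updater: unpublish the object, then defer the free past all readers. */
static void my_data_retire(struct my_data *p)
{
	rcu_assign_pointer(my_shared_ptr, NULL);
	call_srcu(&my_srcu, &p->rh, my_data_free_cb);
}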
921 | /* | 926 | /* |
922 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). | 927 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). |
923 | */ | 928 | */ |
924 | static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) | 929 | static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm) |
925 | { | 930 | { |
926 | struct rcu_synchronize rcu; | 931 | struct rcu_synchronize rcu; |
927 | 932 | ||
928 | RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) || | 933 | RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) || |
929 | lock_is_held(&rcu_bh_lock_map) || | 934 | lock_is_held(&rcu_bh_lock_map) || |
930 | lock_is_held(&rcu_lock_map) || | 935 | lock_is_held(&rcu_lock_map) || |
931 | lock_is_held(&rcu_sched_lock_map), | 936 | lock_is_held(&rcu_sched_lock_map), |
@@ -934,10 +939,10 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) | |||
934 | if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) | 939 | if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) |
935 | return; | 940 | return; |
936 | might_sleep(); | 941 | might_sleep(); |
937 | check_init_srcu_struct(sp); | 942 | check_init_srcu_struct(ssp); |
938 | init_completion(&rcu.completion); | 943 | init_completion(&rcu.completion); |
939 | init_rcu_head_on_stack(&rcu.head); | 944 | init_rcu_head_on_stack(&rcu.head); |
940 | __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm); | 945 | __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm); |
941 | wait_for_completion(&rcu.completion); | 946 | wait_for_completion(&rcu.completion); |
942 | destroy_rcu_head_on_stack(&rcu.head); | 947 | destroy_rcu_head_on_stack(&rcu.head); |
943 | 948 | ||
@@ -953,7 +958,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) | |||
953 | 958 | ||
954 | /** | 959 | /** |
955 | * synchronize_srcu_expedited - Brute-force SRCU grace period | 960 | * synchronize_srcu_expedited - Brute-force SRCU grace period |
956 | * @sp: srcu_struct with which to synchronize. | 961 | * @ssp: srcu_struct with which to synchronize. |
957 | * | 962 | * |
958 | * Wait for an SRCU grace period to elapse, but be more aggressive about | 963 | * Wait for an SRCU grace period to elapse, but be more aggressive about |
959 | * spinning rather than blocking when waiting. | 964 | * spinning rather than blocking when waiting. |
@@ -961,15 +966,15 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) | |||
961 | * Note that synchronize_srcu_expedited() has the same deadlock and | 966 | * Note that synchronize_srcu_expedited() has the same deadlock and |
962 | * memory-ordering properties as does synchronize_srcu(). | 967 | * memory-ordering properties as does synchronize_srcu(). |
963 | */ | 968 | */ |
964 | void synchronize_srcu_expedited(struct srcu_struct *sp) | 969 | void synchronize_srcu_expedited(struct srcu_struct *ssp) |
965 | { | 970 | { |
966 | __synchronize_srcu(sp, rcu_gp_is_normal()); | 971 | __synchronize_srcu(ssp, rcu_gp_is_normal()); |
967 | } | 972 | } |
968 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); | 973 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); |
969 | 974 | ||
970 | /** | 975 | /** |
971 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion | 976 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion |
972 | * @sp: srcu_struct with which to synchronize. | 977 | * @ssp: srcu_struct with which to synchronize. |
973 | * | 978 | * |
974 | * Wait for the count to drain to zero of both indexes. To avoid the | 979 | * Wait for the count to drain to zero of both indexes. To avoid the |
975 | * possible starvation of synchronize_srcu(), it waits for the count of | 980 | * possible starvation of synchronize_srcu(), it waits for the count of |
@@ -1011,12 +1016,12 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); | |||
1011 | * SRCU must also provide it. Note that detecting idleness is heuristic | 1016 | * SRCU must also provide it. Note that detecting idleness is heuristic |
1012 | * and subject to both false positives and negatives. | 1017 | * and subject to both false positives and negatives. |
1013 | */ | 1018 | */ |
1014 | void synchronize_srcu(struct srcu_struct *sp) | 1019 | void synchronize_srcu(struct srcu_struct *ssp) |
1015 | { | 1020 | { |
1016 | if (srcu_might_be_idle(sp) || rcu_gp_is_expedited()) | 1021 | if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited()) |
1017 | synchronize_srcu_expedited(sp); | 1022 | synchronize_srcu_expedited(ssp); |
1018 | else | 1023 | else |
1019 | __synchronize_srcu(sp, true); | 1024 | __synchronize_srcu(ssp, true); |
1020 | } | 1025 | } |
1021 | EXPORT_SYMBOL_GPL(synchronize_srcu); | 1026 | EXPORT_SYMBOL_GPL(synchronize_srcu); |
1022 | 1027 | ||
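Updaters that may sleep can use this blocking form instead of a callback: unpublish, wait for pre-existing readers, then reclaim directly. A sketch with the same hypothetical names, plus an assumed my_update_lock serializing updaters:

static void my_data_replace(struct my_data *newp)
{
	struct my_data *oldp;

	oldp = rcu_dereference_protected(my_shared_ptr,
					 lockdep_is_held(&my_update_lock));
	rcu_assign_pointer(my_shared_ptr, newp);
	synchronize_srcu(&my_srcu);	/* wait out readers still using oldp */
	kfree(oldp);
}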
@@ -1026,36 +1031,36 @@ EXPORT_SYMBOL_GPL(synchronize_srcu); | |||
1026 | static void srcu_barrier_cb(struct rcu_head *rhp) | 1031 | static void srcu_barrier_cb(struct rcu_head *rhp) |
1027 | { | 1032 | { |
1028 | struct srcu_data *sdp; | 1033 | struct srcu_data *sdp; |
1029 | struct srcu_struct *sp; | 1034 | struct srcu_struct *ssp; |
1030 | 1035 | ||
1031 | sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); | 1036 | sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); |
1032 | sp = sdp->sp; | 1037 | ssp = sdp->ssp; |
1033 | if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) | 1038 | if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) |
1034 | complete(&sp->srcu_barrier_completion); | 1039 | complete(&ssp->srcu_barrier_completion); |
1035 | } | 1040 | } |
1036 | 1041 | ||
1037 | /** | 1042 | /** |
1038 | * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. | 1043 | * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. |
1039 | * @sp: srcu_struct on which to wait for in-flight callbacks. | 1044 | * @ssp: srcu_struct on which to wait for in-flight callbacks. |
1040 | */ | 1045 | */ |
1041 | void srcu_barrier(struct srcu_struct *sp) | 1046 | void srcu_barrier(struct srcu_struct *ssp) |
1042 | { | 1047 | { |
1043 | int cpu; | 1048 | int cpu; |
1044 | struct srcu_data *sdp; | 1049 | struct srcu_data *sdp; |
1045 | unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq); | 1050 | unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq); |
1046 | 1051 | ||
1047 | check_init_srcu_struct(sp); | 1052 | check_init_srcu_struct(ssp); |
1048 | mutex_lock(&sp->srcu_barrier_mutex); | 1053 | mutex_lock(&ssp->srcu_barrier_mutex); |
1049 | if (rcu_seq_done(&sp->srcu_barrier_seq, s)) { | 1054 | if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) { |
1050 | smp_mb(); /* Force ordering following return. */ | 1055 | smp_mb(); /* Force ordering following return. */ |
1051 | mutex_unlock(&sp->srcu_barrier_mutex); | 1056 | mutex_unlock(&ssp->srcu_barrier_mutex); |
1052 | return; /* Someone else did our work for us. */ | 1057 | return; /* Someone else did our work for us. */ |
1053 | } | 1058 | } |
1054 | rcu_seq_start(&sp->srcu_barrier_seq); | 1059 | rcu_seq_start(&ssp->srcu_barrier_seq); |
1055 | init_completion(&sp->srcu_barrier_completion); | 1060 | init_completion(&ssp->srcu_barrier_completion); |
1056 | 1061 | ||
1057 | /* Initial count prevents reaching zero until all CBs are posted. */ | 1062 | /* Initial count prevents reaching zero until all CBs are posted. */ |
1058 | atomic_set(&sp->srcu_barrier_cpu_cnt, 1); | 1063 | atomic_set(&ssp->srcu_barrier_cpu_cnt, 1); |
1059 | 1064 | ||
1060 | /* | 1065 | /* |
1061 | * Each pass through this loop enqueues a callback, but only | 1066 | * Each pass through this loop enqueues a callback, but only |
@@ -1066,39 +1071,39 @@ void srcu_barrier(struct srcu_struct *sp) | |||
1066 | * grace period as the last callback already in the queue. | 1071 | * grace period as the last callback already in the queue. |
1067 | */ | 1072 | */ |
1068 | for_each_possible_cpu(cpu) { | 1073 | for_each_possible_cpu(cpu) { |
1069 | sdp = per_cpu_ptr(sp->sda, cpu); | 1074 | sdp = per_cpu_ptr(ssp->sda, cpu); |
1070 | spin_lock_irq_rcu_node(sdp); | 1075 | spin_lock_irq_rcu_node(sdp); |
1071 | atomic_inc(&sp->srcu_barrier_cpu_cnt); | 1076 | atomic_inc(&ssp->srcu_barrier_cpu_cnt); |
1072 | sdp->srcu_barrier_head.func = srcu_barrier_cb; | 1077 | sdp->srcu_barrier_head.func = srcu_barrier_cb; |
1073 | debug_rcu_head_queue(&sdp->srcu_barrier_head); | 1078 | debug_rcu_head_queue(&sdp->srcu_barrier_head); |
1074 | if (!rcu_segcblist_entrain(&sdp->srcu_cblist, | 1079 | if (!rcu_segcblist_entrain(&sdp->srcu_cblist, |
1075 | &sdp->srcu_barrier_head, 0)) { | 1080 | &sdp->srcu_barrier_head, 0)) { |
1076 | debug_rcu_head_unqueue(&sdp->srcu_barrier_head); | 1081 | debug_rcu_head_unqueue(&sdp->srcu_barrier_head); |
1077 | atomic_dec(&sp->srcu_barrier_cpu_cnt); | 1082 | atomic_dec(&ssp->srcu_barrier_cpu_cnt); |
1078 | } | 1083 | } |
1079 | spin_unlock_irq_rcu_node(sdp); | 1084 | spin_unlock_irq_rcu_node(sdp); |
1080 | } | 1085 | } |
1081 | 1086 | ||
1082 | /* Remove the initial count, at which point reaching zero can happen. */ | 1087 | /* Remove the initial count, at which point reaching zero can happen. */ |
1083 | if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) | 1088 | if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) |
1084 | complete(&sp->srcu_barrier_completion); | 1089 | complete(&ssp->srcu_barrier_completion); |
1085 | wait_for_completion(&sp->srcu_barrier_completion); | 1090 | wait_for_completion(&ssp->srcu_barrier_completion); |
1086 | 1091 | ||
1087 | rcu_seq_end(&sp->srcu_barrier_seq); | 1092 | rcu_seq_end(&ssp->srcu_barrier_seq); |
1088 | mutex_unlock(&sp->srcu_barrier_mutex); | 1093 | mutex_unlock(&ssp->srcu_barrier_mutex); |
1089 | } | 1094 | } |
1090 | EXPORT_SYMBOL_GPL(srcu_barrier); | 1095 | EXPORT_SYMBOL_GPL(srcu_barrier); |
1091 | 1096 | ||
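srcu_barrier() is the usual last step before tearing a domain down: stop posting new callbacks, flush the ones already queued, then release the domain. A teardown sketch for the hypothetical domain used in the examples above:

static void my_teardown(void)
{
	/* call_srcu() and srcu_read_lock() callers must already have stopped. */
	srcu_barrier(&my_srcu);		/* wait for queued callbacks to be invoked */
	cleanup_srcu_struct(&my_srcu);	/* now safe: no readers, no callbacks */
}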
1092 | /** | 1097 | /** |
1093 | * srcu_batches_completed - return batches completed. | 1098 | * srcu_batches_completed - return batches completed. |
1094 | * @sp: srcu_struct on which to report batch completion. | 1099 | * @ssp: srcu_struct on which to report batch completion. |
1095 | * | 1100 | * |
1096 | * Report the number of batches, correlated with, but not necessarily | 1101 | * Report the number of batches, correlated with, but not necessarily |
1097 | * precisely the same as, the number of grace periods that have elapsed. | 1102 | * precisely the same as, the number of grace periods that have elapsed. |
1098 | */ | 1103 | */ |
1099 | unsigned long srcu_batches_completed(struct srcu_struct *sp) | 1104 | unsigned long srcu_batches_completed(struct srcu_struct *ssp) |
1100 | { | 1105 | { |
1101 | return sp->srcu_idx; | 1106 | return ssp->srcu_idx; |
1102 | } | 1107 | } |
1103 | EXPORT_SYMBOL_GPL(srcu_batches_completed); | 1108 | EXPORT_SYMBOL_GPL(srcu_batches_completed); |
1104 | 1109 | ||
@@ -1107,11 +1112,11 @@ EXPORT_SYMBOL_GPL(srcu_batches_completed); | |||
1107 | * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has | 1112 | * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has |
1108 | * completed in that state. | 1113 | * completed in that state. |
1109 | */ | 1114 | */ |
1110 | static void srcu_advance_state(struct srcu_struct *sp) | 1115 | static void srcu_advance_state(struct srcu_struct *ssp) |
1111 | { | 1116 | { |
1112 | int idx; | 1117 | int idx; |
1113 | 1118 | ||
1114 | mutex_lock(&sp->srcu_gp_mutex); | 1119 | mutex_lock(&ssp->srcu_gp_mutex); |
1115 | 1120 | ||
1116 | /* | 1121 | /* |
1117 | * Because readers might be delayed for an extended period after | 1122 | * Because readers might be delayed for an extended period after |
@@ -1123,47 +1128,47 @@ static void srcu_advance_state(struct srcu_struct *sp) | |||
1123 | * The load-acquire ensures that we see the accesses performed | 1128 | * The load-acquire ensures that we see the accesses performed |
1124 | * by the prior grace period. | 1129 | * by the prior grace period. |
1125 | */ | 1130 | */ |
1126 | idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ | 1131 | idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */ |
1127 | if (idx == SRCU_STATE_IDLE) { | 1132 | if (idx == SRCU_STATE_IDLE) { |
1128 | spin_lock_irq_rcu_node(sp); | 1133 | spin_lock_irq_rcu_node(ssp); |
1129 | if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { | 1134 | if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { |
1130 | WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq)); | 1135 | WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq)); |
1131 | spin_unlock_irq_rcu_node(sp); | 1136 | spin_unlock_irq_rcu_node(ssp); |
1132 | mutex_unlock(&sp->srcu_gp_mutex); | 1137 | mutex_unlock(&ssp->srcu_gp_mutex); |
1133 | return; | 1138 | return; |
1134 | } | 1139 | } |
1135 | idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); | 1140 | idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); |
1136 | if (idx == SRCU_STATE_IDLE) | 1141 | if (idx == SRCU_STATE_IDLE) |
1137 | srcu_gp_start(sp); | 1142 | srcu_gp_start(ssp); |
1138 | spin_unlock_irq_rcu_node(sp); | 1143 | spin_unlock_irq_rcu_node(ssp); |
1139 | if (idx != SRCU_STATE_IDLE) { | 1144 | if (idx != SRCU_STATE_IDLE) { |
1140 | mutex_unlock(&sp->srcu_gp_mutex); | 1145 | mutex_unlock(&ssp->srcu_gp_mutex); |
1141 | return; /* Someone else started the grace period. */ | 1146 | return; /* Someone else started the grace period. */ |
1142 | } | 1147 | } |
1143 | } | 1148 | } |
1144 | 1149 | ||
1145 | if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { | 1150 | if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { |
1146 | idx = 1 ^ (sp->srcu_idx & 1); | 1151 | idx = 1 ^ (ssp->srcu_idx & 1); |
1147 | if (!try_check_zero(sp, idx, 1)) { | 1152 | if (!try_check_zero(ssp, idx, 1)) { |
1148 | mutex_unlock(&sp->srcu_gp_mutex); | 1153 | mutex_unlock(&ssp->srcu_gp_mutex); |
1149 | return; /* readers present, retry later. */ | 1154 | return; /* readers present, retry later. */ |
1150 | } | 1155 | } |
1151 | srcu_flip(sp); | 1156 | srcu_flip(ssp); |
1152 | rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); | 1157 | rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2); |
1153 | } | 1158 | } |
1154 | 1159 | ||
1155 | if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { | 1160 | if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { |
1156 | 1161 | ||
1157 | /* | 1162 | /* |
1158 | * SRCU read-side critical sections are normally short, | 1163 | * SRCU read-side critical sections are normally short, |
1159 | * so check at least twice in quick succession after a flip. | 1164 | * so check at least twice in quick succession after a flip. |
1160 | */ | 1165 | */ |
1161 | idx = 1 ^ (sp->srcu_idx & 1); | 1166 | idx = 1 ^ (ssp->srcu_idx & 1); |
1162 | if (!try_check_zero(sp, idx, 2)) { | 1167 | if (!try_check_zero(ssp, idx, 2)) { |
1163 | mutex_unlock(&sp->srcu_gp_mutex); | 1168 | mutex_unlock(&ssp->srcu_gp_mutex); |
1164 | return; /* readers present, retry later. */ | 1169 | return; /* readers present, retry later. */ |
1165 | } | 1170 | } |
1166 | srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */ | 1171 | srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ |
1167 | } | 1172 | } |
1168 | } | 1173 | } |
1169 | 1174 | ||
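Stripped of locking and retries, the grace-period shape that srcu_advance_state() drives can be summarized as pseudocode, where active names the counter slot new readers currently increment:

	wait_for_readers(!active);	/* SCAN1: confirm the idle slot has drained */
	active = !active;		/* srcu_flip(): new readers move to the idle slot */
	wait_for_readers(!active);	/* SCAN2: wait out readers on the old slot */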
@@ -1179,14 +1184,14 @@ static void srcu_invoke_callbacks(struct work_struct *work) | |||
1179 | struct rcu_cblist ready_cbs; | 1184 | struct rcu_cblist ready_cbs; |
1180 | struct rcu_head *rhp; | 1185 | struct rcu_head *rhp; |
1181 | struct srcu_data *sdp; | 1186 | struct srcu_data *sdp; |
1182 | struct srcu_struct *sp; | 1187 | struct srcu_struct *ssp; |
1183 | 1188 | ||
1184 | sdp = container_of(work, struct srcu_data, work.work); | 1189 | sdp = container_of(work, struct srcu_data, work.work); |
1185 | sp = sdp->sp; | 1190 | ssp = sdp->ssp; |
1186 | rcu_cblist_init(&ready_cbs); | 1191 | rcu_cblist_init(&ready_cbs); |
1187 | spin_lock_irq_rcu_node(sdp); | 1192 | spin_lock_irq_rcu_node(sdp); |
1188 | rcu_segcblist_advance(&sdp->srcu_cblist, | 1193 | rcu_segcblist_advance(&sdp->srcu_cblist, |
1189 | rcu_seq_current(&sp->srcu_gp_seq)); | 1194 | rcu_seq_current(&ssp->srcu_gp_seq)); |
1190 | if (sdp->srcu_cblist_invoking || | 1195 | if (sdp->srcu_cblist_invoking || |
1191 | !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { | 1196 | !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { |
1192 | spin_unlock_irq_rcu_node(sdp); | 1197 | spin_unlock_irq_rcu_node(sdp); |
@@ -1212,7 +1217,7 @@ static void srcu_invoke_callbacks(struct work_struct *work) | |||
1212 | spin_lock_irq_rcu_node(sdp); | 1217 | spin_lock_irq_rcu_node(sdp); |
1213 | rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); | 1218 | rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); |
1214 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, | 1219 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
1215 | rcu_seq_snap(&sp->srcu_gp_seq)); | 1220 | rcu_seq_snap(&ssp->srcu_gp_seq)); |
1216 | sdp->srcu_cblist_invoking = false; | 1221 | sdp->srcu_cblist_invoking = false; |
1217 | more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); | 1222 | more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); |
1218 | spin_unlock_irq_rcu_node(sdp); | 1223 | spin_unlock_irq_rcu_node(sdp); |
@@ -1224,24 +1229,24 @@ static void srcu_invoke_callbacks(struct work_struct *work) | |||
1224 | * Finished one round of SRCU grace period. Start another if there are | 1229 | * Finished one round of SRCU grace period. Start another if there are |
1225 | * more SRCU callbacks queued, otherwise put SRCU into not-running state. | 1230 | * more SRCU callbacks queued, otherwise put SRCU into not-running state. |
1226 | */ | 1231 | */ |
1227 | static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) | 1232 | static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) |
1228 | { | 1233 | { |
1229 | bool pushgp = true; | 1234 | bool pushgp = true; |
1230 | 1235 | ||
1231 | spin_lock_irq_rcu_node(sp); | 1236 | spin_lock_irq_rcu_node(ssp); |
1232 | if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { | 1237 | if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { |
1233 | if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) { | 1238 | if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) { |
1234 | /* All requests fulfilled, time to go idle. */ | 1239 | /* All requests fulfilled, time to go idle. */ |
1235 | pushgp = false; | 1240 | pushgp = false; |
1236 | } | 1241 | } |
1237 | } else if (!rcu_seq_state(sp->srcu_gp_seq)) { | 1242 | } else if (!rcu_seq_state(ssp->srcu_gp_seq)) { |
1238 | /* Outstanding request and no GP. Start one. */ | 1243 | /* Outstanding request and no GP. Start one. */ |
1239 | srcu_gp_start(sp); | 1244 | srcu_gp_start(ssp); |
1240 | } | 1245 | } |
1241 | spin_unlock_irq_rcu_node(sp); | 1246 | spin_unlock_irq_rcu_node(ssp); |
1242 | 1247 | ||
1243 | if (pushgp) | 1248 | if (pushgp) |
1244 | queue_delayed_work(rcu_gp_wq, &sp->work, delay); | 1249 | queue_delayed_work(rcu_gp_wq, &ssp->work, delay); |
1245 | } | 1250 | } |
1246 | 1251 | ||
1247 | /* | 1252 | /* |
@@ -1249,41 +1254,41 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) | |||
1249 | */ | 1254 | */ |
1250 | static void process_srcu(struct work_struct *work) | 1255 | static void process_srcu(struct work_struct *work) |
1251 | { | 1256 | { |
1252 | struct srcu_struct *sp; | 1257 | struct srcu_struct *ssp; |
1253 | 1258 | ||
1254 | sp = container_of(work, struct srcu_struct, work.work); | 1259 | ssp = container_of(work, struct srcu_struct, work.work); |
1255 | 1260 | ||
1256 | srcu_advance_state(sp); | 1261 | srcu_advance_state(ssp); |
1257 | srcu_reschedule(sp, srcu_get_delay(sp)); | 1262 | srcu_reschedule(ssp, srcu_get_delay(ssp)); |
1258 | } | 1263 | } |
1259 | 1264 | ||
1260 | void srcutorture_get_gp_data(enum rcutorture_type test_type, | 1265 | void srcutorture_get_gp_data(enum rcutorture_type test_type, |
1261 | struct srcu_struct *sp, int *flags, | 1266 | struct srcu_struct *ssp, int *flags, |
1262 | unsigned long *gp_seq) | 1267 | unsigned long *gp_seq) |
1263 | { | 1268 | { |
1264 | if (test_type != SRCU_FLAVOR) | 1269 | if (test_type != SRCU_FLAVOR) |
1265 | return; | 1270 | return; |
1266 | *flags = 0; | 1271 | *flags = 0; |
1267 | *gp_seq = rcu_seq_current(&sp->srcu_gp_seq); | 1272 | *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq); |
1268 | } | 1273 | } |
1269 | EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); | 1274 | EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); |
1270 | 1275 | ||
1271 | void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf) | 1276 | void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) |
1272 | { | 1277 | { |
1273 | int cpu; | 1278 | int cpu; |
1274 | int idx; | 1279 | int idx; |
1275 | unsigned long s0 = 0, s1 = 0; | 1280 | unsigned long s0 = 0, s1 = 0; |
1276 | 1281 | ||
1277 | idx = sp->srcu_idx & 0x1; | 1282 | idx = ssp->srcu_idx & 0x1; |
1278 | pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", | 1283 | pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", |
1279 | tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx); | 1284 | tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx); |
1280 | for_each_possible_cpu(cpu) { | 1285 | for_each_possible_cpu(cpu) { |
1281 | unsigned long l0, l1; | 1286 | unsigned long l0, l1; |
1282 | unsigned long u0, u1; | 1287 | unsigned long u0, u1; |
1283 | long c0, c1; | 1288 | long c0, c1; |
1284 | struct srcu_data *sdp; | 1289 | struct srcu_data *sdp; |
1285 | 1290 | ||
1286 | sdp = per_cpu_ptr(sp->sda, cpu); | 1291 | sdp = per_cpu_ptr(ssp->sda, cpu); |
1287 | u0 = sdp->srcu_unlock_count[!idx]; | 1292 | u0 = sdp->srcu_unlock_count[!idx]; |
1288 | u1 = sdp->srcu_unlock_count[idx]; | 1293 | u1 = sdp->srcu_unlock_count[idx]; |
1289 | 1294 | ||
@@ -1318,14 +1323,14 @@ early_initcall(srcu_bootup_announce); | |||
1318 | 1323 | ||
1319 | void __init srcu_init(void) | 1324 | void __init srcu_init(void) |
1320 | { | 1325 | { |
1321 | struct srcu_struct *sp; | 1326 | struct srcu_struct *ssp; |
1322 | 1327 | ||
1323 | srcu_init_done = true; | 1328 | srcu_init_done = true; |
1324 | while (!list_empty(&srcu_boot_list)) { | 1329 | while (!list_empty(&srcu_boot_list)) { |
1325 | sp = list_first_entry(&srcu_boot_list, struct srcu_struct, | 1330 | ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, |
1326 | work.work.entry); | 1331 | work.work.entry); |
1327 | check_init_srcu_struct(sp); | 1332 | check_init_srcu_struct(ssp); |
1328 | list_del_init(&sp->work.work.entry); | 1333 | list_del_init(&ssp->work.work.entry); |
1329 | queue_work(rcu_gp_wq, &sp->work.work); | 1334 | queue_work(rcu_gp_wq, &ssp->work.work); |
1330 | } | 1335 | } |
1331 | } | 1336 | } |
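The srcutree.c hunks above are almost entirely the sp -> ssp rename, but the logic carried forward in srcu_reschedule() turns on two conventions: the low-order bits of ->srcu_gp_seq encode whether a grace period is in flight, and sequence numbers are compared with wrap-safe macros. A minimal userspace sketch of those conventions, with constants mirroring kernel/rcu/rcu.h and include/linux/rcupdate.h as of this series (illustration only, not the kernel code):

#include <limits.h>
#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT   2
#define RCU_SEQ_STATE_MASK  ((1UL << RCU_SEQ_CTR_SHIFT) - 1)
#define ULONG_CMP_GE(a, b)  (ULONG_MAX / 2 >= (a) - (b))

static unsigned long rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;  /* nonzero while a GP is in flight */
}

int main(void)
{
        unsigned long gp_seq = 8;        /* two completed GPs, currently idle */
        unsigned long gp_seq_needed = 8; /* no newer request outstanding */

        /* All requests fulfilled and not mid-GP: safe to go idle. */
        if (ULONG_CMP_GE(gp_seq, gp_seq_needed) && !rcu_seq_state(gp_seq))
                printf("idle\n");
        return 0;
}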
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c index 3f943efcf61c..be10036fa621 100644 --- a/kernel/rcu/sync.c +++ b/kernel/rcu/sync.c | |||
@@ -44,15 +44,15 @@ static const struct { | |||
44 | __INIT_HELD(rcu_read_lock_held) | 44 | __INIT_HELD(rcu_read_lock_held) |
45 | }, | 45 | }, |
46 | [RCU_SCHED_SYNC] = { | 46 | [RCU_SCHED_SYNC] = { |
47 | .sync = synchronize_sched, | 47 | .sync = synchronize_rcu, |
48 | .call = call_rcu_sched, | 48 | .call = call_rcu, |
49 | .wait = rcu_barrier_sched, | 49 | .wait = rcu_barrier, |
50 | __INIT_HELD(rcu_read_lock_sched_held) | 50 | __INIT_HELD(rcu_read_lock_sched_held) |
51 | }, | 51 | }, |
52 | [RCU_BH_SYNC] = { | 52 | [RCU_BH_SYNC] = { |
53 | .sync = synchronize_rcu_bh, | 53 | .sync = synchronize_rcu, |
54 | .call = call_rcu_bh, | 54 | .call = call_rcu, |
55 | .wait = rcu_barrier_bh, | 55 | .wait = rcu_barrier, |
56 | __INIT_HELD(rcu_read_lock_bh_held) | 56 | __INIT_HELD(rcu_read_lock_bh_held) |
57 | }, | 57 | }, |
58 | }; | 58 | }; |
@@ -125,8 +125,7 @@ void rcu_sync_enter(struct rcu_sync *rsp) | |||
125 | rsp->gp_state = GP_PENDING; | 125 | rsp->gp_state = GP_PENDING; |
126 | spin_unlock_irq(&rsp->rss_lock); | 126 | spin_unlock_irq(&rsp->rss_lock); |
127 | 127 | ||
128 | BUG_ON(need_wait && need_sync); | 128 | WARN_ON_ONCE(need_wait && need_sync); |
129 | |||
130 | if (need_sync) { | 129 | if (need_sync) { |
131 | gp_ops[rsp->gp_type].sync(); | 130 | gp_ops[rsp->gp_type].sync(); |
132 | rsp->gp_state = GP_PASSED; | 131 | rsp->gp_state = GP_PASSED; |
@@ -139,7 +138,7 @@ void rcu_sync_enter(struct rcu_sync *rsp) | |||
139 | * Nobody has yet been allowed the 'fast' path and thus we can | 138 | * Nobody has yet been allowed the 'fast' path and thus we can |
140 | * avoid doing any sync(). The callback will get 'dropped'. | 139 | * avoid doing any sync(). The callback will get 'dropped'. |
141 | */ | 140 | */ |
142 | BUG_ON(rsp->gp_state != GP_PASSED); | 141 | WARN_ON_ONCE(rsp->gp_state != GP_PASSED); |
143 | } | 142 | } |
144 | } | 143 | } |
145 | 144 | ||
@@ -166,8 +165,8 @@ static void rcu_sync_func(struct rcu_head *rhp) | |||
166 | struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head); | 165 | struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head); |
167 | unsigned long flags; | 166 | unsigned long flags; |
168 | 167 | ||
169 | BUG_ON(rsp->gp_state != GP_PASSED); | 168 | WARN_ON_ONCE(rsp->gp_state != GP_PASSED); |
170 | BUG_ON(rsp->cb_state == CB_IDLE); | 169 | WARN_ON_ONCE(rsp->cb_state == CB_IDLE); |
171 | 170 | ||
172 | spin_lock_irqsave(&rsp->rss_lock, flags); | 171 | spin_lock_irqsave(&rsp->rss_lock, flags); |
173 | if (rsp->gp_count) { | 172 | if (rsp->gp_count) { |
@@ -225,7 +224,7 @@ void rcu_sync_dtor(struct rcu_sync *rsp) | |||
225 | { | 224 | { |
226 | int cb_state; | 225 | int cb_state; |
227 | 226 | ||
228 | BUG_ON(rsp->gp_count); | 227 | WARN_ON_ONCE(rsp->gp_count); |
229 | 228 | ||
230 | spin_lock_irq(&rsp->rss_lock); | 229 | spin_lock_irq(&rsp->rss_lock); |
231 | if (rsp->cb_state == CB_REPLAY) | 230 | if (rsp->cb_state == CB_REPLAY) |
@@ -235,6 +234,6 @@ void rcu_sync_dtor(struct rcu_sync *rsp) | |||
235 | 234 | ||
236 | if (cb_state != CB_IDLE) { | 235 | if (cb_state != CB_IDLE) { |
237 | gp_ops[rsp->gp_type].wait(); | 236 | gp_ops[rsp->gp_type].wait(); |
238 | BUG_ON(rsp->cb_state != CB_IDLE); | 237 | WARN_ON_ONCE(rsp->cb_state != CB_IDLE); |
239 | } | 238 | } |
240 | } | 239 | } |
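The sync.c changes swap BUG_ON() for WARN_ON_ONCE(): a failed assertion now produces a single splat and lets execution continue instead of killing the kernel, which is what allows the later kthread-spawning sites in this series to fall back gracefully. A userspace model of the "warn once, keep going" semantics (a sketch assuming GCC statement expressions, as the kernel macro uses; not the real implementation):

#include <stdbool.h>
#include <stdio.h>

#define WARN_ON_ONCE_MODEL(cond) ({                             \
        static bool __warned;                                   \
        bool __ret = (cond);                                    \
        if (__ret && !__warned) {                               \
                __warned = true;                                \
                fprintf(stderr, "WARNING: %s\n", #cond);        \
        }                                                       \
        __ret;                                                  \
})

int main(void)
{
        for (int i = 0; i < 3; i++) {
                /* Splats once (at i == 1), then execution keeps going. */
                if (WARN_ON_ONCE_MODEL(i > 0))
                        continue;
        }
        return 0;
}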
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 121f833acd04..9180158756d2 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -207,6 +207,19 @@ static int rcu_gp_in_progress(void) | |||
207 | return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); | 207 | return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); |
208 | } | 208 | } |
209 | 209 | ||
210 | /* | ||
211 | * Return the number of callbacks queued on the specified CPU. | ||
212 | * Handles both the nocbs and normal cases. | ||
213 | */ | ||
214 | static long rcu_get_n_cbs_cpu(int cpu) | ||
215 | { | ||
216 | struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); | ||
217 | |||
218 | if (rcu_segcblist_is_enabled(&rdp->cblist)) /* Online normal CPU? */ | ||
219 | return rcu_segcblist_n_cbs(&rdp->cblist); | ||
220 | return rcu_get_n_cbs_nocb_cpu(rdp); /* Works for offline, too. */ | ||
221 | } | ||
222 | |||
210 | void rcu_softirq_qs(void) | 223 | void rcu_softirq_qs(void) |
211 | { | 224 | { |
212 | rcu_qs(); | 225 | rcu_qs(); |
@@ -500,16 +513,29 @@ void rcu_force_quiescent_state(void) | |||
500 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | 513 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); |
501 | 514 | ||
502 | /* | 515 | /* |
516 | * Convert a ->gp_state value to a character string. | ||
517 | */ | ||
518 | static const char *gp_state_getname(short gs) | ||
519 | { | ||
520 | if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names)) | ||
521 | return "???"; | ||
522 | return gp_state_names[gs]; | ||
523 | } | ||
524 | |||
525 | /* | ||
503 | * Show the state of the grace-period kthreads. | 526 | * Show the state of the grace-period kthreads. |
504 | */ | 527 | */ |
505 | void show_rcu_gp_kthreads(void) | 528 | void show_rcu_gp_kthreads(void) |
506 | { | 529 | { |
507 | int cpu; | 530 | int cpu; |
531 | unsigned long j; | ||
508 | struct rcu_data *rdp; | 532 | struct rcu_data *rdp; |
509 | struct rcu_node *rnp; | 533 | struct rcu_node *rnp; |
510 | 534 | ||
511 | pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name, | 535 | j = jiffies - READ_ONCE(rcu_state.gp_activity); |
512 | rcu_state.gp_state, rcu_state.gp_kthread->state); | 536 | pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n", |
537 | rcu_state.name, gp_state_getname(rcu_state.gp_state), | ||
538 | rcu_state.gp_state, rcu_state.gp_kthread->state, j); | ||
513 | rcu_for_each_node_breadth_first(rnp) { | 539 | rcu_for_each_node_breadth_first(rnp) { |
514 | if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed)) | 540 | if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed)) |
515 | continue; | 541 | continue; |
@@ -891,12 +917,12 @@ void rcu_irq_enter_irqson(void) | |||
891 | } | 917 | } |
892 | 918 | ||
893 | /** | 919 | /** |
894 | * rcu_is_watching - see if RCU thinks that the current CPU is idle | 920 | * rcu_is_watching - see if RCU thinks that the current CPU is not idle |
895 | * | 921 | * |
896 | * Return true if RCU is watching the running CPU, which means that this | 922 | * Return true if RCU is watching the running CPU, which means that this |
897 | * CPU can safely enter RCU read-side critical sections. In other words, | 923 | * CPU can safely enter RCU read-side critical sections. In other words, |
898 | * if the current CPU is in its idle loop and is neither in an interrupt | 924 | * if the current CPU is not in its idle loop or is in an interrupt or |
899 | * or NMI handler, return true. | 925 | * NMI handler, return true. |
900 | */ | 926 | */ |
901 | bool notrace rcu_is_watching(void) | 927 | bool notrace rcu_is_watching(void) |
902 | { | 928 | { |
@@ -1143,16 +1169,6 @@ static void record_gp_stall_check_time(void) | |||
1143 | } | 1169 | } |
1144 | 1170 | ||
1145 | /* | 1171 | /* |
1146 | * Convert a ->gp_state value to a character string. | ||
1147 | */ | ||
1148 | static const char *gp_state_getname(short gs) | ||
1149 | { | ||
1150 | if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names)) | ||
1151 | return "???"; | ||
1152 | return gp_state_names[gs]; | ||
1153 | } | ||
1154 | |||
1155 | /* | ||
1156 | * Complain about starvation of grace-period kthread. | 1172 | * Complain about starvation of grace-period kthread. |
1157 | */ | 1173 | */ |
1158 | static void rcu_check_gp_kthread_starvation(void) | 1174 | static void rcu_check_gp_kthread_starvation(void) |
@@ -1262,8 +1278,7 @@ static void print_other_cpu_stall(unsigned long gp_seq) | |||
1262 | 1278 | ||
1263 | print_cpu_stall_info_end(); | 1279 | print_cpu_stall_info_end(); |
1264 | for_each_possible_cpu(cpu) | 1280 | for_each_possible_cpu(cpu) |
1265 | totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data, | 1281 | totqlen += rcu_get_n_cbs_cpu(cpu); |
1266 | cpu)->cblist); | ||
1267 | pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n", | 1282 | pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n", |
1268 | smp_processor_id(), (long)(jiffies - rcu_state.gp_start), | 1283 | smp_processor_id(), (long)(jiffies - rcu_state.gp_start), |
1269 | (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); | 1284 | (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); |
@@ -1323,8 +1338,7 @@ static void print_cpu_stall(void) | |||
1323 | raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); | 1338 | raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); |
1324 | print_cpu_stall_info_end(); | 1339 | print_cpu_stall_info_end(); |
1325 | for_each_possible_cpu(cpu) | 1340 | for_each_possible_cpu(cpu) |
1326 | totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data, | 1341 | totqlen += rcu_get_n_cbs_cpu(cpu); |
1327 | cpu)->cblist); | ||
1328 | pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n", | 1342 | pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n", |
1329 | jiffies - rcu_state.gp_start, | 1343 | jiffies - rcu_state.gp_start, |
1330 | (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); | 1344 | (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); |
@@ -1986,7 +2000,8 @@ static void rcu_gp_cleanup(void) | |||
1986 | 2000 | ||
1987 | WRITE_ONCE(rcu_state.gp_activity, jiffies); | 2001 | WRITE_ONCE(rcu_state.gp_activity, jiffies); |
1988 | raw_spin_lock_irq_rcu_node(rnp); | 2002 | raw_spin_lock_irq_rcu_node(rnp); |
1989 | gp_duration = jiffies - rcu_state.gp_start; | 2003 | rcu_state.gp_end = jiffies; |
2004 | gp_duration = rcu_state.gp_end - rcu_state.gp_start; | ||
1990 | if (gp_duration > rcu_state.gp_max) | 2005 | if (gp_duration > rcu_state.gp_max) |
1991 | rcu_state.gp_max = gp_duration; | 2006 | rcu_state.gp_max = gp_duration; |
1992 | 2007 | ||
@@ -2032,9 +2047,9 @@ static void rcu_gp_cleanup(void) | |||
2032 | rnp = rcu_get_root(); | 2047 | rnp = rcu_get_root(); |
2033 | raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ | 2048 | raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ |
2034 | 2049 | ||
2035 | /* Declare grace period done. */ | 2050 | /* Declare grace period done, trace first to use old GP number. */ |
2036 | rcu_seq_end(&rcu_state.gp_seq); | ||
2037 | trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); | 2051 | trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); |
2052 | rcu_seq_end(&rcu_state.gp_seq); | ||
2038 | rcu_state.gp_state = RCU_GP_IDLE; | 2053 | rcu_state.gp_state = RCU_GP_IDLE; |
2039 | /* Check for GP requests since above loop. */ | 2054 | /* Check for GP requests since above loop. */ |
2040 | rdp = this_cpu_ptr(&rcu_data); | 2055 | rdp = this_cpu_ptr(&rcu_data); |
@@ -2600,10 +2615,10 @@ static void force_quiescent_state(void) | |||
2600 | * This function checks for grace-period requests that fail to motivate | 2615 | * This function checks for grace-period requests that fail to motivate |
2601 | * RCU to come out of its idle mode. | 2616 | * RCU to come out of its idle mode. |
2602 | */ | 2617 | */ |
2603 | static void | 2618 | void |
2604 | rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp) | 2619 | rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp, |
2620 | const unsigned long gpssdelay) | ||
2605 | { | 2621 | { |
2606 | const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ; | ||
2607 | unsigned long flags; | 2622 | unsigned long flags; |
2608 | unsigned long j; | 2623 | unsigned long j; |
2609 | struct rcu_node *rnp_root = rcu_get_root(); | 2624 | struct rcu_node *rnp_root = rcu_get_root(); |
@@ -2655,6 +2670,48 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp) | |||
2655 | } | 2670 | } |
2656 | 2671 | ||
2657 | /* | 2672 | /* |
2673 | * Do a forward-progress check for rcutorture. This is normally invoked | ||
2674 | * due to an OOM event. The argument "j" gives the time period during | ||
2675 | * which rcutorture would like progress to have been made. | ||
2676 | */ | ||
2677 | void rcu_fwd_progress_check(unsigned long j) | ||
2678 | { | ||
2679 | unsigned long cbs; | ||
2680 | int cpu; | ||
2681 | unsigned long max_cbs = 0; | ||
2682 | int max_cpu = -1; | ||
2683 | struct rcu_data *rdp; | ||
2684 | |||
2685 | if (rcu_gp_in_progress()) { | ||
2686 | pr_info("%s: GP age %lu jiffies\n", | ||
2687 | __func__, jiffies - rcu_state.gp_start); | ||
2688 | show_rcu_gp_kthreads(); | ||
2689 | } else { | ||
2690 | pr_info("%s: Last GP end %lu jiffies ago\n", | ||
2691 | __func__, jiffies - rcu_state.gp_end); | ||
2692 | preempt_disable(); | ||
2693 | rdp = this_cpu_ptr(&rcu_data); | ||
2694 | rcu_check_gp_start_stall(rdp->mynode, rdp, j); | ||
2695 | preempt_enable(); | ||
2696 | } | ||
2697 | for_each_possible_cpu(cpu) { | ||
2698 | cbs = rcu_get_n_cbs_cpu(cpu); | ||
2699 | if (!cbs) | ||
2700 | continue; | ||
2701 | if (max_cpu < 0) | ||
2702 | pr_info("%s: callbacks", __func__); | ||
2703 | pr_cont(" %d: %lu", cpu, cbs); | ||
2704 | if (cbs <= max_cbs) | ||
2705 | continue; | ||
2706 | max_cbs = cbs; | ||
2707 | max_cpu = cpu; | ||
2708 | } | ||
2709 | if (max_cpu >= 0) | ||
2710 | pr_cont("\n"); | ||
2711 | } | ||
2712 | EXPORT_SYMBOL_GPL(rcu_fwd_progress_check); | ||
2713 | |||
2714 | /* | ||
2658 | * This does the RCU core processing work for the specified rcu_data | 2715 | * This does the RCU core processing work for the specified rcu_data |
2659 | * structures. This may be called only from the CPU to whom the rdp | 2716 | * structures. This may be called only from the CPU to whom the rdp |
2660 | * belongs. | 2717 | * belongs. |
@@ -2690,7 +2747,7 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused | |||
2690 | local_irq_restore(flags); | 2747 | local_irq_restore(flags); |
2691 | } | 2748 | } |
2692 | 2749 | ||
2693 | rcu_check_gp_start_stall(rnp, rdp); | 2750 | rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); |
2694 | 2751 | ||
2695 | /* If there are callbacks ready, invoke them. */ | 2752 | /* If there are callbacks ready, invoke them. */ |
2696 | if (rcu_segcblist_ready_cbs(&rdp->cblist)) | 2753 | if (rcu_segcblist_ready_cbs(&rdp->cblist)) |
@@ -2826,7 +2883,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy) | |||
2826 | * Very early boot, before rcu_init(). Initialize if needed | 2883 | * Very early boot, before rcu_init(). Initialize if needed |
2827 | * and then drop through to queue the callback. | 2884 | * and then drop through to queue the callback. |
2828 | */ | 2885 | */ |
2829 | BUG_ON(cpu != -1); | 2886 | WARN_ON_ONCE(cpu != -1); |
2830 | WARN_ON_ONCE(!rcu_is_watching()); | 2887 | WARN_ON_ONCE(!rcu_is_watching()); |
2831 | if (rcu_segcblist_empty(&rdp->cblist)) | 2888 | if (rcu_segcblist_empty(&rdp->cblist)) |
2832 | rcu_segcblist_init(&rdp->cblist); | 2889 | rcu_segcblist_init(&rdp->cblist); |
@@ -3485,7 +3542,8 @@ static int __init rcu_spawn_gp_kthread(void) | |||
3485 | 3542 | ||
3486 | rcu_scheduler_fully_active = 1; | 3543 | rcu_scheduler_fully_active = 1; |
3487 | t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); | 3544 | t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); |
3488 | BUG_ON(IS_ERR(t)); | 3545 | if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) |
3546 | return 0; | ||
3489 | rnp = rcu_get_root(); | 3547 | rnp = rcu_get_root(); |
3490 | raw_spin_lock_irqsave_rcu_node(rnp, flags); | 3548 | raw_spin_lock_irqsave_rcu_node(rnp, flags); |
3491 | rcu_state.gp_kthread = t; | 3549 | rcu_state.gp_kthread = t; |
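Several of the tree.c additions (the new ->gp_end timestamp, the "delta ->gp_activity" value in show_rcu_gp_kthreads(), and the GP-age reporting in rcu_fwd_progress_check()) rely on jiffies deltas computed with plain unsigned subtraction. A small standalone sketch of why that arithmetic stays correct across counter wrap:

#include <stdio.h>

/* jiffies-style counters are unsigned and may wrap, but "now - then"
 * still yields the elapsed ticks as long as the real interval is less
 * than half the counter range.  Plain C, for illustration only.
 */
int main(void)
{
        unsigned long then = (unsigned long)-5; /* 5 ticks before wraparound */
        unsigned long now  = 7;                 /* 7 ticks after wraparound  */

        printf("elapsed = %lu ticks\n", now - then);  /* prints 12 */
        return 0;
}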
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 703e19ff532d..d90b02b53c0e 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h | |||
@@ -57,7 +57,7 @@ struct rcu_node { | |||
57 | /* some rcu_state fields as well as */ | 57 | /* some rcu_state fields as well as */ |
58 | /* following. */ | 58 | /* following. */ |
59 | unsigned long gp_seq; /* Track rsp->rcu_gp_seq. */ | 59 | unsigned long gp_seq; /* Track rsp->rcu_gp_seq. */ |
60 | unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */ | 60 | unsigned long gp_seq_needed; /* Track furthest future GP request. */ |
61 | unsigned long completedqs; /* All QSes done for this node. */ | 61 | unsigned long completedqs; /* All QSes done for this node. */ |
62 | unsigned long qsmask; /* CPUs or groups that need to switch in */ | 62 | unsigned long qsmask; /* CPUs or groups that need to switch in */ |
63 | /* order for current grace period to proceed.*/ | 63 | /* order for current grace period to proceed.*/ |
@@ -163,7 +163,7 @@ union rcu_noqs { | |||
163 | struct rcu_data { | 163 | struct rcu_data { |
164 | /* 1) quiescent-state and grace-period handling : */ | 164 | /* 1) quiescent-state and grace-period handling : */ |
165 | unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */ | 165 | unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */ |
166 | unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */ | 166 | unsigned long gp_seq_needed; /* Track furthest future GP request. */ |
167 | union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */ | 167 | union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */ |
168 | bool core_needs_qs; /* Core waits for quiesc state. */ | 168 | bool core_needs_qs; /* Core waits for quiesc state. */ |
169 | bool beenonline; /* CPU online at least once. */ | 169 | bool beenonline; /* CPU online at least once. */ |
@@ -328,6 +328,8 @@ struct rcu_state { | |||
328 | /* force_quiescent_state(). */ | 328 | /* force_quiescent_state(). */ |
329 | unsigned long gp_start; /* Time at which GP started, */ | 329 | unsigned long gp_start; /* Time at which GP started, */ |
330 | /* but in jiffies. */ | 330 | /* but in jiffies. */ |
331 | unsigned long gp_end; /* Time last GP ended, again */ | ||
332 | /* in jiffies. */ | ||
331 | unsigned long gp_activity; /* Time of last GP kthread */ | 333 | unsigned long gp_activity; /* Time of last GP kthread */ |
332 | /* activity in jiffies. */ | 334 | /* activity in jiffies. */ |
333 | unsigned long gp_req_activity; /* Time of last GP request */ | 335 | unsigned long gp_req_activity; /* Time of last GP request */ |
@@ -398,17 +400,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name; | |||
398 | #define RCU_NAME rcu_name | 400 | #define RCU_NAME rcu_name |
399 | #endif /* #else #ifdef CONFIG_TRACING */ | 401 | #endif /* #else #ifdef CONFIG_TRACING */ |
400 | 402 | ||
401 | /* | ||
402 | * RCU implementation internal declarations: | ||
403 | */ | ||
404 | extern struct rcu_state rcu_sched_state; | ||
405 | |||
406 | extern struct rcu_state rcu_bh_state; | ||
407 | |||
408 | #ifdef CONFIG_PREEMPT_RCU | ||
409 | extern struct rcu_state rcu_preempt_state; | ||
410 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | ||
411 | |||
412 | int rcu_dynticks_snap(struct rcu_data *rdp); | 403 | int rcu_dynticks_snap(struct rcu_data *rdp); |
413 | 404 | ||
414 | #ifdef CONFIG_RCU_BOOST | 405 | #ifdef CONFIG_RCU_BOOST |
@@ -466,6 +457,7 @@ static void __init rcu_spawn_nocb_kthreads(void); | |||
466 | static void __init rcu_organize_nocb_kthreads(void); | 457 | static void __init rcu_organize_nocb_kthreads(void); |
467 | #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ | 458 | #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ |
468 | static bool init_nocb_callback_list(struct rcu_data *rdp); | 459 | static bool init_nocb_callback_list(struct rcu_data *rdp); |
460 | static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp); | ||
469 | static void rcu_bind_gp_kthread(void); | 461 | static void rcu_bind_gp_kthread(void); |
470 | static bool rcu_nohz_full_cpu(void); | 462 | static bool rcu_nohz_full_cpu(void); |
471 | static void rcu_dynticks_task_enter(void); | 463 | static void rcu_dynticks_task_enter(void); |
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 8d18c1014e2b..928fe5893a57 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h | |||
@@ -450,10 +450,12 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func) | |||
450 | } | 450 | } |
451 | INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); | 451 | INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); |
452 | preempt_disable(); | 452 | preempt_disable(); |
453 | cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask); | 453 | cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1); |
454 | /* If all offline, queue the work on an unbound CPU. */ | 454 | /* If all offline, queue the work on an unbound CPU. */ |
455 | if (unlikely(cpu > rnp->grphi)) | 455 | if (unlikely(cpu > rnp->grphi - rnp->grplo)) |
456 | cpu = WORK_CPU_UNBOUND; | 456 | cpu = WORK_CPU_UNBOUND; |
457 | else | ||
458 | cpu += rnp->grplo; | ||
457 | queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); | 459 | queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); |
458 | preempt_enable(); | 460 | preempt_enable(); |
459 | rnp->exp_need_flush = true; | 461 | rnp->exp_need_flush = true; |
@@ -690,8 +692,10 @@ static void sync_rcu_exp_handler(void *unused) | |||
690 | */ | 692 | */ |
691 | if (t->rcu_read_lock_nesting > 0) { | 693 | if (t->rcu_read_lock_nesting > 0) { |
692 | raw_spin_lock_irqsave_rcu_node(rnp, flags); | 694 | raw_spin_lock_irqsave_rcu_node(rnp, flags); |
693 | if (rnp->expmask & rdp->grpmask) | 695 | if (rnp->expmask & rdp->grpmask) { |
694 | rdp->deferred_qs = true; | 696 | rdp->deferred_qs = true; |
697 | WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true); | ||
698 | } | ||
695 | raw_spin_unlock_irqrestore_rcu_node(rnp, flags); | 699 | raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
696 | } | 700 | } |
697 | 701 | ||
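In sync_rcu_exp_select_cpus(), the workqueue CPU is now chosen from the leaf node's ->ffmask rather than from cpu_online_mask, so a set bit (an offset within the leaf) has to be translated back to a global CPU number, and an empty mask falls back to an unbound workqueue CPU. A hedged userspace sketch of that translation; pick_cpu() and CPU_UNBOUND_MODEL are illustrative stand-ins, not kernel symbols:

#include <stdio.h>

#define CPU_UNBOUND_MODEL (-1)

static int pick_cpu(unsigned long ffmask, int grplo, int grphi)
{
        for (int i = 0; i <= grphi - grplo; i++)
                if (ffmask & (1UL << i))
                        return grplo + i;   /* first eligible CPU in the leaf */
        return CPU_UNBOUND_MODEL;           /* none eligible: use unbound CPU */
}

int main(void)
{
        printf("%d\n", pick_cpu(0x4UL, 16, 31));  /* bit 2 set -> CPU 18 */
        printf("%d\n", pick_cpu(0x0UL, 16, 31));  /* empty mask -> -1    */
        return 0;
}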
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 05915e536336..1b3dd2fc0cd6 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h | |||
@@ -397,6 +397,11 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) | |||
397 | return rnp->gp_tasks != NULL; | 397 | return rnp->gp_tasks != NULL; |
398 | } | 398 | } |
399 | 399 | ||
400 | /* Bias and limit values for ->rcu_read_lock_nesting. */ | ||
401 | #define RCU_NEST_BIAS INT_MAX | ||
402 | #define RCU_NEST_NMAX (-INT_MAX / 2) | ||
403 | #define RCU_NEST_PMAX (INT_MAX / 2) | ||
404 | |||
400 | /* | 405 | /* |
401 | * Preemptible RCU implementation for rcu_read_lock(). | 406 | * Preemptible RCU implementation for rcu_read_lock(). |
402 | * Just increment ->rcu_read_lock_nesting, shared state will be updated | 407 | * Just increment ->rcu_read_lock_nesting, shared state will be updated |
@@ -405,6 +410,8 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) | |||
405 | void __rcu_read_lock(void) | 410 | void __rcu_read_lock(void) |
406 | { | 411 | { |
407 | current->rcu_read_lock_nesting++; | 412 | current->rcu_read_lock_nesting++; |
413 | if (IS_ENABLED(CONFIG_PROVE_LOCKING)) | ||
414 | WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX); | ||
408 | barrier(); /* critical section after entry code. */ | 415 | barrier(); /* critical section after entry code. */ |
409 | } | 416 | } |
410 | EXPORT_SYMBOL_GPL(__rcu_read_lock); | 417 | EXPORT_SYMBOL_GPL(__rcu_read_lock); |
@@ -424,20 +431,18 @@ void __rcu_read_unlock(void) | |||
424 | --t->rcu_read_lock_nesting; | 431 | --t->rcu_read_lock_nesting; |
425 | } else { | 432 | } else { |
426 | barrier(); /* critical section before exit code. */ | 433 | barrier(); /* critical section before exit code. */ |
427 | t->rcu_read_lock_nesting = INT_MIN; | 434 | t->rcu_read_lock_nesting = -RCU_NEST_BIAS; |
428 | barrier(); /* assign before ->rcu_read_unlock_special load */ | 435 | barrier(); /* assign before ->rcu_read_unlock_special load */ |
429 | if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) | 436 | if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) |
430 | rcu_read_unlock_special(t); | 437 | rcu_read_unlock_special(t); |
431 | barrier(); /* ->rcu_read_unlock_special load before assign */ | 438 | barrier(); /* ->rcu_read_unlock_special load before assign */ |
432 | t->rcu_read_lock_nesting = 0; | 439 | t->rcu_read_lock_nesting = 0; |
433 | } | 440 | } |
434 | #ifdef CONFIG_PROVE_LOCKING | 441 | if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { |
435 | { | 442 | int rrln = t->rcu_read_lock_nesting; |
436 | int rrln = READ_ONCE(t->rcu_read_lock_nesting); | ||
437 | 443 | ||
438 | WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); | 444 | WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX); |
439 | } | 445 | } |
440 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ | ||
441 | } | 446 | } |
442 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); | 447 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); |
443 | 448 | ||
@@ -597,7 +602,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) | |||
597 | */ | 602 | */ |
598 | static bool rcu_preempt_need_deferred_qs(struct task_struct *t) | 603 | static bool rcu_preempt_need_deferred_qs(struct task_struct *t) |
599 | { | 604 | { |
600 | return (this_cpu_ptr(&rcu_data)->deferred_qs || | 605 | return (__this_cpu_read(rcu_data.deferred_qs) || |
601 | READ_ONCE(t->rcu_read_unlock_special.s)) && | 606 | READ_ONCE(t->rcu_read_unlock_special.s)) && |
602 | t->rcu_read_lock_nesting <= 0; | 607 | t->rcu_read_lock_nesting <= 0; |
603 | } | 608 | } |
@@ -617,11 +622,11 @@ static void rcu_preempt_deferred_qs(struct task_struct *t) | |||
617 | if (!rcu_preempt_need_deferred_qs(t)) | 622 | if (!rcu_preempt_need_deferred_qs(t)) |
618 | return; | 623 | return; |
619 | if (couldrecurse) | 624 | if (couldrecurse) |
620 | t->rcu_read_lock_nesting -= INT_MIN; | 625 | t->rcu_read_lock_nesting -= RCU_NEST_BIAS; |
621 | local_irq_save(flags); | 626 | local_irq_save(flags); |
622 | rcu_preempt_deferred_qs_irqrestore(t, flags); | 627 | rcu_preempt_deferred_qs_irqrestore(t, flags); |
623 | if (couldrecurse) | 628 | if (couldrecurse) |
624 | t->rcu_read_lock_nesting += INT_MIN; | 629 | t->rcu_read_lock_nesting += RCU_NEST_BIAS; |
625 | } | 630 | } |
626 | 631 | ||
627 | /* | 632 | /* |
@@ -642,13 +647,21 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
642 | 647 | ||
643 | local_irq_save(flags); | 648 | local_irq_save(flags); |
644 | irqs_were_disabled = irqs_disabled_flags(flags); | 649 | irqs_were_disabled = irqs_disabled_flags(flags); |
645 | if ((preempt_bh_were_disabled || irqs_were_disabled) && | 650 | if (preempt_bh_were_disabled || irqs_were_disabled) { |
646 | t->rcu_read_unlock_special.b.blocked) { | 651 | WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false); |
647 | /* Need to defer quiescent state until everything is enabled. */ | 652 | /* Need to defer quiescent state until everything is enabled. */ |
648 | raise_softirq_irqoff(RCU_SOFTIRQ); | 653 | if (irqs_were_disabled) { |
654 | /* Enabling irqs does not reschedule, so... */ | ||
655 | raise_softirq_irqoff(RCU_SOFTIRQ); | ||
656 | } else { | ||
657 | /* Enabling BH or preempt does reschedule, so... */ | ||
658 | set_tsk_need_resched(current); | ||
659 | set_preempt_need_resched(); | ||
660 | } | ||
649 | local_irq_restore(flags); | 661 | local_irq_restore(flags); |
650 | return; | 662 | return; |
651 | } | 663 | } |
664 | WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false); | ||
652 | rcu_preempt_deferred_qs_irqrestore(t, flags); | 665 | rcu_preempt_deferred_qs_irqrestore(t, flags); |
653 | } | 666 | } |
654 | 667 | ||
@@ -1464,7 +1477,8 @@ static void __init rcu_spawn_boost_kthreads(void) | |||
1464 | 1477 | ||
1465 | for_each_possible_cpu(cpu) | 1478 | for_each_possible_cpu(cpu) |
1466 | per_cpu(rcu_cpu_has_work, cpu) = 0; | 1479 | per_cpu(rcu_cpu_has_work, cpu) = 0; |
1467 | BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); | 1480 | if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__)) |
1481 | return; | ||
1468 | rcu_for_each_leaf_node(rnp) | 1482 | rcu_for_each_leaf_node(rnp) |
1469 | (void)rcu_spawn_one_boost_kthread(rnp); | 1483 | (void)rcu_spawn_one_boost_kthread(rnp); |
1470 | } | 1484 | } |
@@ -1997,7 +2011,7 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu) | |||
1997 | * (if a callback is in fact needed). This is associated with an | 2011 | * (if a callback is in fact needed). This is associated with an |
1998 | * atomic_inc() in the caller. | 2012 | * atomic_inc() in the caller. |
1999 | */ | 2013 | */ |
2000 | ret = atomic_long_read(&rdp->nocb_q_count); | 2014 | ret = rcu_get_n_cbs_nocb_cpu(rdp); |
2001 | 2015 | ||
2002 | #ifdef CONFIG_PROVE_RCU | 2016 | #ifdef CONFIG_PROVE_RCU |
2003 | rhp = READ_ONCE(rdp->nocb_head); | 2017 | rhp = READ_ONCE(rdp->nocb_head); |
@@ -2052,7 +2066,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, | |||
2052 | TPS("WakeNotPoll")); | 2066 | TPS("WakeNotPoll")); |
2053 | return; | 2067 | return; |
2054 | } | 2068 | } |
2055 | len = atomic_long_read(&rdp->nocb_q_count); | 2069 | len = rcu_get_n_cbs_nocb_cpu(rdp); |
2056 | if (old_rhpp == &rdp->nocb_head) { | 2070 | if (old_rhpp == &rdp->nocb_head) { |
2057 | if (!irqs_disabled_flags(flags)) { | 2071 | if (!irqs_disabled_flags(flags)) { |
2058 | /* ... if queue was empty ... */ | 2072 | /* ... if queue was empty ... */ |
@@ -2101,11 +2115,11 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, | |||
2101 | trace_rcu_kfree_callback(rcu_state.name, rhp, | 2115 | trace_rcu_kfree_callback(rcu_state.name, rhp, |
2102 | (unsigned long)rhp->func, | 2116 | (unsigned long)rhp->func, |
2103 | -atomic_long_read(&rdp->nocb_q_count_lazy), | 2117 | -atomic_long_read(&rdp->nocb_q_count_lazy), |
2104 | -atomic_long_read(&rdp->nocb_q_count)); | 2118 | -rcu_get_n_cbs_nocb_cpu(rdp)); |
2105 | else | 2119 | else |
2106 | trace_rcu_callback(rcu_state.name, rhp, | 2120 | trace_rcu_callback(rcu_state.name, rhp, |
2107 | -atomic_long_read(&rdp->nocb_q_count_lazy), | 2121 | -atomic_long_read(&rdp->nocb_q_count_lazy), |
2108 | -atomic_long_read(&rdp->nocb_q_count)); | 2122 | -rcu_get_n_cbs_nocb_cpu(rdp)); |
2109 | 2123 | ||
2110 | /* | 2124 | /* |
2111 | * If called from an extended quiescent state with interrupts | 2125 | * If called from an extended quiescent state with interrupts |
@@ -2322,13 +2336,14 @@ static int rcu_nocb_kthread(void *arg) | |||
2322 | tail = rdp->nocb_follower_tail; | 2336 | tail = rdp->nocb_follower_tail; |
2323 | rdp->nocb_follower_tail = &rdp->nocb_follower_head; | 2337 | rdp->nocb_follower_tail = &rdp->nocb_follower_head; |
2324 | raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); | 2338 | raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); |
2325 | BUG_ON(!list); | 2339 | if (WARN_ON_ONCE(!list)) |
2340 | continue; | ||
2326 | trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty")); | 2341 | trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty")); |
2327 | 2342 | ||
2328 | /* Each pass through the following loop invokes a callback. */ | 2343 | /* Each pass through the following loop invokes a callback. */ |
2329 | trace_rcu_batch_start(rcu_state.name, | 2344 | trace_rcu_batch_start(rcu_state.name, |
2330 | atomic_long_read(&rdp->nocb_q_count_lazy), | 2345 | atomic_long_read(&rdp->nocb_q_count_lazy), |
2331 | atomic_long_read(&rdp->nocb_q_count), -1); | 2346 | rcu_get_n_cbs_nocb_cpu(rdp), -1); |
2332 | c = cl = 0; | 2347 | c = cl = 0; |
2333 | while (list) { | 2348 | while (list) { |
2334 | next = list->next; | 2349 | next = list->next; |
@@ -2495,7 +2510,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu) | |||
2495 | /* Spawn the kthread for this CPU. */ | 2510 | /* Spawn the kthread for this CPU. */ |
2496 | t = kthread_run(rcu_nocb_kthread, rdp_spawn, | 2511 | t = kthread_run(rcu_nocb_kthread, rdp_spawn, |
2497 | "rcuo%c/%d", rcu_state.abbr, cpu); | 2512 | "rcuo%c/%d", rcu_state.abbr, cpu); |
2498 | BUG_ON(IS_ERR(t)); | 2513 | if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo kthread, OOM is now expected behavior\n", __func__)) |
2514 | return; | ||
2499 | WRITE_ONCE(rdp_spawn->nocb_kthread, t); | 2515 | WRITE_ONCE(rdp_spawn->nocb_kthread, t); |
2500 | } | 2516 | } |
2501 | 2517 | ||
@@ -2587,6 +2603,26 @@ static bool init_nocb_callback_list(struct rcu_data *rdp) | |||
2587 | return true; | 2603 | return true; |
2588 | } | 2604 | } |
2589 | 2605 | ||
2606 | /* | ||
2607 | * Bind the current task to the offloaded CPUs. If there are no offloaded | ||
2608 | * CPUs, leave the task unbound. Splat if the bind attempt fails. | ||
2609 | */ | ||
2610 | void rcu_bind_current_to_nocb(void) | ||
2611 | { | ||
2612 | if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask)) | ||
2613 | WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask)); | ||
2614 | } | ||
2615 | EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb); | ||
2616 | |||
2617 | /* | ||
2618 | * Return the number of RCU callbacks still queued from the specified | ||
2619 | * CPU, which must be a nocbs CPU. | ||
2620 | */ | ||
2621 | static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp) | ||
2622 | { | ||
2623 | return atomic_long_read(&rdp->nocb_q_count); | ||
2624 | } | ||
2625 | |||
2590 | #else /* #ifdef CONFIG_RCU_NOCB_CPU */ | 2626 | #else /* #ifdef CONFIG_RCU_NOCB_CPU */ |
2591 | 2627 | ||
2592 | static bool rcu_nocb_cpu_needs_barrier(int cpu) | 2628 | static bool rcu_nocb_cpu_needs_barrier(int cpu) |
@@ -2647,6 +2683,11 @@ static bool init_nocb_callback_list(struct rcu_data *rdp) | |||
2647 | return false; | 2683 | return false; |
2648 | } | 2684 | } |
2649 | 2685 | ||
2686 | static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp) | ||
2687 | { | ||
2688 | return 0; | ||
2689 | } | ||
2690 | |||
2650 | #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ | 2691 | #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ |
2651 | 2692 | ||
2652 | /* | 2693 | /* |
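The tree_plugin.h changes replace the bare INT_MIN bias on ->rcu_read_lock_nesting with named RCU_NEST_* constants and lockdep-only range checks. The underlying trick is unchanged: while the outermost __rcu_read_unlock() processes ->rcu_read_unlock_special, the nesting count is parked at a large negative value so that readers nested in interrupt handlers take the fast path, and it is restored to zero afterwards. A userspace model of that flow (the bias value mirrors the patch; the rest is a sketch, not the kernel implementation):

#include <limits.h>
#include <stdio.h>

#define RCU_NEST_BIAS INT_MAX

static int nesting;

static void read_lock(void)   { nesting++; }
static void read_unlock(void)
{
        if (nesting != 1) {
                --nesting;                     /* nested or biased: fast path */
                return;
        }
        nesting = -RCU_NEST_BIAS;              /* block special-path re-entry */
        /* ... handle deferred quiescent state here ... */
        nesting = 0;
}

int main(void)
{
        read_lock();
        read_lock();
        read_unlock();                         /* nesting 2 -> 1 */
        read_unlock();                         /* outermost: bias, handle, 0 */
        printf("nesting = %d\n", nesting);     /* prints 0 */
        return 0;
}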
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index f203b94f6b5b..1971869c4072 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c | |||
@@ -335,8 +335,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, | |||
335 | /* Initialize and register callbacks for each crcu_array element. */ | 335 | /* Initialize and register callbacks for each crcu_array element. */ |
336 | for (i = 0; i < n; i++) { | 336 | for (i = 0; i < n; i++) { |
337 | if (checktiny && | 337 | if (checktiny && |
338 | (crcu_array[i] == call_rcu || | 338 | (crcu_array[i] == call_rcu)) { |
339 | crcu_array[i] == call_rcu_bh)) { | ||
340 | might_sleep(); | 339 | might_sleep(); |
341 | continue; | 340 | continue; |
342 | } | 341 | } |
@@ -352,8 +351,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, | |||
352 | /* Wait for all callbacks to be invoked. */ | 351 | /* Wait for all callbacks to be invoked. */ |
353 | for (i = 0; i < n; i++) { | 352 | for (i = 0; i < n; i++) { |
354 | if (checktiny && | 353 | if (checktiny && |
355 | (crcu_array[i] == call_rcu || | 354 | (crcu_array[i] == call_rcu)) |
356 | crcu_array[i] == call_rcu_bh)) | ||
357 | continue; | 355 | continue; |
358 | for (j = 0; j < i; j++) | 356 | for (j = 0; j < i; j++) |
359 | if (crcu_array[j] == crcu_array[i]) | 357 | if (crcu_array[j] == crcu_array[i]) |
@@ -822,7 +820,8 @@ static int __init rcu_spawn_tasks_kthread(void) | |||
822 | struct task_struct *t; | 820 | struct task_struct *t; |
823 | 821 | ||
824 | t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread"); | 822 | t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread"); |
825 | BUG_ON(IS_ERR(t)); | 823 | if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__)) |
824 | return 0; | ||
826 | smp_mb(); /* Ensure others see full kthread. */ | 825 | smp_mb(); /* Ensure others see full kthread. */ |
827 | WRITE_ONCE(rcu_tasks_kthread_ptr, t); | 826 | WRITE_ONCE(rcu_tasks_kthread_ptr, t); |
828 | return 0; | 827 | return 0; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6fedf3a98581..a5b7f1c9f24f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -5783,7 +5783,7 @@ int sched_cpu_deactivate(unsigned int cpu) | |||
5783 | * | 5783 | * |
5784 | * Do sync before park smpboot threads to take care the rcu boost case. | 5784 | * Do sync before park smpboot threads to take care the rcu boost case. |
5785 | */ | 5785 | */ |
5786 | synchronize_rcu_mult(call_rcu, call_rcu_sched); | 5786 | synchronize_rcu(); |
5787 | 5787 | ||
5788 | #ifdef CONFIG_SCHED_SMT | 5788 | #ifdef CONFIG_SCHED_SMT |
5789 | /* | 5789 | /* |
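The sched/core.c change collapses synchronize_rcu_mult(call_rcu, call_rcu_sched) into a single synchronize_rcu(), which is sound only because the RCU flavors have been consolidated: one grace period now waits for rcu_read_lock() readers and for regions running with preemption, interrupts, or bottom halves disabled. A kernel-style sketch of that guarantee (not a standalone program; demo_reader()/demo_updater() are illustrative names):

#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

static int demo_ready;

static void demo_reader(void)
{
        preempt_disable();              /* acts as an RCU read-side section */
        (void)READ_ONCE(demo_ready);
        preempt_enable();
}

static void demo_updater(void)
{
        WRITE_ONCE(demo_ready, 1);
        synchronize_rcu();              /* waits for demo_reader() above too */
}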
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c index 76e0eaf4654e..3cd8a3a795d2 100644 --- a/kernel/sched/membarrier.c +++ b/kernel/sched/membarrier.c | |||
@@ -210,7 +210,7 @@ static int membarrier_register_global_expedited(void) | |||
210 | * future scheduler executions will observe the new | 210 | * future scheduler executions will observe the new |
211 | * thread flag state for this mm. | 211 | * thread flag state for this mm. |
212 | */ | 212 | */ |
213 | synchronize_sched(); | 213 | synchronize_rcu(); |
214 | } | 214 | } |
215 | atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY, | 215 | atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY, |
216 | &mm->membarrier_state); | 216 | &mm->membarrier_state); |
@@ -246,7 +246,7 @@ static int membarrier_register_private_expedited(int flags) | |||
246 | * Ensure all future scheduler executions will observe the | 246 | * Ensure all future scheduler executions will observe the |
247 | * new thread flag state for this process. | 247 | * new thread flag state for this process. |
248 | */ | 248 | */ |
249 | synchronize_sched(); | 249 | synchronize_rcu(); |
250 | } | 250 | } |
251 | atomic_or(state, &mm->membarrier_state); | 251 | atomic_or(state, &mm->membarrier_state); |
252 | 252 | ||
@@ -298,7 +298,7 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags) | |||
298 | if (tick_nohz_full_enabled()) | 298 | if (tick_nohz_full_enabled()) |
299 | return -EINVAL; | 299 | return -EINVAL; |
300 | if (num_online_cpus() > 1) | 300 | if (num_online_cpus() > 1) |
301 | synchronize_sched(); | 301 | synchronize_rcu(); |
302 | return 0; | 302 | return 0; |
303 | case MEMBARRIER_CMD_GLOBAL_EXPEDITED: | 303 | case MEMBARRIER_CMD_GLOBAL_EXPEDITED: |
304 | return membarrier_global_expedited(); | 304 | return membarrier_global_expedited(); |
diff --git a/kernel/torture.c b/kernel/torture.c index 17d91f5fba2a..bbf6d473e50c 100644 --- a/kernel/torture.c +++ b/kernel/torture.c | |||
@@ -194,11 +194,23 @@ torture_onoff(void *arg) | |||
194 | int cpu; | 194 | int cpu; |
195 | int maxcpu = -1; | 195 | int maxcpu = -1; |
196 | DEFINE_TORTURE_RANDOM(rand); | 196 | DEFINE_TORTURE_RANDOM(rand); |
197 | int ret; | ||
197 | 198 | ||
198 | VERBOSE_TOROUT_STRING("torture_onoff task started"); | 199 | VERBOSE_TOROUT_STRING("torture_onoff task started"); |
199 | for_each_online_cpu(cpu) | 200 | for_each_online_cpu(cpu) |
200 | maxcpu = cpu; | 201 | maxcpu = cpu; |
201 | WARN_ON(maxcpu < 0); | 202 | WARN_ON(maxcpu < 0); |
203 | if (!IS_MODULE(CONFIG_TORTURE_TEST)) | ||
204 | for_each_possible_cpu(cpu) { | ||
205 | if (cpu_online(cpu)) | ||
206 | continue; | ||
207 | ret = cpu_up(cpu); | ||
208 | if (ret && verbose) { | ||
209 | pr_alert("%s" TORTURE_FLAG | ||
210 | "%s: Initial online %d: errno %d\n", | ||
211 | __func__, torture_type, cpu, ret); | ||
212 | } | ||
213 | } | ||
202 | 214 | ||
203 | if (maxcpu == 0) { | 215 | if (maxcpu == 0) { |
204 | VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled"); | 216 | VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled"); |
@@ -233,16 +245,15 @@ stop: | |||
233 | */ | 245 | */ |
234 | int torture_onoff_init(long ooholdoff, long oointerval) | 246 | int torture_onoff_init(long ooholdoff, long oointerval) |
235 | { | 247 | { |
236 | int ret = 0; | ||
237 | |||
238 | #ifdef CONFIG_HOTPLUG_CPU | 248 | #ifdef CONFIG_HOTPLUG_CPU |
239 | onoff_holdoff = ooholdoff; | 249 | onoff_holdoff = ooholdoff; |
240 | onoff_interval = oointerval; | 250 | onoff_interval = oointerval; |
241 | if (onoff_interval <= 0) | 251 | if (onoff_interval <= 0) |
242 | return 0; | 252 | return 0; |
243 | ret = torture_create_kthread(torture_onoff, NULL, onoff_task); | 253 | return torture_create_kthread(torture_onoff, NULL, onoff_task); |
244 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 254 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ |
245 | return ret; | 255 | return 0; |
256 | #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ | ||
246 | } | 257 | } |
247 | EXPORT_SYMBOL_GPL(torture_onoff_init); | 258 | EXPORT_SYMBOL_GPL(torture_onoff_init); |
248 | 259 | ||
@@ -513,15 +524,13 @@ static int torture_shutdown(void *arg) | |||
513 | */ | 524 | */ |
514 | int torture_shutdown_init(int ssecs, void (*cleanup)(void)) | 525 | int torture_shutdown_init(int ssecs, void (*cleanup)(void)) |
515 | { | 526 | { |
516 | int ret = 0; | ||
517 | |||
518 | torture_shutdown_hook = cleanup; | 527 | torture_shutdown_hook = cleanup; |
519 | if (ssecs > 0) { | 528 | if (ssecs > 0) { |
520 | shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0)); | 529 | shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0)); |
521 | ret = torture_create_kthread(torture_shutdown, NULL, | 530 | return torture_create_kthread(torture_shutdown, NULL, |
522 | shutdown_task); | 531 | shutdown_task); |
523 | } | 532 | } |
524 | return ret; | 533 | return 0; |
525 | } | 534 | } |
526 | EXPORT_SYMBOL_GPL(torture_shutdown_init); | 535 | EXPORT_SYMBOL_GPL(torture_shutdown_init); |
527 | 536 | ||
@@ -620,13 +629,10 @@ static int torture_stutter(void *arg) | |||
620 | /* | 629 | /* |
621 | * Initialize and kick off the torture_stutter kthread. | 630 | * Initialize and kick off the torture_stutter kthread. |
622 | */ | 631 | */ |
623 | int torture_stutter_init(int s) | 632 | int torture_stutter_init(const int s) |
624 | { | 633 | { |
625 | int ret; | ||
626 | |||
627 | stutter = s; | 634 | stutter = s; |
628 | ret = torture_create_kthread(torture_stutter, NULL, stutter_task); | 635 | return torture_create_kthread(torture_stutter, NULL, stutter_task); |
629 | return ret; | ||
630 | } | 636 | } |
631 | EXPORT_SYMBOL_GPL(torture_stutter_init); | 637 | EXPORT_SYMBOL_GPL(torture_stutter_init); |
632 | 638 | ||
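The torture.c init helpers (torture_onoff_init(), torture_shutdown_init(), torture_stutter_init()) drop their "ret" locals and return the kthread-creation result directly, with the !CONFIG_HOTPLUG_CPU case handled by an explicit #else arm. A small standalone sketch of the resulting shape; spawn_model() and HOTPLUG_MODEL are stand-ins, not kernel symbols:

#include <stdio.h>

static int spawn_model(void)
{
        return 0;                       /* pretend the kthread started */
}

static int onoff_init_model(long interval)
{
#ifdef HOTPLUG_MODEL
        if (interval <= 0)
                return 0;               /* hotplug testing disabled */
        return spawn_model();           /* propagate creation errors */
#else
        (void)interval;
        return 0;                       /* no CPU hotplug support */
#endif
}

int main(void)
{
        printf("onoff_init_model(5) = %d\n", onoff_init_model(5));
        return 0;
}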
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 77734451cb05..c375e33239f7 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -173,7 +173,7 @@ static void ftrace_sync(struct work_struct *work) | |||
173 | { | 173 | { |
174 | /* | 174 | /* |
175 | * This function is just a stub to implement a hard force | 175 | * This function is just a stub to implement a hard force |
176 | * of synchronize_sched(). This requires synchronizing | 176 | * of synchronize_rcu(). This requires synchronizing |
177 | * tasks even in userspace and idle. | 177 | * tasks even in userspace and idle. |
178 | * | 178 | * |
179 | * Yes, function tracing is rude. | 179 | * Yes, function tracing is rude. |
@@ -934,7 +934,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf, | |||
934 | ftrace_profile_enabled = 0; | 934 | ftrace_profile_enabled = 0; |
935 | /* | 935 | /* |
936 | * unregister_ftrace_profiler calls stop_machine | 936 | * unregister_ftrace_profiler calls stop_machine |
937 | * so this acts like an synchronize_sched. | 937 | * so this acts like an synchronize_rcu. |
938 | */ | 938 | */ |
939 | unregister_ftrace_profiler(); | 939 | unregister_ftrace_profiler(); |
940 | } | 940 | } |
@@ -1086,7 +1086,7 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr) | |||
1086 | 1086 | ||
1087 | /* | 1087 | /* |
1088 | * Some of the ops may be dynamically allocated, | 1088 | * Some of the ops may be dynamically allocated, |
1089 | * they are freed after a synchronize_sched(). | 1089 | * they are freed after a synchronize_rcu(). |
1090 | */ | 1090 | */ |
1091 | preempt_disable_notrace(); | 1091 | preempt_disable_notrace(); |
1092 | 1092 | ||
@@ -1286,7 +1286,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash) | |||
1286 | { | 1286 | { |
1287 | if (!hash || hash == EMPTY_HASH) | 1287 | if (!hash || hash == EMPTY_HASH) |
1288 | return; | 1288 | return; |
1289 | call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); | 1289 | call_rcu(&hash->rcu, __free_ftrace_hash_rcu); |
1290 | } | 1290 | } |
1291 | 1291 | ||
1292 | void ftrace_free_filter(struct ftrace_ops *ops) | 1292 | void ftrace_free_filter(struct ftrace_ops *ops) |
@@ -1501,7 +1501,7 @@ static bool hash_contains_ip(unsigned long ip, | |||
1501 | * the ip is not in the ops->notrace_hash. | 1501 | * the ip is not in the ops->notrace_hash. |
1502 | * | 1502 | * |
1503 | * This needs to be called with preemption disabled as | 1503 | * This needs to be called with preemption disabled as |
1504 | * the hashes are freed with call_rcu_sched(). | 1504 | * the hashes are freed with call_rcu(). |
1505 | */ | 1505 | */ |
1506 | static int | 1506 | static int |
1507 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | 1507 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
@@ -4496,7 +4496,7 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, | |||
4496 | if (ftrace_enabled && !ftrace_hash_empty(hash)) | 4496 | if (ftrace_enabled && !ftrace_hash_empty(hash)) |
4497 | ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, | 4497 | ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, |
4498 | &old_hash_ops); | 4498 | &old_hash_ops); |
4499 | synchronize_sched(); | 4499 | synchronize_rcu(); |
4500 | 4500 | ||
4501 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { | 4501 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { |
4502 | hlist_del(&entry->hlist); | 4502 | hlist_del(&entry->hlist); |
@@ -5314,7 +5314,7 @@ ftrace_graph_release(struct inode *inode, struct file *file) | |||
5314 | mutex_unlock(&graph_lock); | 5314 | mutex_unlock(&graph_lock); |
5315 | 5315 | ||
5316 | /* Wait till all users are no longer using the old hash */ | 5316 | /* Wait till all users are no longer using the old hash */ |
5317 | synchronize_sched(); | 5317 | synchronize_rcu(); |
5318 | 5318 | ||
5319 | free_ftrace_hash(old_hash); | 5319 | free_ftrace_hash(old_hash); |
5320 | } | 5320 | } |
@@ -5707,7 +5707,7 @@ void ftrace_release_mod(struct module *mod) | |||
5707 | list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { | 5707 | list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { |
5708 | if (mod_map->mod == mod) { | 5708 | if (mod_map->mod == mod) { |
5709 | list_del_rcu(&mod_map->list); | 5709 | list_del_rcu(&mod_map->list); |
5710 | call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map); | 5710 | call_rcu(&mod_map->rcu, ftrace_free_mod_map); |
5711 | break; | 5711 | break; |
5712 | } | 5712 | } |
5713 | } | 5713 | } |
@@ -5927,7 +5927,7 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, | |||
5927 | struct ftrace_mod_map *mod_map; | 5927 | struct ftrace_mod_map *mod_map; |
5928 | const char *ret = NULL; | 5928 | const char *ret = NULL; |
5929 | 5929 | ||
5930 | /* mod_map is freed via call_rcu_sched() */ | 5930 | /* mod_map is freed via call_rcu() */ |
5931 | preempt_disable(); | 5931 | preempt_disable(); |
5932 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { | 5932 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { |
5933 | ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); | 5933 | ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); |
@@ -6262,7 +6262,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |||
6262 | 6262 | ||
6263 | /* | 6263 | /* |
6264 | * Some of the ops may be dynamically allocated, | 6264 | * Some of the ops may be dynamically allocated, |
6265 | * they must be freed after a synchronize_sched(). | 6265 | * they must be freed after a synchronize_rcu(). |
6266 | */ | 6266 | */ |
6267 | preempt_disable_notrace(); | 6267 | preempt_disable_notrace(); |
6268 | 6268 | ||
@@ -6433,7 +6433,7 @@ static void clear_ftrace_pids(struct trace_array *tr) | |||
6433 | rcu_assign_pointer(tr->function_pids, NULL); | 6433 | rcu_assign_pointer(tr->function_pids, NULL); |
6434 | 6434 | ||
6435 | /* Wait till all users are no longer using pid filtering */ | 6435 | /* Wait till all users are no longer using pid filtering */ |
6436 | synchronize_sched(); | 6436 | synchronize_rcu(); |
6437 | 6437 | ||
6438 | trace_free_pid_list(pid_list); | 6438 | trace_free_pid_list(pid_list); |
6439 | } | 6439 | } |
@@ -6580,7 +6580,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf, | |||
6580 | rcu_assign_pointer(tr->function_pids, pid_list); | 6580 | rcu_assign_pointer(tr->function_pids, pid_list); |
6581 | 6581 | ||
6582 | if (filtered_pids) { | 6582 | if (filtered_pids) { |
6583 | synchronize_sched(); | 6583 | synchronize_rcu(); |
6584 | trace_free_pid_list(filtered_pids); | 6584 | trace_free_pid_list(filtered_pids); |
6585 | } else if (pid_list) { | 6585 | } else if (pid_list) { |
6586 | /* Register a probe to set whether to ignore the tracing of a task */ | 6586 | /* Register a probe to set whether to ignore the tracing of a task */ |
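The ftrace.c hunks move the hash and mod_map reclamation from call_rcu_sched()/synchronize_sched() to call_rcu()/synchronize_rcu(); with the flavors consolidated, the preempt_disable() sections that guard the hash lookups are honored by the single remaining grace period. A kernel-style sketch of the deferred-free idiom these sites rely on (not a standalone program; struct demo_node and the demo_* names are illustrative, not ftrace's):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {
        int data;
        struct list_head list;
        struct rcu_head rcu;
};

static LIST_HEAD(demo_list);

static int demo_read(void)
{
        struct demo_node *p;
        int sum = 0;

        rcu_read_lock();                       /* read-side critical section */
        list_for_each_entry_rcu(p, &demo_list, list)
                sum += p->data;
        rcu_read_unlock();
        return sum;
}

static void demo_free_rcu(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct demo_node, rcu));
}

static void demo_remove(struct demo_node *node)
{
        list_del_rcu(&node->list);            /* unlink; readers may still hold it */
        call_rcu(&node->rcu, demo_free_rcu);  /* free only after a grace period */
}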
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 65bd4616220d..4f3247a53259 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -1834,7 +1834,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, | |||
1834 | * There could have been a race between checking | 1834 | * There could have been a race between checking |
1835 | * record_disable and incrementing it. | 1835 | * record_disable and incrementing it. |
1836 | */ | 1836 | */ |
1837 | synchronize_sched(); | 1837 | synchronize_rcu(); |
1838 | for_each_buffer_cpu(buffer, cpu) { | 1838 | for_each_buffer_cpu(buffer, cpu) { |
1839 | cpu_buffer = buffer->buffers[cpu]; | 1839 | cpu_buffer = buffer->buffers[cpu]; |
1840 | rb_check_pages(cpu_buffer); | 1840 | rb_check_pages(cpu_buffer); |
@@ -3151,7 +3151,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) | |||
3151 | * This prevents all writes to the buffer. Any attempt to write | 3151 | * This prevents all writes to the buffer. Any attempt to write |
3152 | * to the buffer after this will fail and return NULL. | 3152 | * to the buffer after this will fail and return NULL. |
3153 | * | 3153 | * |
3154 | * The caller should call synchronize_sched() after this. | 3154 | * The caller should call synchronize_rcu() after this. |
3155 | */ | 3155 | */ |
3156 | void ring_buffer_record_disable(struct ring_buffer *buffer) | 3156 | void ring_buffer_record_disable(struct ring_buffer *buffer) |
3157 | { | 3157 | { |
@@ -3253,7 +3253,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer) | |||
3253 | * This prevents all writes to the buffer. Any attempt to write | 3253 | * This prevents all writes to the buffer. Any attempt to write |
3254 | * to the buffer after this will fail and return NULL. | 3254 | * to the buffer after this will fail and return NULL. |
3255 | * | 3255 | * |
3256 | * The caller should call synchronize_sched() after this. | 3256 | * The caller should call synchronize_rcu() after this. |
3257 | */ | 3257 | */ |
3258 | void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) | 3258 | void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) |
3259 | { | 3259 | { |
@@ -4191,7 +4191,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); | |||
4191 | void | 4191 | void |
4192 | ring_buffer_read_prepare_sync(void) | 4192 | ring_buffer_read_prepare_sync(void) |
4193 | { | 4193 | { |
4194 | synchronize_sched(); | 4194 | synchronize_rcu(); |
4195 | } | 4195 | } |
4196 | EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); | 4196 | EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); |
4197 | 4197 | ||
@@ -4363,7 +4363,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
4363 | atomic_inc(&cpu_buffer->record_disabled); | 4363 | atomic_inc(&cpu_buffer->record_disabled); |
4364 | 4364 | ||
4365 | /* Make sure all commits have finished */ | 4365 | /* Make sure all commits have finished */ |
4366 | synchronize_sched(); | 4366 | synchronize_rcu(); |
4367 | 4367 | ||
4368 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 4368 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
4369 | 4369 | ||
@@ -4496,7 +4496,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
4496 | goto out; | 4496 | goto out; |
4497 | 4497 | ||
4498 | /* | 4498 | /* |
4499 | * We can't do a synchronize_sched here because this | 4499 | * We can't do a synchronize_rcu here because this |
4500 | * function can be called in atomic context. | 4500 | * function can be called in atomic context. |
4501 | * Normally this will be called from the same CPU as cpu. | 4501 | * Normally this will be called from the same CPU as cpu. |
4502 | * If not it's up to the caller to protect this. | 4502 | * If not it's up to the caller to protect this. |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ff1c4b20cd0a..51612b4a603f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1681,7 +1681,7 @@ void tracing_reset(struct trace_buffer *buf, int cpu) | |||
1681 | ring_buffer_record_disable(buffer); | 1681 | ring_buffer_record_disable(buffer); |
1682 | 1682 | ||
1683 | /* Make sure all commits have finished */ | 1683 | /* Make sure all commits have finished */ |
1684 | synchronize_sched(); | 1684 | synchronize_rcu(); |
1685 | ring_buffer_reset_cpu(buffer, cpu); | 1685 | ring_buffer_reset_cpu(buffer, cpu); |
1686 | 1686 | ||
1687 | ring_buffer_record_enable(buffer); | 1687 | ring_buffer_record_enable(buffer); |
@@ -1698,7 +1698,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf) | |||
1698 | ring_buffer_record_disable(buffer); | 1698 | ring_buffer_record_disable(buffer); |
1699 | 1699 | ||
1700 | /* Make sure all commits have finished */ | 1700 | /* Make sure all commits have finished */ |
1701 | synchronize_sched(); | 1701 | synchronize_rcu(); |
1702 | 1702 | ||
1703 | buf->time_start = buffer_ftrace_now(buf, buf->cpu); | 1703 | buf->time_start = buffer_ftrace_now(buf, buf->cpu); |
1704 | 1704 | ||
@@ -2250,7 +2250,7 @@ void trace_buffered_event_disable(void) | |||
2250 | preempt_enable(); | 2250 | preempt_enable(); |
2251 | 2251 | ||
2252 | /* Wait for all current users to finish */ | 2252 | /* Wait for all current users to finish */ |
2253 | synchronize_sched(); | 2253 | synchronize_rcu(); |
2254 | 2254 | ||
2255 | for_each_tracing_cpu(cpu) { | 2255 | for_each_tracing_cpu(cpu) { |
2256 | free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); | 2256 | free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); |
@@ -5398,7 +5398,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) | |||
5398 | if (tr->current_trace->reset) | 5398 | if (tr->current_trace->reset) |
5399 | tr->current_trace->reset(tr); | 5399 | tr->current_trace->reset(tr); |
5400 | 5400 | ||
5401 | /* Current trace needs to be nop_trace before synchronize_sched */ | 5401 | /* Current trace needs to be nop_trace before synchronize_rcu */ |
5402 | tr->current_trace = &nop_trace; | 5402 | tr->current_trace = &nop_trace; |
5403 | 5403 | ||
5404 | #ifdef CONFIG_TRACER_MAX_TRACE | 5404 | #ifdef CONFIG_TRACER_MAX_TRACE |
@@ -5412,7 +5412,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) | |||
5412 | * The update_max_tr is called from interrupts disabled | 5412 | * The update_max_tr is called from interrupts disabled |
5413 | * so a synchronized_sched() is sufficient. | 5413 | * so a synchronized_sched() is sufficient. |
5414 | */ | 5414 | */ |
5415 | synchronize_sched(); | 5415 | synchronize_rcu(); |
5416 | free_snapshot(tr); | 5416 | free_snapshot(tr); |
5417 | } | 5417 | } |
5418 | #endif | 5418 | #endif |
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 84a65173b1e9..35f3aa55be85 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -1614,7 +1614,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir, | |||
1614 | 1614 | ||
1615 | /* | 1615 | /* |
1616 | * The calls can still be using the old filters. | 1616 | * The calls can still be using the old filters. |
1617 | * Do a synchronize_sched() and to ensure all calls are | 1617 | * Do a synchronize_rcu() and to ensure all calls are |
1618 | * done with them before we free them. | 1618 | * done with them before we free them. |
1619 | */ | 1619 | */ |
1620 | tracepoint_synchronize_unregister(); | 1620 | tracepoint_synchronize_unregister(); |
@@ -1845,7 +1845,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, | |||
1845 | if (filter) { | 1845 | if (filter) { |
1846 | /* | 1846 | /* |
1847 | * No event actually uses the system filter | 1847 | * No event actually uses the system filter |
1848 | * we can free it without synchronize_sched(). | 1848 | * we can free it without synchronize_rcu(). |
1849 | */ | 1849 | */ |
1850 | __free_filter(system->filter); | 1850 | __free_filter(system->filter); |
1851 | system->filter = filter; | 1851 | system->filter = filter; |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index fec67188c4d2..adc153ab51c0 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -333,7 +333,7 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) | |||
333 | * event_call related objects, which will be accessed in | 333 | * event_call related objects, which will be accessed in |
334 | * the kprobe_trace_func/kretprobe_trace_func. | 334 | * the kprobe_trace_func/kretprobe_trace_func. |
335 | */ | 335 | */ |
336 | synchronize_sched(); | 336 | synchronize_rcu(); |
337 | kfree(link); /* Ignored if link == NULL */ | 337 | kfree(link); /* Ignored if link == NULL */ |
338 | } | 338 | } |
339 | 339 | ||
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index a3be42304485..46f2ab1e08a9 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -92,7 +92,7 @@ static __init int release_early_probes(void) | |||
92 | while (early_probes) { | 92 | while (early_probes) { |
93 | tmp = early_probes; | 93 | tmp = early_probes; |
94 | early_probes = tmp->next; | 94 | early_probes = tmp->next; |
95 | call_rcu_sched(tmp, rcu_free_old_probes); | 95 | call_rcu(tmp, rcu_free_old_probes); |
96 | } | 96 | } |
97 | 97 | ||
98 | return 0; | 98 | return 0; |
@@ -123,7 +123,7 @@ static inline void release_probes(struct tracepoint_func *old) | |||
123 | * cover both cases. So let us chain the SRCU and sched RCU | 123 | * cover both cases. So let us chain the SRCU and sched RCU |
124 | * callbacks to wait for both grace periods. | 124 | * callbacks to wait for both grace periods. |
125 | */ | 125 | */ |
126 | call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes); | 126 | call_rcu(&tp_probes->rcu, rcu_free_old_probes); |
127 | } | 127 | } |
128 | } | 128 | } |
129 | 129 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0280deac392e..392be4b252f6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -3396,7 +3396,7 @@ static void put_unbound_pool(struct worker_pool *pool) | |||
3396 | del_timer_sync(&pool->mayday_timer); | 3396 | del_timer_sync(&pool->mayday_timer); |
3397 | 3397 | ||
3398 | /* sched-RCU protected to allow dereferences from get_work_pool() */ | 3398 | /* sched-RCU protected to allow dereferences from get_work_pool() */ |
3399 | call_rcu_sched(&pool->rcu, rcu_free_pool); | 3399 | call_rcu(&pool->rcu, rcu_free_pool); |
3400 | } | 3400 | } |
3401 | 3401 | ||
3402 | /** | 3402 | /** |
@@ -3503,14 +3503,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work) | |||
3503 | put_unbound_pool(pool); | 3503 | put_unbound_pool(pool); |
3504 | mutex_unlock(&wq_pool_mutex); | 3504 | mutex_unlock(&wq_pool_mutex); |
3505 | 3505 | ||
3506 | call_rcu_sched(&pwq->rcu, rcu_free_pwq); | 3506 | call_rcu(&pwq->rcu, rcu_free_pwq); |
3507 | 3507 | ||
3508 | /* | 3508 | /* |
3509 | * If we're the last pwq going away, @wq is already dead and no one | 3509 | * If we're the last pwq going away, @wq is already dead and no one |
3510 | * is gonna access it anymore. Schedule RCU free. | 3510 | * is gonna access it anymore. Schedule RCU free. |
3511 | */ | 3511 | */ |
3512 | if (is_last) | 3512 | if (is_last) |
3513 | call_rcu_sched(&wq->rcu, rcu_free_wq); | 3513 | call_rcu(&wq->rcu, rcu_free_wq); |
3514 | } | 3514 | } |
3515 | 3515 | ||
3516 | /** | 3516 | /** |
@@ -4195,7 +4195,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
4195 | * The base ref is never dropped on per-cpu pwqs. Directly | 4195 | * The base ref is never dropped on per-cpu pwqs. Directly |
4196 | * schedule RCU free. | 4196 | * schedule RCU free. |
4197 | */ | 4197 | */ |
4198 | call_rcu_sched(&wq->rcu, rcu_free_wq); | 4198 | call_rcu(&wq->rcu, rcu_free_wq); |
4199 | } else { | 4199 | } else { |
4200 | /* | 4200 | /* |
4201 | * We're the sole accessor of @wq at this point. Directly | 4201 | * We're the sole accessor of @wq at this point. Directly |
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index de10b8c0bff6..9877682e49c7 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c | |||
@@ -181,7 +181,7 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref, | |||
181 | ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch; | 181 | ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch; |
182 | 182 | ||
183 | percpu_ref_get(ref); /* put after confirmation */ | 183 | percpu_ref_get(ref); /* put after confirmation */ |
184 | call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu); | 184 | call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu); |
185 | } | 185 | } |
186 | 186 | ||
187 | static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) | 187 | static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) |
diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 8e2ff195ecb3..43ce2f4d2551 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c | |||
@@ -1225,7 +1225,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot) | |||
1225 | { | 1225 | { |
1226 | struct mm_struct *mm = mm_slot->mm; | 1226 | struct mm_struct *mm = mm_slot->mm; |
1227 | 1227 | ||
1228 | VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); | 1228 | lockdep_assert_held(&khugepaged_mm_lock); |
1229 | 1229 | ||
1230 | if (khugepaged_test_exit(mm)) { | 1230 | if (khugepaged_test_exit(mm)) { |
1231 | /* free mm_slot */ | 1231 | /* free mm_slot */ |
@@ -1653,7 +1653,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, | |||
1653 | int progress = 0; | 1653 | int progress = 0; |
1654 | 1654 | ||
1655 | VM_BUG_ON(!pages); | 1655 | VM_BUG_ON(!pages); |
1656 | VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); | 1656 | lockdep_assert_held(&khugepaged_mm_lock); |
1657 | 1657 | ||
1658 | if (khugepaged_scan.mm_slot) | 1658 | if (khugepaged_scan.mm_slot) |
1659 | mm_slot = khugepaged_scan.mm_slot; | 1659 | mm_slot = khugepaged_scan.mm_slot; |
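The khugepaged hunks above (and the lru_add_page_tail() hunk further down) swap an open-coded "VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(...))" assertion for lockdep_assert_held(), which documents the locking requirement, compiles to nothing without CONFIG_LOCKDEP, and needs no NR_CPUS special case because it does not rely on spin_is_locked() being meaningful on UP. A minimal sketch of the conversion (lock and function names are illustrative):

	static DEFINE_SPINLOCK(example_lock);

	static void example_update(void)
	{
		/* old: VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&example_lock)); */
		lockdep_assert_held(&example_lock);	/* new: checked only under lockdep */
		/* ... modify data protected by example_lock ... */
	}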
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 2a9fbc4a37d5..f2f03c655807 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c | |||
@@ -199,7 +199,7 @@ void tlb_table_flush(struct mmu_gather *tlb) | |||
199 | 199 | ||
200 | if (*batch) { | 200 | if (*batch) { |
201 | tlb_table_invalidate(tlb); | 201 | tlb_table_invalidate(tlb); |
202 | call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); | 202 | call_rcu(&(*batch)->rcu, tlb_remove_table_rcu); |
203 | *batch = NULL; | 203 | *batch = NULL; |
204 | } | 204 | } |
205 | } | 205 | } |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -962,10 +962,10 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep, | |||
962 | * To protect lockless access to n->shared during irq disabled context. | 962 | * To protect lockless access to n->shared during irq disabled context. |
963 | * If n->shared isn't NULL in irq disabled context, accessing to it is | 963 | * If n->shared isn't NULL in irq disabled context, accessing to it is |
964 | * guaranteed to be valid until irq is re-enabled, because it will be | 964 | * guaranteed to be valid until irq is re-enabled, because it will be |
965 | * freed after synchronize_sched(). | 965 | * freed after synchronize_rcu(). |
966 | */ | 966 | */ |
967 | if (old_shared && force_change) | 967 | if (old_shared && force_change) |
968 | synchronize_sched(); | 968 | synchronize_rcu(); |
969 | 969 | ||
970 | fail: | 970 | fail: |
971 | kfree(old_shared); | 971 | kfree(old_shared); |
diff --git a/mm/slab_common.c b/mm/slab_common.c index 7eb8dc136c1c..9c11e8a937d2 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -724,7 +724,7 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s, | |||
724 | css_get(&s->memcg_params.memcg->css); | 724 | css_get(&s->memcg_params.memcg->css); |
725 | 725 | ||
726 | s->memcg_params.deact_fn = deact_fn; | 726 | s->memcg_params.deact_fn = deact_fn; |
727 | call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn); | 727 | call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn); |
728 | } | 728 | } |
729 | 729 | ||
730 | void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) | 730 | void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) |
@@ -839,11 +839,11 @@ static void flush_memcg_workqueue(struct kmem_cache *s) | |||
839 | mutex_unlock(&slab_mutex); | 839 | mutex_unlock(&slab_mutex); |
840 | 840 | ||
841 | /* | 841 | /* |
842 | * SLUB deactivates the kmem_caches through call_rcu_sched. Make | 842 | * SLUB deactivates the kmem_caches through call_rcu. Make |
843 | * sure all registered rcu callbacks have been invoked. | 843 | * sure all registered rcu callbacks have been invoked. |
844 | */ | 844 | */ |
845 | if (IS_ENABLED(CONFIG_SLUB)) | 845 | if (IS_ENABLED(CONFIG_SLUB)) |
846 | rcu_barrier_sched(); | 846 | rcu_barrier(); |
847 | 847 | ||
848 | /* | 848 | /* |
849 | * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB | 849 | * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB |
diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c | |||
@@ -823,8 +823,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail, | |||
823 | VM_BUG_ON_PAGE(!PageHead(page), page); | 823 | VM_BUG_ON_PAGE(!PageHead(page), page); |
824 | VM_BUG_ON_PAGE(PageCompound(page_tail), page); | 824 | VM_BUG_ON_PAGE(PageCompound(page_tail), page); |
825 | VM_BUG_ON_PAGE(PageLRU(page_tail), page); | 825 | VM_BUG_ON_PAGE(PageLRU(page_tail), page); |
826 | VM_BUG_ON(NR_CPUS != 1 && | 826 | lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock); |
827 | !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock)); | ||
828 | 827 | ||
829 | if (!list) | 828 | if (!list) |
830 | SetPageLRU(page_tail); | 829 | SetPageLRU(page_tail); |
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index a7ea2d431714..596ec6e7df11 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c | |||
@@ -728,7 +728,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) | |||
728 | rcu_assign_pointer(*pp, p->next); | 728 | rcu_assign_pointer(*pp, p->next); |
729 | hlist_del_init(&p->mglist); | 729 | hlist_del_init(&p->mglist); |
730 | del_timer(&p->timer); | 730 | del_timer(&p->timer); |
731 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 731 | call_rcu(&p->rcu, br_multicast_free_pg); |
732 | err = 0; | 732 | err = 0; |
733 | 733 | ||
734 | if (!mp->ports && !mp->host_joined && | 734 | if (!mp->ports && !mp->host_joined && |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 6bac0d6b7b94..0255223f2001 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -260,7 +260,7 @@ static void br_multicast_group_expired(struct timer_list *t) | |||
260 | hlist_del_rcu(&mp->hlist[mdb->ver]); | 260 | hlist_del_rcu(&mp->hlist[mdb->ver]); |
261 | mdb->size--; | 261 | mdb->size--; |
262 | 262 | ||
263 | call_rcu_bh(&mp->rcu, br_multicast_free_group); | 263 | call_rcu(&mp->rcu, br_multicast_free_group); |
264 | 264 | ||
265 | out: | 265 | out: |
266 | spin_unlock(&br->multicast_lock); | 266 | spin_unlock(&br->multicast_lock); |
@@ -291,7 +291,7 @@ static void br_multicast_del_pg(struct net_bridge *br, | |||
291 | del_timer(&p->timer); | 291 | del_timer(&p->timer); |
292 | br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, | 292 | br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, |
293 | p->flags); | 293 | p->flags); |
294 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 294 | call_rcu(&p->rcu, br_multicast_free_pg); |
295 | 295 | ||
296 | if (!mp->ports && !mp->host_joined && | 296 | if (!mp->ports && !mp->host_joined && |
297 | netif_running(br->dev)) | 297 | netif_running(br->dev)) |
@@ -358,7 +358,7 @@ static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max, | |||
358 | } | 358 | } |
359 | 359 | ||
360 | br_mdb_rehash_seq++; | 360 | br_mdb_rehash_seq++; |
361 | call_rcu_bh(&mdb->rcu, br_mdb_free); | 361 | call_rcu(&mdb->rcu, br_mdb_free); |
362 | 362 | ||
363 | out: | 363 | out: |
364 | rcu_assign_pointer(*mdbp, mdb); | 364 | rcu_assign_pointer(*mdbp, mdb); |
@@ -1629,7 +1629,7 @@ br_multicast_leave_group(struct net_bridge *br, | |||
1629 | rcu_assign_pointer(*pp, p->next); | 1629 | rcu_assign_pointer(*pp, p->next); |
1630 | hlist_del_init(&p->mglist); | 1630 | hlist_del_init(&p->mglist); |
1631 | del_timer(&p->timer); | 1631 | del_timer(&p->timer); |
1632 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 1632 | call_rcu(&p->rcu, br_multicast_free_pg); |
1633 | br_mdb_notify(br->dev, port, group, RTM_DELMDB, | 1633 | br_mdb_notify(br->dev, port, group, RTM_DELMDB, |
1634 | p->flags); | 1634 | p->flags); |
1635 | 1635 | ||
@@ -2051,19 +2051,19 @@ void br_multicast_dev_del(struct net_bridge *br) | |||
2051 | hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], | 2051 | hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], |
2052 | hlist[ver]) { | 2052 | hlist[ver]) { |
2053 | del_timer(&mp->timer); | 2053 | del_timer(&mp->timer); |
2054 | call_rcu_bh(&mp->rcu, br_multicast_free_group); | 2054 | call_rcu(&mp->rcu, br_multicast_free_group); |
2055 | } | 2055 | } |
2056 | } | 2056 | } |
2057 | 2057 | ||
2058 | if (mdb->old) { | 2058 | if (mdb->old) { |
2059 | spin_unlock_bh(&br->multicast_lock); | 2059 | spin_unlock_bh(&br->multicast_lock); |
2060 | rcu_barrier_bh(); | 2060 | rcu_barrier(); |
2061 | spin_lock_bh(&br->multicast_lock); | 2061 | spin_lock_bh(&br->multicast_lock); |
2062 | WARN_ON(mdb->old); | 2062 | WARN_ON(mdb->old); |
2063 | } | 2063 | } |
2064 | 2064 | ||
2065 | mdb->old = mdb; | 2065 | mdb->old = mdb; |
2066 | call_rcu_bh(&mdb->rcu, br_mdb_free); | 2066 | call_rcu(&mdb->rcu, br_mdb_free); |
2067 | 2067 | ||
2068 | out: | 2068 | out: |
2069 | spin_unlock_bh(&br->multicast_lock); | 2069 | spin_unlock_bh(&br->multicast_lock); |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 2b9fdbc43205..464f0ff46c22 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -801,7 +801,7 @@ void __netpoll_cleanup(struct netpoll *np) | |||
801 | ops->ndo_netpoll_cleanup(np->dev); | 801 | ops->ndo_netpoll_cleanup(np->dev); |
802 | 802 | ||
803 | RCU_INIT_POINTER(np->dev->npinfo, NULL); | 803 | RCU_INIT_POINTER(np->dev->npinfo, NULL); |
804 | call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info); | 804 | call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info); |
805 | } else | 805 | } else |
806 | RCU_INIT_POINTER(np->dev->npinfo, NULL); | 806 | RCU_INIT_POINTER(np->dev->npinfo, NULL); |
807 | } | 807 | } |
@@ -812,7 +812,7 @@ void __netpoll_free(struct netpoll *np) | |||
812 | ASSERT_RTNL(); | 812 | ASSERT_RTNL(); |
813 | 813 | ||
814 | /* Wait for transmitting packets to finish before freeing. */ | 814 | /* Wait for transmitting packets to finish before freeing. */ |
815 | synchronize_rcu_bh(); | 815 | synchronize_rcu(); |
816 | __netpoll_cleanup(np); | 816 | __netpoll_cleanup(np); |
817 | kfree(np); | 817 | kfree(np); |
818 | } | 818 | } |
diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 56a99d0c9aa0..c92d6ccce610 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c | |||
@@ -580,7 +580,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock) | |||
580 | write_unlock_bh(&sk->sk_callback_lock); | 580 | write_unlock_bh(&sk->sk_callback_lock); |
581 | sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); | 581 | sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); |
582 | 582 | ||
583 | call_rcu_sched(&psock->rcu, sk_psock_destroy); | 583 | call_rcu(&psock->rcu, sk_psock_destroy); |
584 | } | 584 | } |
585 | EXPORT_SYMBOL_GPL(sk_psock_drop); | 585 | EXPORT_SYMBOL_GPL(sk_psock_drop); |
586 | 586 | ||
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 7d6ff983ba2c..dbd0f7bae00a 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -2405,7 +2405,7 @@ static void __exit decnet_exit(void) | |||
2405 | 2405 | ||
2406 | proto_unregister(&dn_proto); | 2406 | proto_unregister(&dn_proto); |
2407 | 2407 | ||
2408 | rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */ | 2408 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ |
2409 | } | 2409 | } |
2410 | module_exit(decnet_exit); | 2410 | module_exit(decnet_exit); |
2411 | #endif | 2411 | #endif |
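The decnet teardown above shows the companion rule to call_rcu(): before module text and data can go away, rcu_barrier() must wait for every callback already queued with call_rcu() to run, since the callback functions themselves live in the module. With the bh and sched flavors folded into one, a single rcu_barrier() now covers them all. A minimal sketch of that ordering (all names here are illustrative, not from this patch):

	static void __exit example_exit(void)
	{
		example_unregister();			/* stop queueing new call_rcu() callbacks */
		rcu_barrier();				/* wait for callbacks already in flight */
		kmem_cache_destroy(example_cache);	/* nothing can touch it now */
	}
	module_exit(example_exit);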
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index ca3b0f46de53..016e628c6ac9 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -540,7 +540,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab) | |||
540 | 540 | ||
541 | if (--tab->refcnt == 0) { | 541 | if (--tab->refcnt == 0) { |
542 | list_del(&tab->list); | 542 | list_del(&tab->list); |
543 | call_rcu_bh(&tab->rcu, stab_kfree_rcu); | 543 | call_rcu(&tab->rcu, stab_kfree_rcu); |
544 | } | 544 | } |
545 | } | 545 | } |
546 | EXPORT_SYMBOL(qdisc_put_stab); | 546 | EXPORT_SYMBOL(qdisc_put_stab); |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index de1663f7d3ad..66ba2ce2320f 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -1372,7 +1372,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, | |||
1372 | if (!tp_head) { | 1372 | if (!tp_head) { |
1373 | RCU_INIT_POINTER(*miniqp->p_miniq, NULL); | 1373 | RCU_INIT_POINTER(*miniqp->p_miniq, NULL); |
1374 | /* Wait for flying RCU callback before it is freed. */ | 1374 | /* Wait for flying RCU callback before it is freed. */ |
1375 | rcu_barrier_bh(); | 1375 | rcu_barrier(); |
1376 | return; | 1376 | return; |
1377 | } | 1377 | } |
1378 | 1378 | ||
@@ -1380,10 +1380,10 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, | |||
1380 | &miniqp->miniq1 : &miniqp->miniq2; | 1380 | &miniqp->miniq1 : &miniqp->miniq2; |
1381 | 1381 | ||
1382 | /* We need to make sure that readers won't see the miniq | 1382 | /* We need to make sure that readers won't see the miniq |
1383 | * we are about to modify. So wait until previous call_rcu_bh callback | 1383 | * we are about to modify. So wait until previous call_rcu callback |
1384 | * is done. | 1384 | * is done. |
1385 | */ | 1385 | */ |
1386 | rcu_barrier_bh(); | 1386 | rcu_barrier(); |
1387 | miniq->filter_list = tp_head; | 1387 | miniq->filter_list = tp_head; |
1388 | rcu_assign_pointer(*miniqp->p_miniq, miniq); | 1388 | rcu_assign_pointer(*miniqp->p_miniq, miniq); |
1389 | 1389 | ||
@@ -1392,7 +1392,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, | |||
1392 | * block potential new user of miniq_old until all readers | 1392 | * block potential new user of miniq_old until all readers |
1393 | * are not seeing it. | 1393 | * are not seeing it. |
1394 | */ | 1394 | */ |
1395 | call_rcu_bh(&miniq_old->rcu, mini_qdisc_rcu_func); | 1395 | call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func); |
1396 | } | 1396 | } |
1397 | EXPORT_SYMBOL(mini_qdisc_pair_swap); | 1397 | EXPORT_SYMBOL(mini_qdisc_pair_swap); |
1398 | 1398 | ||
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index c883ec55654f..377f373db6c0 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -573,6 +573,27 @@ foreach my $entry (@mode_permission_funcs) { | |||
573 | } | 573 | } |
574 | $mode_perms_search = "(?:${mode_perms_search})"; | 574 | $mode_perms_search = "(?:${mode_perms_search})"; |
575 | 575 | ||
576 | our %deprecated_apis = ( | ||
577 | "synchronize_rcu_bh" => "synchronize_rcu", | ||
578 | "synchronize_rcu_bh_expedited" => "synchronize_rcu_expedited", | ||
579 | "call_rcu_bh" => "call_rcu", | ||
580 | "rcu_barrier_bh" => "rcu_barrier", | ||
581 | "synchronize_sched" => "synchronize_rcu", | ||
582 | "synchronize_sched_expedited" => "synchronize_rcu_expedited", | ||
583 | "call_rcu_sched" => "call_rcu", | ||
584 | "rcu_barrier_sched" => "rcu_barrier", | ||
585 | "get_state_synchronize_sched" => "get_state_synchronize_rcu", | ||
586 | "cond_synchronize_sched" => "cond_synchronize_rcu", | ||
587 | ); | ||
588 | |||
589 | #Create a search pattern for all these strings to speed up a loop below | ||
590 | our $deprecated_apis_search = ""; | ||
591 | foreach my $entry (keys %deprecated_apis) { | ||
592 | $deprecated_apis_search .= '|' if ($deprecated_apis_search ne ""); | ||
593 | $deprecated_apis_search .= $entry; | ||
594 | } | ||
595 | $deprecated_apis_search = "(?:${deprecated_apis_search})"; | ||
596 | |||
576 | our $mode_perms_world_writable = qr{ | 597 | our $mode_perms_world_writable = qr{ |
577 | S_IWUGO | | 598 | S_IWUGO | |
578 | S_IWOTH | | 599 | S_IWOTH | |
@@ -6368,6 +6389,20 @@ sub process { | |||
6368 | "please use device_initcall() or more appropriate function instead of __initcall() (see include/linux/init.h)\n" . $herecurr); | 6389 | "please use device_initcall() or more appropriate function instead of __initcall() (see include/linux/init.h)\n" . $herecurr); |
6369 | } | 6390 | } |
6370 | 6391 | ||
6392 | # check for spin_is_locked(), suggest lockdep instead | ||
6393 | if ($line =~ /\bspin_is_locked\(/) { | ||
6394 | WARN("USE_LOCKDEP", | ||
6395 | "Where possible, use lockdep_assert_held instead of assertions based on spin_is_locked\n" . $herecurr); | ||
6396 | } | ||
6397 | |||
6398 | # check for deprecated apis | ||
6399 | if ($line =~ /\b($deprecated_apis_search)\b\s*\(/) { | ||
6400 | my $deprecated_api = $1; | ||
6401 | my $new_api = $deprecated_apis{$deprecated_api}; | ||
6402 | WARN("DEPRECATED_API", | ||
6403 | "Deprecated use of '$deprecated_api', prefer '$new_api' instead\n" . $herecurr); | ||
6404 | } | ||
6405 | |||
6371 | # check for various structs that are normally const (ops, kgdb, device_tree) | 6406 | # check for various structs that are normally const (ops, kgdb, device_tree) |
6372 | # and avoid what seem like struct definitions 'struct foo {' | 6407 | # and avoid what seem like struct definitions 'struct foo {' |
6373 | if ($line !~ /\bconst\b/ && | 6408 | if ($line !~ /\bconst\b/ && |
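With the %deprecated_apis table and the spin_is_locked() check in place, checkpatch.pl now warns on new uses of the old flavor-specific calls. For example, a patch hunk adding the line below would draw a DEPRECATED_API warning, with the message built from the table above:

	synchronize_sched();	/* WARNING: Deprecated use of 'synchronize_sched', prefer 'synchronize_rcu' instead */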
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h index 6935ef94e77a..857d9e22826e 100644 --- a/tools/include/linux/kernel.h +++ b/tools/include/linux/kernel.h | |||
@@ -116,6 +116,6 @@ int scnprintf(char * buf, size_t size, const char * fmt, ...); | |||
116 | #define round_down(x, y) ((x) & ~__round_mask(x, y)) | 116 | #define round_down(x, y) ((x) & ~__round_mask(x, y)) |
117 | 117 | ||
118 | #define current_gfp_context(k) 0 | 118 | #define current_gfp_context(k) 0 |
119 | #define synchronize_sched() | 119 | #define synchronize_rcu() |
120 | 120 | ||
121 | #endif | 121 | #endif |
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 5a7a62d76a50..19864f1cb27a 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh | |||
@@ -194,6 +194,14 @@ do | |||
194 | shift | 194 | shift |
195 | done | 195 | done |
196 | 196 | ||
197 | if test -z "$TORTURE_INITRD" || tools/testing/selftests/rcutorture/bin/mkinitrd.sh | ||
198 | then | ||
199 | : | ||
200 | else | ||
201 | echo No initrd and unable to create one, aborting test >&2 | ||
202 | exit 1 | ||
203 | fi | ||
204 | |||
197 | CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG | 205 | CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG |
198 | 206 | ||
199 | if test -z "$configs" | 207 | if test -z "$configs" |
diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh new file mode 100755 index 000000000000..da298394daa2 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh | |||
@@ -0,0 +1,136 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # Create an initrd directory if one does not already exist. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify | ||
6 | # it under the terms of the GNU General Public License as published by | ||
7 | # the Free Software Foundation; either version 2 of the License, or | ||
8 | # (at your option) any later version. | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, | ||
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | # GNU General Public License for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License | ||
16 | # along with this program; if not, you can access it online at | ||
17 | # http://www.gnu.org/licenses/gpl-2.0.html. | ||
18 | # | ||
19 | # Copyright (C) IBM Corporation, 2013 | ||
20 | # | ||
21 | # Author: Connor Shu <Connor.Shu@ibm.com> | ||
22 | |||
23 | D=tools/testing/selftests/rcutorture | ||
24 | |||
25 | # Prerequisite checks | ||
26 | [ -z "$D" ] && echo >&2 "No argument supplied" && exit 1 | ||
27 | if [ ! -d "$D" ]; then | ||
28 | echo >&2 "$D does not exist: Malformed kernel source tree?" | ||
29 | exit 1 | ||
30 | fi | ||
31 | if [ -s "$D/initrd/init" ]; then | ||
32 | echo "$D/initrd/init already exists, no need to create it" | ||
33 | exit 0 | ||
34 | fi | ||
35 | |||
36 | T=${TMPDIR-/tmp}/mkinitrd.sh.$$ | ||
37 | trap 'rm -rf $T' 0 2 | ||
38 | mkdir $T | ||
39 | |||
40 | cat > $T/init << '__EOF___' | ||
41 | #!/bin/sh | ||
42 | # Run in userspace a few milliseconds every second. This helps to | ||
43 | # exercise the NO_HZ_FULL portions of RCU. | ||
44 | while : | ||
45 | do | ||
46 | q= | ||
47 | for i in \ | ||
48 | a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \ | ||
49 | a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \ | ||
50 | a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \ | ||
51 | a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \ | ||
52 | a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \ | ||
53 | a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a | ||
54 | do | ||
55 | q="$q $i" | ||
56 | done | ||
57 | sleep 1 | ||
58 | done | ||
59 | __EOF___ | ||
60 | |||
61 | # Try using dracut to create initrd | ||
62 | if command -v dracut >/dev/null 2>&1 | ||
63 | then | ||
64 | echo Creating $D/initrd using dracut. | ||
65 | # Filesystem creation | ||
66 | dracut --force --no-hostonly --no-hostonly-cmdline --module "base" $T/initramfs.img | ||
67 | cd $D | ||
68 | mkdir -p initrd | ||
69 | cd initrd | ||
70 | zcat $T/initramfs.img | cpio -id | ||
71 | cp $T/init init | ||
72 | chmod +x init | ||
73 | echo Done creating $D/initrd using dracut | ||
74 | exit 0 | ||
75 | fi | ||
76 | |||
77 | # No dracut, so create a C-language initrd/init program and statically | ||
78 | # link it. This results in a very small initrd, but might be a bit less | ||
79 | # future-proof than dracut. | ||
80 | echo "Could not find dracut, attempting C initrd" | ||
81 | cd $D | ||
82 | mkdir -p initrd | ||
83 | cd initrd | ||
84 | cat > init.c << '___EOF___' | ||
85 | #ifndef NOLIBC | ||
86 | #include <unistd.h> | ||
87 | #include <sys/time.h> | ||
88 | #endif | ||
89 | |||
90 | volatile unsigned long delaycount; | ||
91 | |||
92 | int main(int argc, int argv[]) | ||
93 | { | ||
94 | int i; | ||
95 | struct timeval tv; | ||
96 | struct timeval tvb; | ||
97 | |||
98 | for (;;) { | ||
99 | sleep(1); | ||
100 | /* Need some userspace time. */ | ||
101 | if (gettimeofday(&tvb, NULL)) | ||
102 | continue; | ||
103 | do { | ||
104 | for (i = 0; i < 1000 * 100; i++) | ||
105 | delaycount = i * i; | ||
106 | if (gettimeofday(&tv, NULL)) | ||
107 | break; | ||
108 | tv.tv_sec -= tvb.tv_sec; | ||
109 | if (tv.tv_sec > 1) | ||
110 | break; | ||
111 | tv.tv_usec += tv.tv_sec * 1000 * 1000; | ||
112 | tv.tv_usec -= tvb.tv_usec; | ||
113 | } while (tv.tv_usec < 1000); | ||
114 | } | ||
115 | return 0; | ||
116 | } | ||
117 | ___EOF___ | ||
118 | |||
119 | # build using nolibc on supported archs (smaller executable) and fall | ||
120 | # back to regular glibc on other ones. | ||
121 | if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \ | ||
122 | "||__ARM_EABI__||__aarch64__\nyes\n#endif" \ | ||
123 | | ${CROSS_COMPILE}gcc -E -nostdlib -xc - \ | ||
124 | | grep -q '^yes'; then | ||
125 | # architecture supported by nolibc | ||
126 | ${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \ | ||
127 | -nostdlib -include ../bin/nolibc.h -lgcc -s -static -Os \ | ||
128 | -o init init.c | ||
129 | else | ||
130 | ${CROSS_COMPILE}gcc -s -static -Os -o init init.c | ||
131 | fi | ||
132 | |||
133 | rm init.c | ||
134 | echo "Done creating a statically linked C-language initrd" | ||
135 | |||
136 | exit 0 | ||
diff --git a/tools/testing/selftests/rcutorture/bin/nolibc.h b/tools/testing/selftests/rcutorture/bin/nolibc.h new file mode 100644 index 000000000000..f98f5b92d3eb --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/nolibc.h | |||
@@ -0,0 +1,2197 @@ | |||
1 | /* SPDX-License-Identifier: LGPL-2.1 OR MIT */ | ||
2 | /* nolibc.h | ||
3 | * Copyright (C) 2017-2018 Willy Tarreau <w@1wt.eu> | ||
4 | */ | ||
5 | |||
6 | /* some archs (at least aarch64) don't expose the regular syscalls anymore by | ||
7 | * default, either because they have an "_at" replacement, or because there are | ||
8 | * more modern alternatives. For now we'd rather still use them. | ||
9 | */ | ||
10 | #define __ARCH_WANT_SYSCALL_NO_AT | ||
11 | #define __ARCH_WANT_SYSCALL_NO_FLAGS | ||
12 | #define __ARCH_WANT_SYSCALL_DEPRECATED | ||
13 | |||
14 | #include <asm/unistd.h> | ||
15 | #include <asm/ioctls.h> | ||
16 | #include <asm/errno.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/loop.h> | ||
19 | |||
20 | #define NOLIBC | ||
21 | |||
22 | /* Build a static executable this way : | ||
23 | * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \ | ||
24 | * -static -include nolibc.h -lgcc -o hello hello.c | ||
25 | * | ||
26 | * Useful calling convention table found here : | ||
27 | * http://man7.org/linux/man-pages/man2/syscall.2.html | ||
28 | * | ||
29 | * This doc is even better : | ||
30 | * https://w3challs.com/syscalls/ | ||
31 | */ | ||
32 | |||
33 | |||
34 | /* this way it will be removed if unused */ | ||
35 | static int errno; | ||
36 | |||
37 | #ifndef NOLIBC_IGNORE_ERRNO | ||
38 | #define SET_ERRNO(v) do { errno = (v); } while (0) | ||
39 | #else | ||
40 | #define SET_ERRNO(v) do { } while (0) | ||
41 | #endif | ||
42 | |||
43 | /* errno codes all ensure that they will not conflict with a valid pointer | ||
44 | * because they all correspond to the highest addressable memory page. | ||
45 | */ | ||
46 | #define MAX_ERRNO 4095 | ||
47 | |||
48 | /* Declare a few quite common macros and types that usually are in stdlib.h, | ||
49 | * stdint.h, ctype.h, unistd.h and a few other common locations. | ||
50 | */ | ||
51 | |||
52 | #define NULL ((void *)0) | ||
53 | |||
54 | /* stdint types */ | ||
55 | typedef unsigned char uint8_t; | ||
56 | typedef signed char int8_t; | ||
57 | typedef unsigned short uint16_t; | ||
58 | typedef signed short int16_t; | ||
59 | typedef unsigned int uint32_t; | ||
60 | typedef signed int int32_t; | ||
61 | typedef unsigned long long uint64_t; | ||
62 | typedef signed long long int64_t; | ||
63 | typedef unsigned long size_t; | ||
64 | typedef signed long ssize_t; | ||
65 | typedef unsigned long uintptr_t; | ||
66 | typedef signed long intptr_t; | ||
67 | typedef signed long ptrdiff_t; | ||
68 | |||
69 | /* for stat() */ | ||
70 | typedef unsigned int dev_t; | ||
71 | typedef unsigned long ino_t; | ||
72 | typedef unsigned int mode_t; | ||
73 | typedef signed int pid_t; | ||
74 | typedef unsigned int uid_t; | ||
75 | typedef unsigned int gid_t; | ||
76 | typedef unsigned long nlink_t; | ||
77 | typedef signed long off_t; | ||
78 | typedef signed long blksize_t; | ||
79 | typedef signed long blkcnt_t; | ||
80 | typedef signed long time_t; | ||
81 | |||
82 | /* for poll() */ | ||
83 | struct pollfd { | ||
84 | int fd; | ||
85 | short int events; | ||
86 | short int revents; | ||
87 | }; | ||
88 | |||
89 | /* for select() */ | ||
90 | struct timeval { | ||
91 | long tv_sec; | ||
92 | long tv_usec; | ||
93 | }; | ||
94 | |||
95 | /* for pselect() */ | ||
96 | struct timespec { | ||
97 | long tv_sec; | ||
98 | long tv_nsec; | ||
99 | }; | ||
100 | |||
101 | /* for gettimeofday() */ | ||
102 | struct timezone { | ||
103 | int tz_minuteswest; | ||
104 | int tz_dsttime; | ||
105 | }; | ||
106 | |||
107 | /* for getdents64() */ | ||
108 | struct linux_dirent64 { | ||
109 | uint64_t d_ino; | ||
110 | int64_t d_off; | ||
111 | unsigned short d_reclen; | ||
112 | unsigned char d_type; | ||
113 | char d_name[]; | ||
114 | }; | ||
115 | |||
116 | /* commonly an fd_set represents 256 FDs */ | ||
117 | #define FD_SETSIZE 256 | ||
118 | typedef struct { uint32_t fd32[FD_SETSIZE/32]; } fd_set; | ||
119 | |||
120 | /* needed by wait4() */ | ||
121 | struct rusage { | ||
122 | struct timeval ru_utime; | ||
123 | struct timeval ru_stime; | ||
124 | long ru_maxrss; | ||
125 | long ru_ixrss; | ||
126 | long ru_idrss; | ||
127 | long ru_isrss; | ||
128 | long ru_minflt; | ||
129 | long ru_majflt; | ||
130 | long ru_nswap; | ||
131 | long ru_inblock; | ||
132 | long ru_oublock; | ||
133 | long ru_msgsnd; | ||
134 | long ru_msgrcv; | ||
135 | long ru_nsignals; | ||
136 | long ru_nvcsw; | ||
137 | long ru_nivcsw; | ||
138 | }; | ||
139 | |||
140 | /* stat flags (WARNING, octal here) */ | ||
141 | #define S_IFDIR 0040000 | ||
142 | #define S_IFCHR 0020000 | ||
143 | #define S_IFBLK 0060000 | ||
144 | #define S_IFREG 0100000 | ||
145 | #define S_IFIFO 0010000 | ||
146 | #define S_IFLNK 0120000 | ||
147 | #define S_IFSOCK 0140000 | ||
148 | #define S_IFMT 0170000 | ||
149 | |||
150 | #define S_ISDIR(mode) (((mode) & S_IFDIR) == S_IFDIR) | ||
151 | #define S_ISCHR(mode) (((mode) & S_IFCHR) == S_IFCHR) | ||
152 | #define S_ISBLK(mode) (((mode) & S_IFBLK) == S_IFBLK) | ||
153 | #define S_ISREG(mode) (((mode) & S_IFREG) == S_IFREG) | ||
154 | #define S_ISFIFO(mode) (((mode) & S_IFIFO) == S_IFIFO) | ||
155 | #define S_ISLNK(mode) (((mode) & S_IFLNK) == S_IFLNK) | ||
156 | #define S_ISSOCK(mode) (((mode) & S_IFSOCK) == S_IFSOCK) | ||
157 | |||
158 | #define DT_UNKNOWN 0 | ||
159 | #define DT_FIFO 1 | ||
160 | #define DT_CHR 2 | ||
161 | #define DT_DIR 4 | ||
162 | #define DT_BLK 6 | ||
163 | #define DT_REG 8 | ||
164 | #define DT_LNK 10 | ||
165 | #define DT_SOCK 12 | ||
166 | |||
167 | /* all the *at functions */ | ||
168 | #ifndef AT_FDWCD | ||
169 | #define AT_FDCWD -100 | ||
170 | #endif | ||
171 | |||
172 | /* lseek */ | ||
173 | #define SEEK_SET 0 | ||
174 | #define SEEK_CUR 1 | ||
175 | #define SEEK_END 2 | ||
176 | |||
177 | /* reboot */ | ||
178 | #define LINUX_REBOOT_MAGIC1 0xfee1dead | ||
179 | #define LINUX_REBOOT_MAGIC2 0x28121969 | ||
180 | #define LINUX_REBOOT_CMD_HALT 0xcdef0123 | ||
181 | #define LINUX_REBOOT_CMD_POWER_OFF 0x4321fedc | ||
182 | #define LINUX_REBOOT_CMD_RESTART 0x01234567 | ||
183 | #define LINUX_REBOOT_CMD_SW_SUSPEND 0xd000fce2 | ||
184 | |||
185 | |||
186 | /* The format of the struct as returned by the libc to the application, which | ||
187 | * significantly differs from the format returned by the stat() syscall flavours. | ||
188 | */ | ||
189 | struct stat { | ||
190 | dev_t st_dev; /* ID of device containing file */ | ||
191 | ino_t st_ino; /* inode number */ | ||
192 | mode_t st_mode; /* protection */ | ||
193 | nlink_t st_nlink; /* number of hard links */ | ||
194 | uid_t st_uid; /* user ID of owner */ | ||
195 | gid_t st_gid; /* group ID of owner */ | ||
196 | dev_t st_rdev; /* device ID (if special file) */ | ||
197 | off_t st_size; /* total size, in bytes */ | ||
198 | blksize_t st_blksize; /* blocksize for file system I/O */ | ||
199 | blkcnt_t st_blocks; /* number of 512B blocks allocated */ | ||
200 | time_t st_atime; /* time of last access */ | ||
201 | time_t st_mtime; /* time of last modification */ | ||
202 | time_t st_ctime; /* time of last status change */ | ||
203 | }; | ||
204 | |||
205 | #define WEXITSTATUS(status) (((status) & 0xff00) >> 8) | ||
206 | #define WIFEXITED(status) (((status) & 0x7f) == 0) | ||
207 | |||
208 | |||
209 | /* Below comes the architecture-specific code. For each architecture, we have | ||
210 | * the syscall declarations and the _start code definition. This is the only | ||
211 | * global part. On all architectures the kernel puts everything in the stack | ||
212 | * before jumping to _start just above us, without any return address (_start | ||
213 | * is not a function but an entry point). So at the stack pointer we find argc. | ||
214 | * Then argv[] begins, and ends at the first NULL. Then we have envp which | ||
215 | * starts and ends with a NULL as well. So envp=argv+argc+1. | ||
216 | */ | ||
217 | |||
218 | #if defined(__x86_64__) | ||
219 | /* Syscalls for x86_64 : | ||
220 | * - registers are 64-bit | ||
221 | * - syscall number is passed in rax | ||
222 | * - arguments are in rdi, rsi, rdx, r10, r8, r9 respectively | ||
223 | * - the system call is performed by calling the syscall instruction | ||
224 | * - syscall return comes in rax | ||
225 | * - rcx and r8..r11 may be clobbered, others are preserved. | ||
226 | * - the arguments are cast to long and assigned into the target registers | ||
227 | * which are then simply passed as registers to the asm code, so that we | ||
228 | * don't have to experience issues with register constraints. | ||
229 | * - the syscall number is always specified last in order to allow to force | ||
230 | * some registers before (gcc refuses a %-register at the last position). | ||
231 | */ | ||
232 | |||
233 | #define my_syscall0(num) \ | ||
234 | ({ \ | ||
235 | long _ret; \ | ||
236 | register long _num asm("rax") = (num); \ | ||
237 | \ | ||
238 | asm volatile ( \ | ||
239 | "syscall\n" \ | ||
240 | : "=a" (_ret) \ | ||
241 | : "0"(_num) \ | ||
242 | : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ | ||
243 | ); \ | ||
244 | _ret; \ | ||
245 | }) | ||
246 | |||
247 | #define my_syscall1(num, arg1) \ | ||
248 | ({ \ | ||
249 | long _ret; \ | ||
250 | register long _num asm("rax") = (num); \ | ||
251 | register long _arg1 asm("rdi") = (long)(arg1); \ | ||
252 | \ | ||
253 | asm volatile ( \ | ||
254 | "syscall\n" \ | ||
255 | : "=a" (_ret) \ | ||
256 | : "r"(_arg1), \ | ||
257 | "0"(_num) \ | ||
258 | : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ | ||
259 | ); \ | ||
260 | _ret; \ | ||
261 | }) | ||
262 | |||
263 | #define my_syscall2(num, arg1, arg2) \ | ||
264 | ({ \ | ||
265 | long _ret; \ | ||
266 | register long _num asm("rax") = (num); \ | ||
267 | register long _arg1 asm("rdi") = (long)(arg1); \ | ||
268 | register long _arg2 asm("rsi") = (long)(arg2); \ | ||
269 | \ | ||
270 | asm volatile ( \ | ||
271 | "syscall\n" \ | ||
272 | : "=a" (_ret) \ | ||
273 | : "r"(_arg1), "r"(_arg2), \ | ||
274 | "0"(_num) \ | ||
275 | : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ | ||
276 | ); \ | ||
277 | _ret; \ | ||
278 | }) | ||
279 | |||
280 | #define my_syscall3(num, arg1, arg2, arg3) \ | ||
281 | ({ \ | ||
282 | long _ret; \ | ||
283 | register long _num asm("rax") = (num); \ | ||
284 | register long _arg1 asm("rdi") = (long)(arg1); \ | ||
285 | register long _arg2 asm("rsi") = (long)(arg2); \ | ||
286 | register long _arg3 asm("rdx") = (long)(arg3); \ | ||
287 | \ | ||
288 | asm volatile ( \ | ||
289 | "syscall\n" \ | ||
290 | : "=a" (_ret) \ | ||
291 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), \ | ||
292 | "0"(_num) \ | ||
293 | : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ | ||
294 | ); \ | ||
295 | _ret; \ | ||
296 | }) | ||
297 | |||
298 | #define my_syscall4(num, arg1, arg2, arg3, arg4) \ | ||
299 | ({ \ | ||
300 | long _ret; \ | ||
301 | register long _num asm("rax") = (num); \ | ||
302 | register long _arg1 asm("rdi") = (long)(arg1); \ | ||
303 | register long _arg2 asm("rsi") = (long)(arg2); \ | ||
304 | register long _arg3 asm("rdx") = (long)(arg3); \ | ||
305 | register long _arg4 asm("r10") = (long)(arg4); \ | ||
306 | \ | ||
307 | asm volatile ( \ | ||
308 | "syscall\n" \ | ||
309 | : "=a" (_ret), "=r"(_arg4) \ | ||
310 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \ | ||
311 | "0"(_num) \ | ||
312 | : "rcx", "r8", "r9", "r11", "memory", "cc" \ | ||
313 | ); \ | ||
314 | _ret; \ | ||
315 | }) | ||
316 | |||
317 | #define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \ | ||
318 | ({ \ | ||
319 | long _ret; \ | ||
320 | register long _num asm("rax") = (num); \ | ||
321 | register long _arg1 asm("rdi") = (long)(arg1); \ | ||
322 | register long _arg2 asm("rsi") = (long)(arg2); \ | ||
323 | register long _arg3 asm("rdx") = (long)(arg3); \ | ||
324 | register long _arg4 asm("r10") = (long)(arg4); \ | ||
325 | register long _arg5 asm("r8") = (long)(arg5); \ | ||
326 | \ | ||
327 | asm volatile ( \ | ||
328 | "syscall\n" \ | ||
329 | : "=a" (_ret), "=r"(_arg4), "=r"(_arg5) \ | ||
330 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ | ||
331 | "0"(_num) \ | ||
332 | : "rcx", "r9", "r11", "memory", "cc" \ | ||
333 | ); \ | ||
334 | _ret; \ | ||
335 | }) | ||
336 | |||
337 | #define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \ | ||
338 | ({ \ | ||
339 | long _ret; \ | ||
340 | register long _num asm("rax") = (num); \ | ||
341 | register long _arg1 asm("rdi") = (long)(arg1); \ | ||
342 | register long _arg2 asm("rsi") = (long)(arg2); \ | ||
343 | register long _arg3 asm("rdx") = (long)(arg3); \ | ||
344 | register long _arg4 asm("r10") = (long)(arg4); \ | ||
345 | register long _arg5 asm("r8") = (long)(arg5); \ | ||
346 | register long _arg6 asm("r9") = (long)(arg6); \ | ||
347 | \ | ||
348 | asm volatile ( \ | ||
349 | "syscall\n" \ | ||
350 | : "=a" (_ret), "=r"(_arg4), "=r"(_arg5) \ | ||
351 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ | ||
352 | "r"(_arg6), "0"(_num) \ | ||
353 | : "rcx", "r11", "memory", "cc" \ | ||
354 | ); \ | ||
355 | _ret; \ | ||
356 | }) | ||
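/* The my_syscall*() macros above only marshal arguments into the right
 * registers and issue the syscall instruction; they do not set errno or
 * provide prototypes. A libc-style wrapper is layered on top by picking the
 * macro matching the argument count. Minimal illustrative sketch (not part of
 * this file's diff), assuming __NR_write from the asm/unistd.h included above:
 */
static ssize_t sketch_write(int fd, const void *buf, size_t count)
{
	long ret = my_syscall3(__NR_write, fd, buf, count);

	if (ret < 0) {
		SET_ERRNO(-ret);	/* defined earlier in this header */
		ret = -1;
	}
	return ret;
}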
357 | |||
358 | /* startup code */ | ||
359 | asm(".section .text\n" | ||
360 | ".global _start\n" | ||
361 | "_start:\n" | ||
362 | "pop %rdi\n" // argc (first arg, %rdi) | ||
363 | "mov %rsp, %rsi\n" // argv[] (second arg, %rsi) | ||
364 | "lea 8(%rsi,%rdi,8),%rdx\n" // then a NULL then envp (third arg, %rdx) | ||
365 | "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned when | ||
366 | "sub $8, %rsp\n" // entering the callee | ||
367 | "call main\n" // main() returns the status code, we'll exit with it. | ||
368 | "movzb %al, %rdi\n" // retrieve exit code from 8 lower bits | ||
369 | "mov $60, %rax\n" // NR_exit == 60 | ||
370 | "syscall\n" // really exit | ||
371 | "hlt\n" // ensure it does not return | ||
372 | ""); | ||
373 | |||
374 | /* fcntl / open */ | ||
375 | #define O_RDONLY 0 | ||
376 | #define O_WRONLY 1 | ||
377 | #define O_RDWR 2 | ||
378 | #define O_CREAT 0x40 | ||
379 | #define O_EXCL 0x80 | ||
380 | #define O_NOCTTY 0x100 | ||
381 | #define O_TRUNC 0x200 | ||
382 | #define O_APPEND 0x400 | ||
383 | #define O_NONBLOCK 0x800 | ||
384 | #define O_DIRECTORY 0x10000 | ||
385 | |||
386 | /* The struct returned by the stat() syscall, equivalent to stat64(). The | ||
387 | * syscall returns 116 bytes and stops in the middle of __unused. | ||
388 | */ | ||
389 | struct sys_stat_struct { | ||
390 | unsigned long st_dev; | ||
391 | unsigned long st_ino; | ||
392 | unsigned long st_nlink; | ||
393 | unsigned int st_mode; | ||
394 | unsigned int st_uid; | ||
395 | |||
396 | unsigned int st_gid; | ||
397 | unsigned int __pad0; | ||
398 | unsigned long st_rdev; | ||
399 | long st_size; | ||
400 | long st_blksize; | ||
401 | |||
402 | long st_blocks; | ||
403 | unsigned long st_atime; | ||
404 | unsigned long st_atime_nsec; | ||
405 | unsigned long st_mtime; | ||
406 | |||
407 | unsigned long st_mtime_nsec; | ||
408 | unsigned long st_ctime; | ||
409 | unsigned long st_ctime_nsec; | ||
410 | long __unused[3]; | ||
411 | }; | ||
412 | |||
413 | #elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) | ||
414 | /* Syscalls for i386 : | ||
415 | * - mostly similar to x86_64 | ||
416 | * - registers are 32-bit | ||
417 | * - syscall number is passed in eax | ||
418 | * - arguments are in ebx, ecx, edx, esi, edi, ebp respectively | ||
419 | * - all registers are preserved (except eax of course) | ||
420 | * - the system call is performed by calling int $0x80 | ||
421 | * - syscall return comes in eax | ||
422 | * - the arguments are cast to long and assigned into the target registers | ||
423 | * which are then simply passed as registers to the asm code, so that we | ||
424 | * don't have to experience issues with register constraints. | ||
425 | * - the syscall number is always specified last in order to allow to force | ||
426 | * some registers before (gcc refuses a %-register at the last position). | ||
427 | * | ||
428 | * Also, i386 supports the old_select syscall if newselect is not available | ||
429 | */ | ||
430 | #define __ARCH_WANT_SYS_OLD_SELECT | ||
431 | |||
432 | #define my_syscall0(num) \ | ||
433 | ({ \ | ||
434 | long _ret; \ | ||
435 | register long _num asm("eax") = (num); \ | ||
436 | \ | ||
437 | asm volatile ( \ | ||
438 | "int $0x80\n" \ | ||
439 | : "=a" (_ret) \ | ||
440 | : "0"(_num) \ | ||
441 | : "memory", "cc" \ | ||
442 | ); \ | ||
443 | _ret; \ | ||
444 | }) | ||
445 | |||
446 | #define my_syscall1(num, arg1) \ | ||
447 | ({ \ | ||
448 | long _ret; \ | ||
449 | register long _num asm("eax") = (num); \ | ||
450 | register long _arg1 asm("ebx") = (long)(arg1); \ | ||
451 | \ | ||
452 | asm volatile ( \ | ||
453 | "int $0x80\n" \ | ||
454 | : "=a" (_ret) \ | ||
455 | : "r"(_arg1), \ | ||
456 | "0"(_num) \ | ||
457 | : "memory", "cc" \ | ||
458 | ); \ | ||
459 | _ret; \ | ||
460 | }) | ||
461 | |||
462 | #define my_syscall2(num, arg1, arg2) \ | ||
463 | ({ \ | ||
464 | long _ret; \ | ||
465 | register long _num asm("eax") = (num); \ | ||
466 | register long _arg1 asm("ebx") = (long)(arg1); \ | ||
467 | register long _arg2 asm("ecx") = (long)(arg2); \ | ||
468 | \ | ||
469 | asm volatile ( \ | ||
470 | "int $0x80\n" \ | ||
471 | : "=a" (_ret) \ | ||
472 | : "r"(_arg1), "r"(_arg2), \ | ||
473 | "0"(_num) \ | ||
474 | : "memory", "cc" \ | ||
475 | ); \ | ||
476 | _ret; \ | ||
477 | }) | ||
478 | |||
479 | #define my_syscall3(num, arg1, arg2, arg3) \ | ||
480 | ({ \ | ||
481 | long _ret; \ | ||
482 | register long _num asm("eax") = (num); \ | ||
483 | register long _arg1 asm("ebx") = (long)(arg1); \ | ||
484 | register long _arg2 asm("ecx") = (long)(arg2); \ | ||
485 | register long _arg3 asm("edx") = (long)(arg3); \ | ||
486 | \ | ||
487 | asm volatile ( \ | ||
488 | "int $0x80\n" \ | ||
489 | : "=a" (_ret) \ | ||
490 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), \ | ||
491 | "0"(_num) \ | ||
492 | : "memory", "cc" \ | ||
493 | ); \ | ||
494 | _ret; \ | ||
495 | }) | ||
496 | |||
497 | #define my_syscall4(num, arg1, arg2, arg3, arg4) \ | ||
498 | ({ \ | ||
499 | long _ret; \ | ||
500 | register long _num asm("eax") = (num); \ | ||
501 | register long _arg1 asm("ebx") = (long)(arg1); \ | ||
502 | register long _arg2 asm("ecx") = (long)(arg2); \ | ||
503 | register long _arg3 asm("edx") = (long)(arg3); \ | ||
504 | register long _arg4 asm("esi") = (long)(arg4); \ | ||
505 | \ | ||
506 | asm volatile ( \ | ||
507 | "int $0x80\n" \ | ||
508 | : "=a" (_ret) \ | ||
509 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \ | ||
510 | "0"(_num) \ | ||
511 | : "memory", "cc" \ | ||
512 | ); \ | ||
513 | _ret; \ | ||
514 | }) | ||
515 | |||
516 | #define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \ | ||
517 | ({ \ | ||
518 | long _ret; \ | ||
519 | register long _num asm("eax") = (num); \ | ||
520 | register long _arg1 asm("ebx") = (long)(arg1); \ | ||
521 | register long _arg2 asm("ecx") = (long)(arg2); \ | ||
522 | register long _arg3 asm("edx") = (long)(arg3); \ | ||
523 | register long _arg4 asm("esi") = (long)(arg4); \ | ||
524 | register long _arg5 asm("edi") = (long)(arg5); \ | ||
525 | \ | ||
526 | asm volatile ( \ | ||
527 | "int $0x80\n" \ | ||
528 | : "=a" (_ret) \ | ||
529 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ | ||
530 | "0"(_num) \ | ||
531 | : "memory", "cc" \ | ||
532 | ); \ | ||
533 | _ret; \ | ||
534 | }) | ||
535 | |||
536 | /* startup code */ | ||
537 | asm(".section .text\n" | ||
538 | ".global _start\n" | ||
539 | "_start:\n" | ||
540 | "pop %eax\n" // argc (first arg, %eax) | ||
541 | "mov %esp, %ebx\n" // argv[] (second arg, %ebx) | ||
542 | "lea 4(%ebx,%eax,4),%ecx\n" // then a NULL then envp (third arg, %ecx) | ||
543 | "and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned when | ||
544 | "push %ecx\n" // push all registers on the stack so that we | ||
545 | "push %ebx\n" // support both regparm and plain stack modes | ||
546 | "push %eax\n" | ||
547 | "call main\n" // main() returns the status code in %eax | ||
548 | "movzbl %al, %ebx\n" // retrieve exit code from lower 8 bits | ||
549 | "movl $1, %eax\n" // NR_exit == 1 | ||
550 | "int $0x80\n" // exit now | ||
551 | "hlt\n" // ensure it does not | ||
552 | ""); | ||
553 | |||
554 | /* fcntl / open */ | ||
555 | #define O_RDONLY 0 | ||
556 | #define O_WRONLY 1 | ||
557 | #define O_RDWR 2 | ||
558 | #define O_CREAT 0x40 | ||
559 | #define O_EXCL 0x80 | ||
560 | #define O_NOCTTY 0x100 | ||
561 | #define O_TRUNC 0x200 | ||
562 | #define O_APPEND 0x400 | ||
563 | #define O_NONBLOCK 0x800 | ||
564 | #define O_DIRECTORY 0x10000 | ||
565 | |||
566 | /* The struct returned by the stat() syscall, 32-bit only, the syscall returns | ||
567 | * exactly 56 bytes (stops before the unused array). | ||
568 | */ | ||
569 | struct sys_stat_struct { | ||
570 | unsigned long st_dev; | ||
571 | unsigned long st_ino; | ||
572 | unsigned short st_mode; | ||
573 | unsigned short st_nlink; | ||
574 | unsigned short st_uid; | ||
575 | unsigned short st_gid; | ||
576 | |||
577 | unsigned long st_rdev; | ||
578 | unsigned long st_size; | ||
579 | unsigned long st_blksize; | ||
580 | unsigned long st_blocks; | ||
581 | |||
582 | unsigned long st_atime; | ||
583 | unsigned long st_atime_nsec; | ||
584 | unsigned long st_mtime; | ||
585 | unsigned long st_mtime_nsec; | ||
586 | |||
587 | unsigned long st_ctime; | ||
588 | unsigned long st_ctime_nsec; | ||
589 | unsigned long __unused[2]; | ||
590 | }; | ||
591 | |||
592 | #elif defined(__ARM_EABI__) | ||
593 | /* Syscalls for ARM in ARM or Thumb modes : | ||
594 | * - registers are 32-bit | ||
595 | * - stack is 8-byte aligned | ||
596 | * ( http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html) | ||
597 | * - syscall number is passed in r7 | ||
598 | * - arguments are in r0, r1, r2, r3, r4, r5 | ||
599 | * - the system call is performed by calling svc #0 | ||
600 | * - syscall return comes in r0. | ||
601 | * - only lr is clobbered. | ||
602 | * - the arguments are cast to long and assigned into the target registers | ||
603 | * which are then simply passed as registers to the asm code, so that we | ||
604 | * don't have to experience issues with register constraints. | ||
605 | * - the syscall number is always specified last in order to allow to force | ||
606 | * some registers before (gcc refuses a %-register at the last position). | ||
607 | * | ||
608 | * Also, ARM supports the old_select syscall if newselect is not available | ||
609 | */ | ||
610 | #define __ARCH_WANT_SYS_OLD_SELECT | ||
611 | |||
612 | #define my_syscall0(num) \ | ||
613 | ({ \ | ||
614 | register long _num asm("r7") = (num); \ | ||
615 | register long _arg1 asm("r0"); \ | ||
616 | \ | ||
617 | asm volatile ( \ | ||
618 | "svc #0\n" \ | ||
619 | : "=r"(_arg1) \ | ||
620 | : "r"(_num) \ | ||
621 | : "memory", "cc", "lr" \ | ||
622 | ); \ | ||
623 | _arg1; \ | ||
624 | }) | ||
625 | |||
626 | #define my_syscall1(num, arg1) \ | ||
627 | ({ \ | ||
628 | register long _num asm("r7") = (num); \ | ||
629 | register long _arg1 asm("r0") = (long)(arg1); \ | ||
630 | \ | ||
631 | asm volatile ( \ | ||
632 | "svc #0\n" \ | ||
633 | : "=r"(_arg1) \ | ||
634 | : "r"(_arg1), \ | ||
635 | "r"(_num) \ | ||
636 | : "memory", "cc", "lr" \ | ||
637 | ); \ | ||
638 | _arg1; \ | ||
639 | }) | ||
640 | |||
641 | #define my_syscall2(num, arg1, arg2) \ | ||
642 | ({ \ | ||
643 | register long _num asm("r7") = (num); \ | ||
644 | register long _arg1 asm("r0") = (long)(arg1); \ | ||
645 | register long _arg2 asm("r1") = (long)(arg2); \ | ||
646 | \ | ||
647 | asm volatile ( \ | ||
648 | "svc #0\n" \ | ||
649 | : "=r"(_arg1) \ | ||
650 | : "r"(_arg1), "r"(_arg2), \ | ||
651 | "r"(_num) \ | ||
652 | : "memory", "cc", "lr" \ | ||
653 | ); \ | ||
654 | _arg1; \ | ||
655 | }) | ||
656 | |||
657 | #define my_syscall3(num, arg1, arg2, arg3) \ | ||
658 | ({ \ | ||
659 | register long _num asm("r7") = (num); \ | ||
660 | register long _arg1 asm("r0") = (long)(arg1); \ | ||
661 | register long _arg2 asm("r1") = (long)(arg2); \ | ||
662 | register long _arg3 asm("r2") = (long)(arg3); \ | ||
663 | \ | ||
664 | asm volatile ( \ | ||
665 | "svc #0\n" \ | ||
666 | : "=r"(_arg1) \ | ||
667 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), \ | ||
668 | "r"(_num) \ | ||
669 | : "memory", "cc", "lr" \ | ||
670 | ); \ | ||
671 | _arg1; \ | ||
672 | }) | ||
673 | |||
674 | #define my_syscall4(num, arg1, arg2, arg3, arg4) \ | ||
675 | ({ \ | ||
676 | register long _num asm("r7") = (num); \ | ||
677 | register long _arg1 asm("r0") = (long)(arg1); \ | ||
678 | register long _arg2 asm("r1") = (long)(arg2); \ | ||
679 | register long _arg3 asm("r2") = (long)(arg3); \ | ||
680 | register long _arg4 asm("r3") = (long)(arg4); \ | ||
681 | \ | ||
682 | asm volatile ( \ | ||
683 | "svc #0\n" \ | ||
684 | : "=r"(_arg1) \ | ||
685 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \ | ||
686 | "r"(_num) \ | ||
687 | : "memory", "cc", "lr" \ | ||
688 | ); \ | ||
689 | _arg1; \ | ||
690 | }) | ||
691 | |||
692 | #define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \ | ||
693 | ({ \ | ||
694 | register long _num asm("r7") = (num); \ | ||
695 | register long _arg1 asm("r0") = (long)(arg1); \ | ||
696 | register long _arg2 asm("r1") = (long)(arg2); \ | ||
697 | register long _arg3 asm("r2") = (long)(arg3); \ | ||
698 | register long _arg4 asm("r3") = (long)(arg4); \ | ||
699 | register long _arg5 asm("r4") = (long)(arg5); \ | ||
700 | \ | ||
701 | asm volatile ( \ | ||
702 | "svc #0\n" \ | ||
703 | : "=r" (_arg1) \ | ||
704 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ | ||
705 | "r"(_num) \ | ||
706 | : "memory", "cc", "lr" \ | ||
707 | ); \ | ||
708 | _arg1; \ | ||
709 | }) | ||
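As a quick illustration of how these macros are meant to be consumed (a hypothetical sketch, not part of the original header; __NR_getpid is the same syscall number used by sys_getpid() further down), a zero-argument call reduces to a single macro invocation whose result is either the syscall's return value or a negative errno:

/* hypothetical usage sketch: issue getpid() directly through the raw macro */
static pid_t example_getpid(void)
{
	return my_syscall0(__NR_getpid); /* errors would come back as -errno */
}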
710 | |||
711 | /* startup code */ | ||
712 | asm(".section .text\n" | ||
713 | ".global _start\n" | ||
714 | "_start:\n" | ||
715 | #if defined(__THUMBEB__) || defined(__THUMBEL__) | ||
716 | /* We enter here in 32-bit mode but if some previous functions were in | ||
717 | * 16-bit mode, the assembler cannot know, so we need to tell it we're in | ||
718 | * 32-bit now, then switch to 16-bit (is there a better way to do it than | ||
719 | * adding 1 by hand?) and tell the asm we're now in 16-bit mode so that | ||
720 | * it generates correct instructions. Note that we do not support thumb1. | ||
721 | */ | ||
722 | ".code 32\n" | ||
723 | "add r0, pc, #1\n" | ||
724 | "bx r0\n" | ||
725 | ".code 16\n" | ||
726 | #endif | ||
727 | "pop {%r0}\n" // argc was in the stack | ||
728 | "mov %r1, %sp\n" // argv = sp | ||
729 | "add %r2, %r1, %r0, lsl #2\n" // envp = argv + 4*argc ... | ||
730 | "add %r2, %r2, $4\n" // ... + 4 | ||
731 | "and %r3, %r1, $-8\n" // AAPCS : sp must be 8-byte aligned in the | ||
732 | "mov %sp, %r3\n" // callee, an bl doesn't push (lr=pc) | ||
733 | "bl main\n" // main() returns the status code, we'll exit with it. | ||
734 | "and %r0, %r0, $0xff\n" // limit exit code to 8 bits | ||
735 | "movs r7, $1\n" // NR_exit == 1 | ||
736 | "svc $0x00\n" | ||
737 | ""); | ||
738 | |||
739 | /* fcntl / open */ | ||
740 | #define O_RDONLY 0 | ||
741 | #define O_WRONLY 1 | ||
742 | #define O_RDWR 2 | ||
743 | #define O_CREAT 0x40 | ||
744 | #define O_EXCL 0x80 | ||
745 | #define O_NOCTTY 0x100 | ||
746 | #define O_TRUNC 0x200 | ||
747 | #define O_APPEND 0x400 | ||
748 | #define O_NONBLOCK 0x800 | ||
749 | #define O_DIRECTORY 0x4000 | ||
750 | |||
751 | /* The struct returned by the stat() syscall, 32-bit only; the syscall returns | ||
752 | * exactly 56 bytes (it stops before the unused array). On big endian, the | ||
753 | * format differs because the device fields are returned as shorts only. | ||
754 | */ | ||
755 | struct sys_stat_struct { | ||
756 | #if defined(__ARMEB__) | ||
757 | unsigned short st_dev; | ||
758 | unsigned short __pad1; | ||
759 | #else | ||
760 | unsigned long st_dev; | ||
761 | #endif | ||
762 | unsigned long st_ino; | ||
763 | unsigned short st_mode; | ||
764 | unsigned short st_nlink; | ||
765 | unsigned short st_uid; | ||
766 | unsigned short st_gid; | ||
767 | #if defined(__ARMEB__) | ||
768 | unsigned short st_rdev; | ||
769 | unsigned short __pad2; | ||
770 | #else | ||
771 | unsigned long st_rdev; | ||
772 | #endif | ||
773 | unsigned long st_size; | ||
774 | unsigned long st_blksize; | ||
775 | unsigned long st_blocks; | ||
776 | unsigned long st_atime; | ||
777 | unsigned long st_atime_nsec; | ||
778 | unsigned long st_mtime; | ||
779 | unsigned long st_mtime_nsec; | ||
780 | unsigned long st_ctime; | ||
781 | unsigned long st_ctime_nsec; | ||
782 | unsigned long __unused[2]; | ||
783 | }; | ||
784 | |||
785 | #elif defined(__aarch64__) | ||
786 | /* Syscalls for AARCH64: | ||
787 | * - registers are 64-bit | ||
788 | * - stack is 16-byte aligned | ||
789 | * - syscall number is passed in x8 | ||
790 | * - arguments are in x0, x1, x2, x3, x4, x5 | ||
791 | * - the system call is performed by calling svc 0 | ||
792 | * - syscall return comes in x0. | ||
793 | * - the arguments are cast to long and assigned into the target registers | ||
794 | * which are then simply passed as registers to the asm code, so that we | ||
795 | * don't have to experience issues with register constraints. | ||
796 | * | ||
797 | * On aarch64, select() is not implemented so we have to use pselect6(). | ||
798 | */ | ||
799 | #define __ARCH_WANT_SYS_PSELECT6 | ||
800 | |||
801 | #define my_syscall0(num) \ | ||
802 | ({ \ | ||
803 | register long _num asm("x8") = (num); \ | ||
804 | register long _arg1 asm("x0"); \ | ||
805 | \ | ||
806 | asm volatile ( \ | ||
807 | "svc #0\n" \ | ||
808 | : "=r"(_arg1) \ | ||
809 | : "r"(_num) \ | ||
810 | : "memory", "cc" \ | ||
811 | ); \ | ||
812 | _arg1; \ | ||
813 | }) | ||
814 | |||
815 | #define my_syscall1(num, arg1) \ | ||
816 | ({ \ | ||
817 | register long _num asm("x8") = (num); \ | ||
818 | register long _arg1 asm("x0") = (long)(arg1); \ | ||
819 | \ | ||
820 | asm volatile ( \ | ||
821 | "svc #0\n" \ | ||
822 | : "=r"(_arg1) \ | ||
823 | : "r"(_arg1), \ | ||
824 | "r"(_num) \ | ||
825 | : "memory", "cc" \ | ||
826 | ); \ | ||
827 | _arg1; \ | ||
828 | }) | ||
829 | |||
830 | #define my_syscall2(num, arg1, arg2) \ | ||
831 | ({ \ | ||
832 | register long _num asm("x8") = (num); \ | ||
833 | register long _arg1 asm("x0") = (long)(arg1); \ | ||
834 | register long _arg2 asm("x1") = (long)(arg2); \ | ||
835 | \ | ||
836 | asm volatile ( \ | ||
837 | "svc #0\n" \ | ||
838 | : "=r"(_arg1) \ | ||
839 | : "r"(_arg1), "r"(_arg2), \ | ||
840 | "r"(_num) \ | ||
841 | : "memory", "cc" \ | ||
842 | ); \ | ||
843 | _arg1; \ | ||
844 | }) | ||
845 | |||
846 | #define my_syscall3(num, arg1, arg2, arg3) \ | ||
847 | ({ \ | ||
848 | register long _num asm("x8") = (num); \ | ||
849 | register long _arg1 asm("x0") = (long)(arg1); \ | ||
850 | register long _arg2 asm("x1") = (long)(arg2); \ | ||
851 | register long _arg3 asm("x2") = (long)(arg3); \ | ||
852 | \ | ||
853 | asm volatile ( \ | ||
854 | "svc #0\n" \ | ||
855 | : "=r"(_arg1) \ | ||
856 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), \ | ||
857 | "r"(_num) \ | ||
858 | : "memory", "cc" \ | ||
859 | ); \ | ||
860 | _arg1; \ | ||
861 | }) | ||
862 | |||
863 | #define my_syscall4(num, arg1, arg2, arg3, arg4) \ | ||
864 | ({ \ | ||
865 | register long _num asm("x8") = (num); \ | ||
866 | register long _arg1 asm("x0") = (long)(arg1); \ | ||
867 | register long _arg2 asm("x1") = (long)(arg2); \ | ||
868 | register long _arg3 asm("x2") = (long)(arg3); \ | ||
869 | register long _arg4 asm("x3") = (long)(arg4); \ | ||
870 | \ | ||
871 | asm volatile ( \ | ||
872 | "svc #0\n" \ | ||
873 | : "=r"(_arg1) \ | ||
874 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \ | ||
875 | "r"(_num) \ | ||
876 | : "memory", "cc" \ | ||
877 | ); \ | ||
878 | _arg1; \ | ||
879 | }) | ||
880 | |||
881 | #define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \ | ||
882 | ({ \ | ||
883 | register long _num asm("x8") = (num); \ | ||
884 | register long _arg1 asm("x0") = (long)(arg1); \ | ||
885 | register long _arg2 asm("x1") = (long)(arg2); \ | ||
886 | register long _arg3 asm("x2") = (long)(arg3); \ | ||
887 | register long _arg4 asm("x3") = (long)(arg4); \ | ||
888 | register long _arg5 asm("x4") = (long)(arg5); \ | ||
889 | \ | ||
890 | asm volatile ( \ | ||
891 | "svc #0\n" \ | ||
892 | : "=r" (_arg1) \ | ||
893 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ | ||
894 | "r"(_num) \ | ||
895 | : "memory", "cc" \ | ||
896 | ); \ | ||
897 | _arg1; \ | ||
898 | }) | ||
899 | |||
900 | #define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \ | ||
901 | ({ \ | ||
902 | register long _num asm("x8") = (num); \ | ||
903 | register long _arg1 asm("x0") = (long)(arg1); \ | ||
904 | register long _arg2 asm("x1") = (long)(arg2); \ | ||
905 | register long _arg3 asm("x2") = (long)(arg3); \ | ||
906 | register long _arg4 asm("x3") = (long)(arg4); \ | ||
907 | register long _arg5 asm("x4") = (long)(arg5); \ | ||
908 | register long _arg6 asm("x5") = (long)(arg6); \ | ||
909 | \ | ||
910 | asm volatile ( \ | ||
911 | "svc #0\n" \ | ||
912 | : "=r" (_arg1) \ | ||
913 | : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ | ||
914 | "r"(_arg6), "r"(_num) \ | ||
915 | : "memory", "cc" \ | ||
916 | ); \ | ||
917 | _arg1; \ | ||
918 | }) | ||
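The six-argument variant above exists mostly because, as noted in the comment, aarch64 has no select() and only provides pselect6(). A hypothetical sketch of such a call, passing NULL for the timeout and signal mask (the generic sys_select() further down does the timeout conversion properly):

/* hypothetical sketch: block until an fd in *rfds becomes readable */
static long example_pselect(int nfds, fd_set *rfds)
{
	return my_syscall6(__NR_pselect6, nfds, rfds, NULL, NULL, NULL, NULL);
}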
919 | |||
920 | /* startup code */ | ||
921 | asm(".section .text\n" | ||
922 | ".global _start\n" | ||
923 | "_start:\n" | ||
924 | "ldr x0, [sp]\n" // argc (x0) was in the stack | ||
925 | "add x1, sp, 8\n" // argv (x1) = sp | ||
926 | "lsl x2, x0, 3\n" // envp (x2) = 8*argc ... | ||
927 | "add x2, x2, 8\n" // + 8 (skip null) | ||
928 | "add x2, x2, x1\n" // + argv | ||
929 | "and sp, x1, -16\n" // sp must be 16-byte aligned in the callee | ||
930 | "bl main\n" // main() returns the status code, we'll exit with it. | ||
931 | "and x0, x0, 0xff\n" // limit exit code to 8 bits | ||
932 | "mov x8, 93\n" // NR_exit == 93 | ||
933 | "svc #0\n" | ||
934 | ""); | ||
935 | |||
936 | /* fcntl / open */ | ||
937 | #define O_RDONLY 0 | ||
938 | #define O_WRONLY 1 | ||
939 | #define O_RDWR 2 | ||
940 | #define O_CREAT 0x40 | ||
941 | #define O_EXCL 0x80 | ||
942 | #define O_NOCTTY 0x100 | ||
943 | #define O_TRUNC 0x200 | ||
944 | #define O_APPEND 0x400 | ||
945 | #define O_NONBLOCK 0x800 | ||
946 | #define O_DIRECTORY 0x4000 | ||
947 | |||
948 | /* The struct returned by the newfstatat() syscall. It differs slightly from | ||
949 | * x86_64's stat struct in field ordering, so be careful. | ||
950 | */ | ||
951 | struct sys_stat_struct { | ||
952 | unsigned long st_dev; | ||
953 | unsigned long st_ino; | ||
954 | unsigned int st_mode; | ||
955 | unsigned int st_nlink; | ||
956 | unsigned int st_uid; | ||
957 | unsigned int st_gid; | ||
958 | |||
959 | unsigned long st_rdev; | ||
960 | unsigned long __pad1; | ||
961 | long st_size; | ||
962 | int st_blksize; | ||
963 | int __pad2; | ||
964 | |||
965 | long st_blocks; | ||
966 | long st_atime; | ||
967 | unsigned long st_atime_nsec; | ||
968 | long st_mtime; | ||
969 | |||
970 | unsigned long st_mtime_nsec; | ||
971 | long st_ctime; | ||
972 | unsigned long st_ctime_nsec; | ||
973 | unsigned int __unused[2]; | ||
974 | }; | ||
975 | |||
976 | #elif defined(__mips__) && defined(_ABIO32) | ||
977 | /* Syscalls for MIPS ABI O32: | ||
978 | * - WARNING! there's always a branch delay slot! | ||
979 | * - WARNING again, the syntax is different, registers take a '$' and numbers | ||
980 | * do not. | ||
981 | * - registers are 32-bit | ||
982 | * - stack is 8-byte aligned | ||
983 | * - syscall number is passed in v0 (starts at 0xfa0). | ||
984 | * - arguments are in a0, a1, a2, a3, then the stack. The caller needs to | ||
985 | * leave some room in the stack for the callee to save a0..a3 if needed. | ||
986 | * - Many registers are clobbered, in fact only a0..a2 and s0..s8 are | ||
987 | * preserved. See: https://www.linux-mips.org/wiki/Syscall as well as | ||
988 | * scall32-o32.S in the kernel sources. | ||
989 | * - the system call is performed by calling "syscall" | ||
990 | * - syscall return comes in v0, and register a3 needs to be checked to know | ||
991 | * if an error occurred, in which case errno is in v0. | ||
992 | * - the arguments are cast to long and assigned into the target registers | ||
993 | * which are then simply passed as registers to the asm code, so that we | ||
994 | * don't have to experience issues with register constraints. | ||
995 | */ | ||
996 | |||
997 | #define my_syscall0(num) \ | ||
998 | ({ \ | ||
999 | register long _num asm("v0") = (num); \ | ||
1000 | register long _arg4 asm("a3"); \ | ||
1001 | \ | ||
1002 | asm volatile ( \ | ||
1003 | "addiu $sp, $sp, -32\n" \ | ||
1004 | "syscall\n" \ | ||
1005 | "addiu $sp, $sp, 32\n" \ | ||
1006 | : "=r"(_num), "=r"(_arg4) \ | ||
1007 | : "r"(_num) \ | ||
1008 | : "memory", "cc", "at", "v1", "hi", "lo", \ | ||
1009 | \ | ||
1010 | ); \ | ||
1011 | _arg4 ? -_num : _num; \ | ||
1012 | }) | ||
1013 | |||
1014 | #define my_syscall1(num, arg1) \ | ||
1015 | ({ \ | ||
1016 | register long _num asm("v0") = (num); \ | ||
1017 | register long _arg1 asm("a0") = (long)(arg1); \ | ||
1018 | register long _arg4 asm("a3"); \ | ||
1019 | \ | ||
1020 | asm volatile ( \ | ||
1021 | "addiu $sp, $sp, -32\n" \ | ||
1022 | "syscall\n" \ | ||
1023 | "addiu $sp, $sp, 32\n" \ | ||
1024 | : "=r"(_num), "=r"(_arg4) \ | ||
1025 | : "0"(_num), \ | ||
1026 | "r"(_arg1) \ | ||
1027 | : "memory", "cc", "at", "v1", "hi", "lo", \ | ||
1028 | \ | ||
1029 | ); \ | ||
1030 | _arg4 ? -_num : _num; \ | ||
1031 | }) | ||
1032 | |||
1033 | #define my_syscall2(num, arg1, arg2) \ | ||
1034 | ({ \ | ||
1035 | register long _num asm("v0") = (num); \ | ||
1036 | register long _arg1 asm("a0") = (long)(arg1); \ | ||
1037 | register long _arg2 asm("a1") = (long)(arg2); \ | ||
1038 | register long _arg4 asm("a3"); \ | ||
1039 | \ | ||
1040 | asm volatile ( \ | ||
1041 | "addiu $sp, $sp, -32\n" \ | ||
1042 | "syscall\n" \ | ||
1043 | "addiu $sp, $sp, 32\n" \ | ||
1044 | : "=r"(_num), "=r"(_arg4) \ | ||
1045 | : "0"(_num), \ | ||
1046 | "r"(_arg1), "r"(_arg2) \ | ||
1047 | : "memory", "cc", "at", "v1", "hi", "lo", \ | ||
1048 | \ | ||
1049 | ); \ | ||
1050 | _arg4 ? -_num : _num; \ | ||
1051 | }) | ||
1052 | |||
1053 | #define my_syscall3(num, arg1, arg2, arg3) \ | ||
1054 | ({ \ | ||
1055 | register long _num asm("v0") = (num); \ | ||
1056 | register long _arg1 asm("a0") = (long)(arg1); \ | ||
1057 | register long _arg2 asm("a1") = (long)(arg2); \ | ||
1058 | register long _arg3 asm("a2") = (long)(arg3); \ | ||
1059 | register long _arg4 asm("a3"); \ | ||
1060 | \ | ||
1061 | asm volatile ( \ | ||
1062 | "addiu $sp, $sp, -32\n" \ | ||
1063 | "syscall\n" \ | ||
1064 | "addiu $sp, $sp, 32\n" \ | ||
1065 | : "=r"(_num), "=r"(_arg4) \ | ||
1066 | : "0"(_num), \ | ||
1067 | "r"(_arg1), "r"(_arg2), "r"(_arg3) \ | ||
1068 | : "memory", "cc", "at", "v1", "hi", "lo", \ | ||
1069 | \ | ||
1070 | ); \ | ||
1071 | _arg4 ? -_num : _num; \ | ||
1072 | }) | ||
1073 | |||
1074 | #define my_syscall4(num, arg1, arg2, arg3, arg4) \ | ||
1075 | ({ \ | ||
1076 | register long _num asm("v0") = (num); \ | ||
1077 | register long _arg1 asm("a0") = (long)(arg1); \ | ||
1078 | register long _arg2 asm("a1") = (long)(arg2); \ | ||
1079 | register long _arg3 asm("a2") = (long)(arg3); \ | ||
1080 | register long _arg4 asm("a3") = (long)(arg4); \ | ||
1081 | \ | ||
1082 | asm volatile ( \ | ||
1083 | "addiu $sp, $sp, -32\n" \ | ||
1084 | "syscall\n" \ | ||
1085 | "addiu $sp, $sp, 32\n" \ | ||
1086 | : "=r" (_num), "=r"(_arg4) \ | ||
1087 | : "0"(_num), \ | ||
1088 | "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \ | ||
1089 | : "memory", "cc", "at", "v1", "hi", "lo", \ | ||
1090 | \ | ||
1091 | ); \ | ||
1092 | _arg4 ? -_num : _num; \ | ||
1093 | }) | ||
1094 | |||
1095 | #define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \ | ||
1096 | ({ \ | ||
1097 | register long _num asm("v0") = (num); \ | ||
1098 | register long _arg1 asm("a0") = (long)(arg1); \ | ||
1099 | register long _arg2 asm("a1") = (long)(arg2); \ | ||
1100 | register long _arg3 asm("a2") = (long)(arg3); \ | ||
1101 | register long _arg4 asm("a3") = (long)(arg4); \ | ||
1102 | register long _arg5 = (long)(arg5); \ | ||
1103 | \ | ||
1104 | asm volatile ( \ | ||
1105 | "addiu $sp, $sp, -32\n" \ | ||
1106 | "sw %7, 16($sp)\n" \ | ||
1107 | "syscall\n " \ | ||
1108 | "addiu $sp, $sp, 32\n" \ | ||
1109 | : "=r" (_num), "=r"(_arg4) \ | ||
1110 | : "0"(_num), \ | ||
1111 | "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \ | ||
1112 | : "memory", "cc", "at", "v1", "hi", "lo", \ | ||
1113 | \ | ||
1114 | ); \ | ||
1115 | _arg4 ? -_num : _num; \ | ||
1116 | }) | ||
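Since every macro above folds the a3 error flag into a negative return value (_arg4 ? -_num : _num), MIPS callers see the same -errno convention as the other architectures. A hypothetical sketch, with 0 standing in for O_RDONLY which is defined a few lines below:

/* hypothetical sketch: a failed open comes back as -errno, e.g. -ENOENT */
static long example_open_ro(const char *path)
{
	return my_syscall3(__NR_open, path, 0 /* O_RDONLY */, 0);
}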
1117 | |||
1118 | /* startup code, note that it's called __start on MIPS */ | ||
1119 | asm(".section .text\n" | ||
1120 | ".set nomips16\n" | ||
1121 | ".global __start\n" | ||
1122 | ".set noreorder\n" | ||
1123 | ".option pic0\n" | ||
1124 | ".ent __start\n" | ||
1125 | "__start:\n" | ||
1126 | "lw $a0,($sp)\n" // argc was in the stack | ||
1127 | "addiu $a1, $sp, 4\n" // argv = sp + 4 | ||
1128 | "sll $a2, $a0, 2\n" // a2 = argc * 4 | ||
1129 | "add $a2, $a2, $a1\n" // envp = argv + 4*argc ... | ||
1130 | "addiu $a2, $a2, 4\n" // ... + 4 | ||
1131 | "li $t0, -8\n" | ||
1132 | "and $sp, $sp, $t0\n" // sp must be 8-byte aligned | ||
1133 | "addiu $sp,$sp,-16\n" // the callee expects to save a0..a3 there! | ||
1134 | "jal main\n" // main() returns the status code, we'll exit with it. | ||
1135 | "nop\n" // delayed slot | ||
1136 | "and $a0, $v0, 0xff\n" // limit exit code to 8 bits | ||
1137 | "li $v0, 4001\n" // NR_exit == 4001 | ||
1138 | "syscall\n" | ||
1139 | ".end __start\n" | ||
1140 | ""); | ||
1141 | |||
1142 | /* fcntl / open */ | ||
1143 | #define O_RDONLY 0 | ||
1144 | #define O_WRONLY 1 | ||
1145 | #define O_RDWR 2 | ||
1146 | #define O_APPEND 0x0008 | ||
1147 | #define O_NONBLOCK 0x0080 | ||
1148 | #define O_CREAT 0x0100 | ||
1149 | #define O_TRUNC 0x0200 | ||
1150 | #define O_EXCL 0x0400 | ||
1151 | #define O_NOCTTY 0x0800 | ||
1152 | #define O_DIRECTORY 0x10000 | ||
1153 | |||
1154 | /* The struct returned by the stat() syscall. 88 bytes are returned by the | ||
1155 | * syscall. | ||
1156 | */ | ||
1157 | struct sys_stat_struct { | ||
1158 | unsigned int st_dev; | ||
1159 | long st_pad1[3]; | ||
1160 | unsigned long st_ino; | ||
1161 | unsigned int st_mode; | ||
1162 | unsigned int st_nlink; | ||
1163 | unsigned int st_uid; | ||
1164 | unsigned int st_gid; | ||
1165 | unsigned int st_rdev; | ||
1166 | long st_pad2[2]; | ||
1167 | long st_size; | ||
1168 | long st_pad3; | ||
1169 | long st_atime; | ||
1170 | long st_atime_nsec; | ||
1171 | long st_mtime; | ||
1172 | long st_mtime_nsec; | ||
1173 | long st_ctime; | ||
1174 | long st_ctime_nsec; | ||
1175 | long st_blksize; | ||
1176 | long st_blocks; | ||
1177 | long st_pad4[14]; | ||
1178 | }; | ||
1179 | |||
1180 | #endif | ||
1181 | |||
1182 | |||
1183 | /* Below are the C functions used to declare the raw syscalls. They try to be | ||
1184 | * architecture-agnostic, and return either a success value or -errno on | ||
1185 | * failure. Declaring them static will let them be inlined in most cases, but | ||
1186 | * it's still possible to reference them by a pointer if needed. | ||
1187 | */ | ||
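For example (a hypothetical sketch relying on sys_write() and sys_exit() defined below in this section), an error shows up directly as a negative errno value in the return, not in a global variable:

/* hypothetical sketch: raw syscalls report errors as a negative return value */
static void example_report(void)
{
	ssize_t ret = sys_write(2, "boot failed\n", 12);

	if (ret < 0)            /* e.g. -EBADF if stderr was closed */
		sys_exit(1);
}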
1188 | static __attribute__((unused)) | ||
1189 | void *sys_brk(void *addr) | ||
1190 | { | ||
1191 | return (void *)my_syscall1(__NR_brk, addr); | ||
1192 | } | ||
1193 | |||
1194 | static __attribute__((noreturn,unused)) | ||
1195 | void sys_exit(int status) | ||
1196 | { | ||
1197 | my_syscall1(__NR_exit, status & 255); | ||
1198 | while (1); // silence the "noreturn" warnings. | ||
1199 | } | ||
1200 | |||
1201 | static __attribute__((unused)) | ||
1202 | int sys_chdir(const char *path) | ||
1203 | { | ||
1204 | return my_syscall1(__NR_chdir, path); | ||
1205 | } | ||
1206 | |||
1207 | static __attribute__((unused)) | ||
1208 | int sys_chmod(const char *path, mode_t mode) | ||
1209 | { | ||
1210 | #ifdef __NR_fchmodat | ||
1211 | return my_syscall4(__NR_fchmodat, AT_FDCWD, path, mode, 0); | ||
1212 | #else | ||
1213 | return my_syscall2(__NR_chmod, path, mode); | ||
1214 | #endif | ||
1215 | } | ||
1216 | |||
1217 | static __attribute__((unused)) | ||
1218 | int sys_chown(const char *path, uid_t owner, gid_t group) | ||
1219 | { | ||
1220 | #ifdef __NR_fchownat | ||
1221 | return my_syscall5(__NR_fchownat, AT_FDCWD, path, owner, group, 0); | ||
1222 | #else | ||
1223 | return my_syscall3(__NR_chown, path, owner, group); | ||
1224 | #endif | ||
1225 | } | ||
1226 | |||
1227 | static __attribute__((unused)) | ||
1228 | int sys_chroot(const char *path) | ||
1229 | { | ||
1230 | return my_syscall1(__NR_chroot, path); | ||
1231 | } | ||
1232 | |||
1233 | static __attribute__((unused)) | ||
1234 | int sys_close(int fd) | ||
1235 | { | ||
1236 | return my_syscall1(__NR_close, fd); | ||
1237 | } | ||
1238 | |||
1239 | static __attribute__((unused)) | ||
1240 | int sys_dup(int fd) | ||
1241 | { | ||
1242 | return my_syscall1(__NR_dup, fd); | ||
1243 | } | ||
1244 | |||
1245 | static __attribute__((unused)) | ||
1246 | int sys_dup2(int old, int new) | ||
1247 | { | ||
1248 | return my_syscall2(__NR_dup2, old, new); | ||
1249 | } | ||
1250 | |||
1251 | static __attribute__((unused)) | ||
1252 | int sys_execve(const char *filename, char *const argv[], char *const envp[]) | ||
1253 | { | ||
1254 | return my_syscall3(__NR_execve, filename, argv, envp); | ||
1255 | } | ||
1256 | |||
1257 | static __attribute__((unused)) | ||
1258 | pid_t sys_fork(void) | ||
1259 | { | ||
1260 | return my_syscall0(__NR_fork); | ||
1261 | } | ||
1262 | |||
1263 | static __attribute__((unused)) | ||
1264 | int sys_fsync(int fd) | ||
1265 | { | ||
1266 | return my_syscall1(__NR_fsync, fd); | ||
1267 | } | ||
1268 | |||
1269 | static __attribute__((unused)) | ||
1270 | int sys_getdents64(int fd, struct linux_dirent64 *dirp, int count) | ||
1271 | { | ||
1272 | return my_syscall3(__NR_getdents64, fd, dirp, count); | ||
1273 | } | ||
1274 | |||
1275 | static __attribute__((unused)) | ||
1276 | pid_t sys_getpgrp(void) | ||
1277 | { | ||
1278 | return my_syscall0(__NR_getpgrp); | ||
1279 | } | ||
1280 | |||
1281 | static __attribute__((unused)) | ||
1282 | pid_t sys_getpid(void) | ||
1283 | { | ||
1284 | return my_syscall0(__NR_getpid); | ||
1285 | } | ||
1286 | |||
1287 | static __attribute__((unused)) | ||
1288 | int sys_gettimeofday(struct timeval *tv, struct timezone *tz) | ||
1289 | { | ||
1290 | return my_syscall2(__NR_gettimeofday, tv, tz); | ||
1291 | } | ||
1292 | |||
1293 | static __attribute__((unused)) | ||
1294 | int sys_ioctl(int fd, unsigned long req, void *value) | ||
1295 | { | ||
1296 | return my_syscall3(__NR_ioctl, fd, req, value); | ||
1297 | } | ||
1298 | |||
1299 | static __attribute__((unused)) | ||
1300 | int sys_kill(pid_t pid, int signal) | ||
1301 | { | ||
1302 | return my_syscall2(__NR_kill, pid, signal); | ||
1303 | } | ||
1304 | |||
1305 | static __attribute__((unused)) | ||
1306 | int sys_link(const char *old, const char *new) | ||
1307 | { | ||
1308 | #ifdef __NR_linkat | ||
1309 | return my_syscall5(__NR_linkat, AT_FDCWD, old, AT_FDCWD, new, 0); | ||
1310 | #else | ||
1311 | return my_syscall2(__NR_link, old, new); | ||
1312 | #endif | ||
1313 | } | ||
1314 | |||
1315 | static __attribute__((unused)) | ||
1316 | off_t sys_lseek(int fd, off_t offset, int whence) | ||
1317 | { | ||
1318 | return my_syscall3(__NR_lseek, fd, offset, whence); | ||
1319 | } | ||
1320 | |||
1321 | static __attribute__((unused)) | ||
1322 | int sys_mkdir(const char *path, mode_t mode) | ||
1323 | { | ||
1324 | #ifdef __NR_mkdirat | ||
1325 | return my_syscall3(__NR_mkdirat, AT_FDCWD, path, mode); | ||
1326 | #else | ||
1327 | return my_syscall2(__NR_mkdir, path, mode); | ||
1328 | #endif | ||
1329 | } | ||
1330 | |||
1331 | static __attribute__((unused)) | ||
1332 | long sys_mknod(const char *path, mode_t mode, dev_t dev) | ||
1333 | { | ||
1334 | #ifdef __NR_mknodat | ||
1335 | return my_syscall4(__NR_mknodat, AT_FDCWD, path, mode, dev); | ||
1336 | #else | ||
1337 | return my_syscall3(__NR_mknod, path, mode, dev); | ||
1338 | #endif | ||
1339 | } | ||
1340 | |||
1341 | static __attribute__((unused)) | ||
1342 | int sys_mount(const char *src, const char *tgt, const char *fst, | ||
1343 | unsigned long flags, const void *data) | ||
1344 | { | ||
1345 | return my_syscall5(__NR_mount, src, tgt, fst, flags, data); | ||
1346 | } | ||
1347 | |||
1348 | static __attribute__((unused)) | ||
1349 | int sys_open(const char *path, int flags, mode_t mode) | ||
1350 | { | ||
1351 | #ifdef __NR_openat | ||
1352 | return my_syscall4(__NR_openat, AT_FDCWD, path, flags, mode); | ||
1353 | #else | ||
1354 | return my_syscall3(__NR_open, path, flags, mode); | ||
1355 | #endif | ||
1356 | } | ||
1357 | |||
1358 | static __attribute__((unused)) | ||
1359 | int sys_pivot_root(const char *new, const char *old) | ||
1360 | { | ||
1361 | return my_syscall2(__NR_pivot_root, new, old); | ||
1362 | } | ||
1363 | |||
1364 | static __attribute__((unused)) | ||
1365 | int sys_poll(struct pollfd *fds, int nfds, int timeout) | ||
1366 | { | ||
1367 | return my_syscall3(__NR_poll, fds, nfds, timeout); | ||
1368 | } | ||
1369 | |||
1370 | static __attribute__((unused)) | ||
1371 | ssize_t sys_read(int fd, void *buf, size_t count) | ||
1372 | { | ||
1373 | return my_syscall3(__NR_read, fd, buf, count); | ||
1374 | } | ||
1375 | |||
1376 | static __attribute__((unused)) | ||
1377 | ssize_t sys_reboot(int magic1, int magic2, int cmd, void *arg) | ||
1378 | { | ||
1379 | return my_syscall4(__NR_reboot, magic1, magic2, cmd, arg); | ||
1380 | } | ||
1381 | |||
1382 | static __attribute__((unused)) | ||
1383 | int sys_sched_yield(void) | ||
1384 | { | ||
1385 | return my_syscall0(__NR_sched_yield); | ||
1386 | } | ||
1387 | |||
1388 | static __attribute__((unused)) | ||
1389 | int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout) | ||
1390 | { | ||
1391 | #if defined(__ARCH_WANT_SYS_OLD_SELECT) && !defined(__NR__newselect) | ||
1392 | struct sel_arg_struct { | ||
1393 | unsigned long n; | ||
1394 | fd_set *r, *w, *e; | ||
1395 | struct timeval *t; | ||
1396 | } arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout }; | ||
1397 | return my_syscall1(__NR_select, &arg); | ||
1398 | #elif defined(__ARCH_WANT_SYS_PSELECT6) && defined(__NR_pselect6) | ||
1399 | struct timespec t; | ||
1400 | |||
1401 | if (timeout) { | ||
1402 | t.tv_sec = timeout->tv_sec; | ||
1403 | t.tv_nsec = timeout->tv_usec * 1000; | ||
1404 | } | ||
1405 | return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL); | ||
1406 | #else | ||
1407 | #ifndef __NR__newselect | ||
1408 | #define __NR__newselect __NR_select | ||
1409 | #endif | ||
1410 | return my_syscall5(__NR__newselect, nfds, rfds, wfds, efds, timeout); | ||
1411 | #endif | ||
1412 | } | ||
1413 | |||
1414 | static __attribute__((unused)) | ||
1415 | int sys_setpgid(pid_t pid, pid_t pgid) | ||
1416 | { | ||
1417 | return my_syscall2(__NR_setpgid, pid, pgid); | ||
1418 | } | ||
1419 | |||
1420 | static __attribute__((unused)) | ||
1421 | pid_t sys_setsid(void) | ||
1422 | { | ||
1423 | return my_syscall0(__NR_setsid); | ||
1424 | } | ||
1425 | |||
1426 | static __attribute__((unused)) | ||
1427 | int sys_stat(const char *path, struct stat *buf) | ||
1428 | { | ||
1429 | struct sys_stat_struct stat; | ||
1430 | long ret; | ||
1431 | |||
1432 | #ifdef __NR_newfstatat | ||
1433 | /* only solution for arm64 */ | ||
1434 | ret = my_syscall4(__NR_newfstatat, AT_FDCWD, path, &stat, 0); | ||
1435 | #else | ||
1436 | ret = my_syscall2(__NR_stat, path, &stat); | ||
1437 | #endif | ||
1438 | buf->st_dev = stat.st_dev; | ||
1439 | buf->st_ino = stat.st_ino; | ||
1440 | buf->st_mode = stat.st_mode; | ||
1441 | buf->st_nlink = stat.st_nlink; | ||
1442 | buf->st_uid = stat.st_uid; | ||
1443 | buf->st_gid = stat.st_gid; | ||
1444 | buf->st_rdev = stat.st_rdev; | ||
1445 | buf->st_size = stat.st_size; | ||
1446 | buf->st_blksize = stat.st_blksize; | ||
1447 | buf->st_blocks = stat.st_blocks; | ||
1448 | buf->st_atime = stat.st_atime; | ||
1449 | buf->st_mtime = stat.st_mtime; | ||
1450 | buf->st_ctime = stat.st_ctime; | ||
1451 | return ret; | ||
1452 | } | ||
1453 | |||
1454 | |||
1455 | static __attribute__((unused)) | ||
1456 | int sys_symlink(const char *old, const char *new) | ||
1457 | { | ||
1458 | #ifdef __NR_symlinkat | ||
1459 | return my_syscall3(__NR_symlinkat, old, AT_FDCWD, new); | ||
1460 | #else | ||
1461 | return my_syscall2(__NR_symlink, old, new); | ||
1462 | #endif | ||
1463 | } | ||
1464 | |||
1465 | static __attribute__((unused)) | ||
1466 | mode_t sys_umask(mode_t mode) | ||
1467 | { | ||
1468 | return my_syscall1(__NR_umask, mode); | ||
1469 | } | ||
1470 | |||
1471 | static __attribute__((unused)) | ||
1472 | int sys_umount2(const char *path, int flags) | ||
1473 | { | ||
1474 | return my_syscall2(__NR_umount2, path, flags); | ||
1475 | } | ||
1476 | |||
1477 | static __attribute__((unused)) | ||
1478 | int sys_unlink(const char *path) | ||
1479 | { | ||
1480 | #ifdef __NR_unlinkat | ||
1481 | return my_syscall3(__NR_unlinkat, AT_FDCWD, path, 0); | ||
1482 | #else | ||
1483 | return my_syscall1(__NR_unlink, path); | ||
1484 | #endif | ||
1485 | } | ||
1486 | |||
1487 | static __attribute__((unused)) | ||
1488 | pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage) | ||
1489 | { | ||
1490 | return my_syscall4(__NR_wait4, pid, status, options, rusage); | ||
1491 | } | ||
1492 | |||
1493 | static __attribute__((unused)) | ||
1494 | pid_t sys_waitpid(pid_t pid, int *status, int options) | ||
1495 | { | ||
1496 | return sys_wait4(pid, status, options, 0); | ||
1497 | } | ||
1498 | |||
1499 | static __attribute__((unused)) | ||
1500 | pid_t sys_wait(int *status) | ||
1501 | { | ||
1502 | return sys_waitpid(-1, status, 0); | ||
1503 | } | ||
1504 | |||
1505 | static __attribute__((unused)) | ||
1506 | ssize_t sys_write(int fd, const void *buf, size_t count) | ||
1507 | { | ||
1508 | return my_syscall3(__NR_write, fd, buf, count); | ||
1509 | } | ||
1510 | |||
1511 | |||
1512 | /* Below are the libc-compatible syscalls which return the expected value on | ||
1513 | * success, or -1 with errno set on error. They rely on the functions above. | ||
1514 | * Similarly they're marked static so that it is possible to assign pointers | ||
1515 | * to them if needed. | ||
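A hypothetical sketch of the resulting calling convention, using the mkdir() wrapper defined below and the errno variable that SET_ERRNO() updates earlier in this header:

/* hypothetical sketch: the wrapper returns -1 and leaves the error in errno */
static int example_mkdir(const char *path)
{
	if (mkdir(path, 0755) < 0)
		return errno;   /* e.g. EEXIST if the directory already exists */
	return 0;
}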
1516 | |||
1517 | static __attribute__((unused)) | ||
1518 | int brk(void *addr) | ||
1519 | { | ||
1520 | void *ret = sys_brk(addr); | ||
1521 | |||
1522 | if (!ret) { | ||
1523 | SET_ERRNO(ENOMEM); | ||
1524 | return -1; | ||
1525 | } | ||
1526 | return 0; | ||
1527 | } | ||
1528 | |||
1529 | static __attribute__((noreturn,unused)) | ||
1530 | void exit(int status) | ||
1531 | { | ||
1532 | sys_exit(status); | ||
1533 | } | ||
1534 | |||
1535 | static __attribute__((unused)) | ||
1536 | int chdir(const char *path) | ||
1537 | { | ||
1538 | int ret = sys_chdir(path); | ||
1539 | |||
1540 | if (ret < 0) { | ||
1541 | SET_ERRNO(-ret); | ||
1542 | ret = -1; | ||
1543 | } | ||
1544 | return ret; | ||
1545 | } | ||
1546 | |||
1547 | static __attribute__((unused)) | ||
1548 | int chmod(const char *path, mode_t mode) | ||
1549 | { | ||
1550 | int ret = sys_chmod(path, mode); | ||
1551 | |||
1552 | if (ret < 0) { | ||
1553 | SET_ERRNO(-ret); | ||
1554 | ret = -1; | ||
1555 | } | ||
1556 | return ret; | ||
1557 | } | ||
1558 | |||
1559 | static __attribute__((unused)) | ||
1560 | int chown(const char *path, uid_t owner, gid_t group) | ||
1561 | { | ||
1562 | int ret = sys_chown(path, owner, group); | ||
1563 | |||
1564 | if (ret < 0) { | ||
1565 | SET_ERRNO(-ret); | ||
1566 | ret = -1; | ||
1567 | } | ||
1568 | return ret; | ||
1569 | } | ||
1570 | |||
1571 | static __attribute__((unused)) | ||
1572 | int chroot(const char *path) | ||
1573 | { | ||
1574 | int ret = sys_chroot(path); | ||
1575 | |||
1576 | if (ret < 0) { | ||
1577 | SET_ERRNO(-ret); | ||
1578 | ret = -1; | ||
1579 | } | ||
1580 | return ret; | ||
1581 | } | ||
1582 | |||
1583 | static __attribute__((unused)) | ||
1584 | int close(int fd) | ||
1585 | { | ||
1586 | int ret = sys_close(fd); | ||
1587 | |||
1588 | if (ret < 0) { | ||
1589 | SET_ERRNO(-ret); | ||
1590 | ret = -1; | ||
1591 | } | ||
1592 | return ret; | ||
1593 | } | ||
1594 | |||
1595 | static __attribute__((unused)) | ||
1596 | int dup2(int old, int new) | ||
1597 | { | ||
1598 | int ret = sys_dup2(old, new); | ||
1599 | |||
1600 | if (ret < 0) { | ||
1601 | SET_ERRNO(-ret); | ||
1602 | ret = -1; | ||
1603 | } | ||
1604 | return ret; | ||
1605 | } | ||
1606 | |||
1607 | static __attribute__((unused)) | ||
1608 | int execve(const char *filename, char *const argv[], char *const envp[]) | ||
1609 | { | ||
1610 | int ret = sys_execve(filename, argv, envp); | ||
1611 | |||
1612 | if (ret < 0) { | ||
1613 | SET_ERRNO(-ret); | ||
1614 | ret = -1; | ||
1615 | } | ||
1616 | return ret; | ||
1617 | } | ||
1618 | |||
1619 | static __attribute__((unused)) | ||
1620 | pid_t fork(void) | ||
1621 | { | ||
1622 | pid_t ret = sys_fork(); | ||
1623 | |||
1624 | if (ret < 0) { | ||
1625 | SET_ERRNO(-ret); | ||
1626 | ret = -1; | ||
1627 | } | ||
1628 | return ret; | ||
1629 | } | ||
1630 | |||
1631 | static __attribute__((unused)) | ||
1632 | int fsync(int fd) | ||
1633 | { | ||
1634 | int ret = sys_fsync(fd); | ||
1635 | |||
1636 | if (ret < 0) { | ||
1637 | SET_ERRNO(-ret); | ||
1638 | ret = -1; | ||
1639 | } | ||
1640 | return ret; | ||
1641 | } | ||
1642 | |||
1643 | static __attribute__((unused)) | ||
1644 | int getdents64(int fd, struct linux_dirent64 *dirp, int count) | ||
1645 | { | ||
1646 | int ret = sys_getdents64(fd, dirp, count); | ||
1647 | |||
1648 | if (ret < 0) { | ||
1649 | SET_ERRNO(-ret); | ||
1650 | ret = -1; | ||
1651 | } | ||
1652 | return ret; | ||
1653 | } | ||
1654 | |||
1655 | static __attribute__((unused)) | ||
1656 | pid_t getpgrp(void) | ||
1657 | { | ||
1658 | pid_t ret = sys_getpgrp(); | ||
1659 | |||
1660 | if (ret < 0) { | ||
1661 | SET_ERRNO(-ret); | ||
1662 | ret = -1; | ||
1663 | } | ||
1664 | return ret; | ||
1665 | } | ||
1666 | |||
1667 | static __attribute__((unused)) | ||
1668 | pid_t getpid(void) | ||
1669 | { | ||
1670 | pid_t ret = sys_getpid(); | ||
1671 | |||
1672 | if (ret < 0) { | ||
1673 | SET_ERRNO(-ret); | ||
1674 | ret = -1; | ||
1675 | } | ||
1676 | return ret; | ||
1677 | } | ||
1678 | |||
1679 | static __attribute__((unused)) | ||
1680 | int gettimeofday(struct timeval *tv, struct timezone *tz) | ||
1681 | { | ||
1682 | int ret = sys_gettimeofday(tv, tz); | ||
1683 | |||
1684 | if (ret < 0) { | ||
1685 | SET_ERRNO(-ret); | ||
1686 | ret = -1; | ||
1687 | } | ||
1688 | return ret; | ||
1689 | } | ||
1690 | |||
1691 | static __attribute__((unused)) | ||
1692 | int ioctl(int fd, unsigned long req, void *value) | ||
1693 | { | ||
1694 | int ret = sys_ioctl(fd, req, value); | ||
1695 | |||
1696 | if (ret < 0) { | ||
1697 | SET_ERRNO(-ret); | ||
1698 | ret = -1; | ||
1699 | } | ||
1700 | return ret; | ||
1701 | } | ||
1702 | |||
1703 | static __attribute__((unused)) | ||
1704 | int kill(pid_t pid, int signal) | ||
1705 | { | ||
1706 | int ret = sys_kill(pid, signal); | ||
1707 | |||
1708 | if (ret < 0) { | ||
1709 | SET_ERRNO(-ret); | ||
1710 | ret = -1; | ||
1711 | } | ||
1712 | return ret; | ||
1713 | } | ||
1714 | |||
1715 | static __attribute__((unused)) | ||
1716 | int link(const char *old, const char *new) | ||
1717 | { | ||
1718 | int ret = sys_link(old, new); | ||
1719 | |||
1720 | if (ret < 0) { | ||
1721 | SET_ERRNO(-ret); | ||
1722 | ret = -1; | ||
1723 | } | ||
1724 | return ret; | ||
1725 | } | ||
1726 | |||
1727 | static __attribute__((unused)) | ||
1728 | off_t lseek(int fd, off_t offset, int whence) | ||
1729 | { | ||
1730 | off_t ret = sys_lseek(fd, offset, whence); | ||
1731 | |||
1732 | if (ret < 0) { | ||
1733 | SET_ERRNO(-ret); | ||
1734 | ret = -1; | ||
1735 | } | ||
1736 | return ret; | ||
1737 | } | ||
1738 | |||
1739 | static __attribute__((unused)) | ||
1740 | int mkdir(const char *path, mode_t mode) | ||
1741 | { | ||
1742 | int ret = sys_mkdir(path, mode); | ||
1743 | |||
1744 | if (ret < 0) { | ||
1745 | SET_ERRNO(-ret); | ||
1746 | ret = -1; | ||
1747 | } | ||
1748 | return ret; | ||
1749 | } | ||
1750 | |||
1751 | static __attribute__((unused)) | ||
1752 | int mknod(const char *path, mode_t mode, dev_t dev) | ||
1753 | { | ||
1754 | int ret = sys_mknod(path, mode, dev); | ||
1755 | |||
1756 | if (ret < 0) { | ||
1757 | SET_ERRNO(-ret); | ||
1758 | ret = -1; | ||
1759 | } | ||
1760 | return ret; | ||
1761 | } | ||
1762 | |||
1763 | static __attribute__((unused)) | ||
1764 | int mount(const char *src, const char *tgt, | ||
1765 | const char *fst, unsigned long flags, | ||
1766 | const void *data) | ||
1767 | { | ||
1768 | int ret = sys_mount(src, tgt, fst, flags, data); | ||
1769 | |||
1770 | if (ret < 0) { | ||
1771 | SET_ERRNO(-ret); | ||
1772 | ret = -1; | ||
1773 | } | ||
1774 | return ret; | ||
1775 | } | ||
1776 | |||
1777 | static __attribute__((unused)) | ||
1778 | int open(const char *path, int flags, mode_t mode) | ||
1779 | { | ||
1780 | int ret = sys_open(path, flags, mode); | ||
1781 | |||
1782 | if (ret < 0) { | ||
1783 | SET_ERRNO(-ret); | ||
1784 | ret = -1; | ||
1785 | } | ||
1786 | return ret; | ||
1787 | } | ||
1788 | |||
1789 | static __attribute__((unused)) | ||
1790 | int pivot_root(const char *new, const char *old) | ||
1791 | { | ||
1792 | int ret = sys_pivot_root(new, old); | ||
1793 | |||
1794 | if (ret < 0) { | ||
1795 | SET_ERRNO(-ret); | ||
1796 | ret = -1; | ||
1797 | } | ||
1798 | return ret; | ||
1799 | } | ||
1800 | |||
1801 | static __attribute__((unused)) | ||
1802 | int poll(struct pollfd *fds, int nfds, int timeout) | ||
1803 | { | ||
1804 | int ret = sys_poll(fds, nfds, timeout); | ||
1805 | |||
1806 | if (ret < 0) { | ||
1807 | SET_ERRNO(-ret); | ||
1808 | ret = -1; | ||
1809 | } | ||
1810 | return ret; | ||
1811 | } | ||
1812 | |||
1813 | static __attribute__((unused)) | ||
1814 | ssize_t read(int fd, void *buf, size_t count) | ||
1815 | { | ||
1816 | ssize_t ret = sys_read(fd, buf, count); | ||
1817 | |||
1818 | if (ret < 0) { | ||
1819 | SET_ERRNO(-ret); | ||
1820 | ret = -1; | ||
1821 | } | ||
1822 | return ret; | ||
1823 | } | ||
1824 | |||
1825 | static __attribute__((unused)) | ||
1826 | int reboot(int cmd) | ||
1827 | { | ||
1828 | int ret = sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, 0); | ||
1829 | |||
1830 | if (ret < 0) { | ||
1831 | SET_ERRNO(-ret); | ||
1832 | ret = -1; | ||
1833 | } | ||
1834 | return ret; | ||
1835 | } | ||
1836 | |||
1837 | static __attribute__((unused)) | ||
1838 | void *sbrk(intptr_t inc) | ||
1839 | { | ||
1840 | void *ret; | ||
1841 | |||
1842 | /* first call to find current end */ | ||
1843 | if ((ret = sys_brk(0)) && (sys_brk(ret + inc) == ret + inc)) | ||
1844 | return ret + inc; | ||
1845 | |||
1846 | SET_ERRNO(ENOMEM); | ||
1847 | return (void *)-1; | ||
1848 | } | ||
1849 | |||
1850 | static __attribute__((unused)) | ||
1851 | int sched_yield(void) | ||
1852 | { | ||
1853 | int ret = sys_sched_yield(); | ||
1854 | |||
1855 | if (ret < 0) { | ||
1856 | SET_ERRNO(-ret); | ||
1857 | ret = -1; | ||
1858 | } | ||
1859 | return ret; | ||
1860 | } | ||
1861 | |||
1862 | static __attribute__((unused)) | ||
1863 | int select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout) | ||
1864 | { | ||
1865 | int ret = sys_select(nfds, rfds, wfds, efds, timeout); | ||
1866 | |||
1867 | if (ret < 0) { | ||
1868 | SET_ERRNO(-ret); | ||
1869 | ret = -1; | ||
1870 | } | ||
1871 | return ret; | ||
1872 | } | ||
1873 | |||
1874 | static __attribute__((unused)) | ||
1875 | int setpgid(pid_t pid, pid_t pgid) | ||
1876 | { | ||
1877 | int ret = sys_setpgid(pid, pgid); | ||
1878 | |||
1879 | if (ret < 0) { | ||
1880 | SET_ERRNO(-ret); | ||
1881 | ret = -1; | ||
1882 | } | ||
1883 | return ret; | ||
1884 | } | ||
1885 | |||
1886 | static __attribute__((unused)) | ||
1887 | pid_t setsid(void) | ||
1888 | { | ||
1889 | pid_t ret = sys_setsid(); | ||
1890 | |||
1891 | if (ret < 0) { | ||
1892 | SET_ERRNO(-ret); | ||
1893 | ret = -1; | ||
1894 | } | ||
1895 | return ret; | ||
1896 | } | ||
1897 | |||
1898 | static __attribute__((unused)) | ||
1899 | unsigned int sleep(unsigned int seconds) | ||
1900 | { | ||
1901 | struct timeval my_timeval = { seconds, 0 }; | ||
1902 | |||
1903 | if (sys_select(0, 0, 0, 0, &my_timeval) < 0) | ||
1904 | return my_timeval.tv_sec + !!my_timeval.tv_usec; | ||
1905 | else | ||
1906 | return 0; | ||
1907 | } | ||
1908 | |||
1909 | static __attribute__((unused)) | ||
1910 | int stat(const char *path, struct stat *buf) | ||
1911 | { | ||
1912 | int ret = sys_stat(path, buf); | ||
1913 | |||
1914 | if (ret < 0) { | ||
1915 | SET_ERRNO(-ret); | ||
1916 | ret = -1; | ||
1917 | } | ||
1918 | return ret; | ||
1919 | } | ||
1920 | |||
1921 | static __attribute__((unused)) | ||
1922 | int symlink(const char *old, const char *new) | ||
1923 | { | ||
1924 | int ret = sys_symlink(old, new); | ||
1925 | |||
1926 | if (ret < 0) { | ||
1927 | SET_ERRNO(-ret); | ||
1928 | ret = -1; | ||
1929 | } | ||
1930 | return ret; | ||
1931 | } | ||
1932 | |||
1933 | static __attribute__((unused)) | ||
1934 | int tcsetpgrp(int fd, pid_t pid) | ||
1935 | { | ||
1936 | return ioctl(fd, TIOCSPGRP, &pid); | ||
1937 | } | ||
1938 | |||
1939 | static __attribute__((unused)) | ||
1940 | mode_t umask(mode_t mode) | ||
1941 | { | ||
1942 | return sys_umask(mode); | ||
1943 | } | ||
1944 | |||
1945 | static __attribute__((unused)) | ||
1946 | int umount2(const char *path, int flags) | ||
1947 | { | ||
1948 | int ret = sys_umount2(path, flags); | ||
1949 | |||
1950 | if (ret < 0) { | ||
1951 | SET_ERRNO(-ret); | ||
1952 | ret = -1; | ||
1953 | } | ||
1954 | return ret; | ||
1955 | } | ||
1956 | |||
1957 | static __attribute__((unused)) | ||
1958 | int unlink(const char *path) | ||
1959 | { | ||
1960 | int ret = sys_unlink(path); | ||
1961 | |||
1962 | if (ret < 0) { | ||
1963 | SET_ERRNO(-ret); | ||
1964 | ret = -1; | ||
1965 | } | ||
1966 | return ret; | ||
1967 | } | ||
1968 | |||
1969 | static __attribute__((unused)) | ||
1970 | pid_t wait4(pid_t pid, int *status, int options, struct rusage *rusage) | ||
1971 | { | ||
1972 | pid_t ret = sys_wait4(pid, status, options, rusage); | ||
1973 | |||
1974 | if (ret < 0) { | ||
1975 | SET_ERRNO(-ret); | ||
1976 | ret = -1; | ||
1977 | } | ||
1978 | return ret; | ||
1979 | } | ||
1980 | |||
1981 | static __attribute__((unused)) | ||
1982 | pid_t waitpid(pid_t pid, int *status, int options) | ||
1983 | { | ||
1984 | pid_t ret = sys_waitpid(pid, status, options); | ||
1985 | |||
1986 | if (ret < 0) { | ||
1987 | SET_ERRNO(-ret); | ||
1988 | ret = -1; | ||
1989 | } | ||
1990 | return ret; | ||
1991 | } | ||
1992 | |||
1993 | static __attribute__((unused)) | ||
1994 | pid_t wait(int *status) | ||
1995 | { | ||
1996 | pid_t ret = sys_wait(status); | ||
1997 | |||
1998 | if (ret < 0) { | ||
1999 | SET_ERRNO(-ret); | ||
2000 | ret = -1; | ||
2001 | } | ||
2002 | return ret; | ||
2003 | } | ||
2004 | |||
2005 | static __attribute__((unused)) | ||
2006 | ssize_t write(int fd, const void *buf, size_t count) | ||
2007 | { | ||
2008 | ssize_t ret = sys_write(fd, buf, count); | ||
2009 | |||
2010 | if (ret < 0) { | ||
2011 | SET_ERRNO(-ret); | ||
2012 | ret = -1; | ||
2013 | } | ||
2014 | return ret; | ||
2015 | } | ||
2016 | |||
2017 | /* Some size-optimized reimplementations of a few common str* and mem* | ||
2018 | * functions. They're marked static, except for memcpy() and raise(), which are | ||
2019 | * used by libgcc on ARM and are therefore marked weak instead, so as not to | ||
2020 | * cause an error when building a program made of multiple files (not recommended). | ||
2021 | */ | ||
2022 | |||
2023 | static __attribute__((unused)) | ||
2024 | void *memmove(void *dst, const void *src, size_t len) | ||
2025 | { | ||
2026 | ssize_t pos = (dst <= src) ? -1 : (long)len; | ||
2027 | void *ret = dst; | ||
2028 | |||
2029 | while (len--) { | ||
2030 | pos += (dst <= src) ? 1 : -1; | ||
2031 | ((char *)dst)[pos] = ((char *)src)[pos]; | ||
2032 | } | ||
2033 | return ret; | ||
2034 | } | ||
2035 | |||
2036 | static __attribute__((unused)) | ||
2037 | void *memset(void *dst, int b, size_t len) | ||
2038 | { | ||
2039 | char *p = dst; | ||
2040 | |||
2041 | while (len--) | ||
2042 | *(p++) = b; | ||
2043 | return dst; | ||
2044 | } | ||
2045 | |||
2046 | static __attribute__((unused)) | ||
2047 | int memcmp(const void *s1, const void *s2, size_t n) | ||
2048 | { | ||
2049 | size_t ofs = 0; | ||
2050 | char c1 = 0; | ||
2051 | |||
2052 | while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs])) { | ||
2053 | ofs++; | ||
2054 | } | ||
2055 | return c1; | ||
2056 | } | ||
2057 | |||
2058 | static __attribute__((unused)) | ||
2059 | char *strcpy(char *dst, const char *src) | ||
2060 | { | ||
2061 | char *ret = dst; | ||
2062 | |||
2063 | while ((*dst++ = *src++)); | ||
2064 | return ret; | ||
2065 | } | ||
2066 | |||
2067 | static __attribute__((unused)) | ||
2068 | char *strchr(const char *s, int c) | ||
2069 | { | ||
2070 | while (*s) { | ||
2071 | if (*s == (char)c) | ||
2072 | return (char *)s; | ||
2073 | s++; | ||
2074 | } | ||
2075 | return NULL; | ||
2076 | } | ||
2077 | |||
2078 | static __attribute__((unused)) | ||
2079 | char *strrchr(const char *s, int c) | ||
2080 | { | ||
2081 | const char *ret = NULL; | ||
2082 | |||
2083 | while (*s) { | ||
2084 | if (*s == (char)c) | ||
2085 | ret = s; | ||
2086 | s++; | ||
2087 | } | ||
2088 | return (char *)ret; | ||
2089 | } | ||
2090 | |||
2091 | static __attribute__((unused)) | ||
2092 | size_t nolibc_strlen(const char *str) | ||
2093 | { | ||
2094 | size_t len; | ||
2095 | |||
2096 | for (len = 0; str[len]; len++); | ||
2097 | return len; | ||
2098 | } | ||
2099 | |||
2100 | #define strlen(str) ({ \ | ||
2101 | __builtin_constant_p((str)) ? \ | ||
2102 | __builtin_strlen((str)) : \ | ||
2103 | nolibc_strlen((str)); \ | ||
2104 | }) | ||
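The wrapper above lets the compiler fold the length of string literals at build time while run-time strings still go through nolibc_strlen(); a short hypothetical sketch of the two paths:

/* sketch: the literal folds via __builtin_strlen(), the argument loops in nolibc_strlen() */
static size_t example_strlen(const char *arg)
{
	return strlen("hello") + strlen(arg);
}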
2105 | |||
2106 | static __attribute__((unused)) | ||
2107 | int isdigit(int c) | ||
2108 | { | ||
2109 | return (unsigned int)(c - '0') <= 9; | ||
2110 | } | ||
2111 | |||
2112 | static __attribute__((unused)) | ||
2113 | long atol(const char *s) | ||
2114 | { | ||
2115 | unsigned long ret = 0; | ||
2116 | unsigned long d; | ||
2117 | int neg = 0; | ||
2118 | |||
2119 | if (*s == '-') { | ||
2120 | neg = 1; | ||
2121 | s++; | ||
2122 | } | ||
2123 | |||
2124 | while (1) { | ||
2125 | d = (*s++) - '0'; | ||
2126 | if (d > 9) | ||
2127 | break; | ||
2128 | ret *= 10; | ||
2129 | ret += d; | ||
2130 | } | ||
2131 | |||
2132 | return neg ? -ret : ret; | ||
2133 | } | ||
2134 | |||
2135 | static __attribute__((unused)) | ||
2136 | int atoi(const char *s) | ||
2137 | { | ||
2138 | return atol(s); | ||
2139 | } | ||
2140 | |||
2141 | static __attribute__((unused)) | ||
2142 | const char *ltoa(long in) | ||
2143 | { | ||
2144 | /* large enough for -9223372036854775808 */ | ||
2145 | static char buffer[21]; | ||
2146 | char *pos = buffer + sizeof(buffer) - 1; | ||
2147 | int neg = in < 0; | ||
2148 | unsigned long n = neg ? -in : in; | ||
2149 | |||
2150 | *pos-- = '\0'; | ||
2151 | do { | ||
2152 | *pos-- = '0' + n % 10; | ||
2153 | n /= 10; | ||
2154 | if (pos < buffer) | ||
2155 | return pos + 1; | ||
2156 | } while (n); | ||
2157 | |||
2158 | if (neg) | ||
2159 | *pos-- = '-'; | ||
2160 | return pos + 1; | ||
2161 | } | ||
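Because ltoa() returns a pointer into a single static buffer (so it is not reentrant), its typical use is to emit the number immediately, for instance with the write() and strlen() helpers above; a hypothetical sketch:

/* hypothetical sketch: print a long followed by a newline on stdout */
static void example_print_long(long v)
{
	const char *s = ltoa(v);        /* points into ltoa()'s static buffer */

	write(1, s, strlen(s));
	write(1, "\n", 1);
}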
2162 | |||
2163 | __attribute__((weak,unused)) | ||
2164 | void *memcpy(void *dst, const void *src, size_t len) | ||
2165 | { | ||
2166 | return memmove(dst, src, len); | ||
2167 | } | ||
2168 | |||
2169 | /* needed by libgcc for divide by zero */ | ||
2170 | __attribute__((weak,unused)) | ||
2171 | int raise(int signal) | ||
2172 | { | ||
2173 | return kill(getpid(), signal); | ||
2174 | } | ||
2175 | |||
2176 | /* Here come a few helper functions */ | ||
2177 | |||
2178 | static __attribute__((unused)) | ||
2179 | void FD_ZERO(fd_set *set) | ||
2180 | { | ||
2181 | memset(set, 0, sizeof(*set)); | ||
2182 | } | ||
2183 | |||
2184 | static __attribute__((unused)) | ||
2185 | void FD_SET(int fd, fd_set *set) | ||
2186 | { | ||
2187 | if (fd < 0 || fd >= FD_SETSIZE) | ||
2188 | return; | ||
2189 | set->fd32[fd / 32] |= 1 << (fd & 31); | ||
2190 | } | ||
2191 | |||
2192 | /* WARNING: this only deals with the first 4096 majors and first 256 minors */ | ||
2193 | static __attribute__((unused)) | ||
2194 | dev_t makedev(unsigned int major, unsigned int minor) | ||
2195 | { | ||
2196 | return ((major & 0xfff) << 8) | (minor & 0xff); | ||
2197 | } | ||
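makedev() pairs naturally with the mknod() wrapper defined earlier; a hypothetical sketch creating a character device node (it assumes the S_IFCHR mode bit defined with the other mode constants earlier in this header):

/* hypothetical sketch: create the console device node, char major 5, minor 1 */
static int example_make_console(void)
{
	return mknod("/dev/console", S_IFCHR | 0600, makedev(5, 1));
}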
diff --git a/tools/testing/selftests/rcutorture/doc/initrd.txt b/tools/testing/selftests/rcutorture/doc/initrd.txt index 833f826d6ec2..933b4fd12327 100644 --- a/tools/testing/selftests/rcutorture/doc/initrd.txt +++ b/tools/testing/selftests/rcutorture/doc/initrd.txt | |||
@@ -1,9 +1,12 @@ | |||
1 | This document describes one way to create the initrd directory hierarchy | 1 | The rcutorture scripting tools automatically create the needed initrd |
2 | in order to allow an initrd to be built into your kernel. The trick | 2 | directory using dracut. Failing that, this tool will create an initrd |
3 | here is to steal the initrd file used on your Linux laptop, Ubuntu in | 3 | containing a single statically linked binary named "init" that loops |
4 | this case. There are probably much better ways of doing this. | 4 | over a very long sleep() call. In both cases, this creation is done |
5 | by tools/testing/selftests/rcutorture/bin/mkinitrd.sh. | ||
5 | 6 | ||
6 | That said, here are the commands: | 7 | However, if you are attempting to run rcutorture on a system that does |
8 | not have dracut installed, and if you don't like the notion of static | ||
9 | linking, you might wish to press an existing initrd into service: | ||
7 | 10 | ||
8 | ------------------------------------------------------------------------ | 11 | ------------------------------------------------------------------------ |
9 | cd tools/testing/selftests/rcutorture | 12 | cd tools/testing/selftests/rcutorture |
@@ -11,22 +14,7 @@ zcat /initrd.img > /tmp/initrd.img.zcat | |||
11 | mkdir initrd | 14 | mkdir initrd |
12 | cd initrd | 15 | cd initrd |
13 | cpio -id < /tmp/initrd.img.zcat | 16 | cpio -id < /tmp/initrd.img.zcat |
14 | ------------------------------------------------------------------------ | 17 | # Manually verify that initrd contains needed binaries and libraries. |
15 | |||
16 | Another way to create an initramfs image is using "dracut"[1], which is | ||
17 | available on many distros, however the initramfs dracut generates is a cpio | ||
18 | archive with another cpio archive in it, so an extra step is needed to create | ||
19 | the initrd directory hierarchy. | ||
20 | |||
21 | Here are the commands to create a initrd directory for rcutorture using | ||
22 | dracut: | ||
23 | |||
24 | ------------------------------------------------------------------------ | ||
25 | dracut --no-hostonly --no-hostonly-cmdline --module "base bash shutdown" /tmp/initramfs.img | ||
26 | cd tools/testing/selftests/rcutorture | ||
27 | mkdir initrd | ||
28 | cd initrd | ||
29 | /usr/lib/dracut/skipcpio /tmp/initramfs.img | zcat | cpio -id < /tmp/initramfs.img | ||
30 | ------------------------------------------------------------------------ | 18 | ------------------------------------------------------------------------ |
31 | 19 | ||
32 | Interestingly enough, if you are running rcutorture, you don't really | 20 | Interestingly enough, if you are running rcutorture, you don't really |
@@ -39,75 +27,12 @@ with 0755 mode. | |||
39 | ------------------------------------------------------------------------ | 27 | ------------------------------------------------------------------------ |
40 | #!/bin/sh | 28 | #!/bin/sh |
41 | 29 | ||
42 | [ -d /dev ] || mkdir -m 0755 /dev | ||
43 | [ -d /root ] || mkdir -m 0700 /root | ||
44 | [ -d /sys ] || mkdir /sys | ||
45 | [ -d /proc ] || mkdir /proc | ||
46 | [ -d /tmp ] || mkdir /tmp | ||
47 | mkdir -p /var/lock | ||
48 | mount -t sysfs -o nodev,noexec,nosuid sysfs /sys | ||
49 | mount -t proc -o nodev,noexec,nosuid proc /proc | ||
50 | # Some things don't work properly without /etc/mtab. | ||
51 | ln -sf /proc/mounts /etc/mtab | ||
52 | |||
53 | # Note that this only becomes /dev on the real filesystem if udev's scripts | ||
54 | # are used; which they will be, but it's worth pointing out | ||
55 | if ! mount -t devtmpfs -o mode=0755 udev /dev; then | ||
56 | echo "W: devtmpfs not available, falling back to tmpfs for /dev" | ||
57 | mount -t tmpfs -o mode=0755 udev /dev | ||
58 | [ -e /dev/console ] || mknod --mode=600 /dev/console c 5 1 | ||
59 | [ -e /dev/kmsg ] || mknod --mode=644 /dev/kmsg c 1 11 | ||
60 | [ -e /dev/null ] || mknod --mode=666 /dev/null c 1 3 | ||
61 | fi | ||
62 | |||
63 | mkdir /dev/pts | ||
64 | mount -t devpts -o noexec,nosuid,gid=5,mode=0620 devpts /dev/pts || true | ||
65 | mount -t tmpfs -o "nosuid,size=20%,mode=0755" tmpfs /run | ||
66 | mkdir /run/initramfs | ||
67 | # compatibility symlink for the pre-oneiric locations | ||
68 | ln -s /run/initramfs /dev/.initramfs | ||
69 | |||
70 | # Export relevant variables | ||
71 | export ROOT= | ||
72 | export ROOTDELAY= | ||
73 | export ROOTFLAGS= | ||
74 | export ROOTFSTYPE= | ||
75 | export IP= | ||
76 | export BOOT= | ||
77 | export BOOTIF= | ||
78 | export UBIMTD= | ||
79 | export break= | ||
80 | export init=/sbin/init | ||
81 | export quiet=n | ||
82 | export readonly=y | ||
83 | export rootmnt=/root | ||
84 | export debug= | ||
85 | export panic= | ||
86 | export blacklist= | ||
87 | export resume= | ||
88 | export resume_offset= | ||
89 | export recovery= | ||
90 | |||
91 | for i in /sys/devices/system/cpu/cpu*/online | ||
92 | do | ||
93 | case $i in | ||
94 | '/sys/devices/system/cpu/cpu0/online') | ||
95 | ;; | ||
96 | '/sys/devices/system/cpu/cpu*/online') | ||
97 | ;; | ||
98 | *) | ||
99 | echo 1 > $i | ||
100 | ;; | ||
101 | esac | ||
102 | done | ||
103 | |||
104 | while : | 30 | while : |
105 | do | 31 | do |
106 | sleep 10 | 32 | sleep 10 |
107 | done | 33 | done |
108 | ------------------------------------------------------------------------ | 34 | ------------------------------------------------------------------------ |
109 | 35 | ||
110 | References: | 36 | This approach also allows most of the binaries and libraries in the |
111 | [1]: https://dracut.wiki.kernel.org/index.php/Main_Page | 37 | initrd filesystem to be dispensed with, which can save significant |
112 | [2]: http://blog.elastocloud.org/2015/06/rapid-linux-kernel-devtest-with-qemu.html | 38 | space in rcutorture's "res" directory. |
113 | [3]: https://www.centos.org/forums/viewtopic.php?t=51621 | ||
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h index 891ad13e95b2..d27285f8ee82 100644 --- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h +++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h | |||
@@ -131,8 +131,8 @@ struct hlist_node { | |||
131 | * weird ABI and we need to ask it explicitly. | 131 | * weird ABI and we need to ask it explicitly. |
132 | * | 132 | * |
133 | * The alignment is required to guarantee that bits 0 and 1 of @next will be | 133 | * The alignment is required to guarantee that bits 0 and 1 of @next will be |
134 | * clear under normal conditions -- as long as we use call_rcu(), | 134 | * clear under normal conditions -- as long as we use call_rcu() or |
135 | * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. | 135 | * call_srcu() to queue callback. |
136 | * | 136 | * |
137 | * This guarantee is important for few reasons: | 137 | * This guarantee is important for few reasons: |
138 | * - future call_rcu_lazy() will make use of lower bits in the pointer; | 138 | * - future call_rcu_lazy() will make use of lower bits in the pointer; |
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 7cfdfbc910e0..50e25438fb3c 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active) | |||
196 | */ | 196 | */ |
197 | static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) | 197 | static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) |
198 | { | 198 | { |
199 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); | 199 | lockdep_assert_held(&irq->irq_lock); |
200 | 200 | ||
201 | /* If the interrupt is active, it must stay on the current vcpu */ | 201 | /* If the interrupt is active, it must stay on the current vcpu */ |
202 | if (irq->active) | 202 | if (irq->active) |
@@ -273,7 +273,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu) | |||
273 | { | 273 | { |
274 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 274 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
275 | 275 | ||
276 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); | 276 | lockdep_assert_held(&vgic_cpu->ap_list_lock); |
277 | 277 | ||
278 | list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp); | 278 | list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp); |
279 | } | 279 | } |
@@ -311,7 +311,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, | |||
311 | { | 311 | { |
312 | struct kvm_vcpu *vcpu; | 312 | struct kvm_vcpu *vcpu; |
313 | 313 | ||
314 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); | 314 | lockdep_assert_held(&irq->irq_lock); |
315 | 315 | ||
316 | retry: | 316 | retry: |
317 | vcpu = vgic_target_oracle(irq); | 317 | vcpu = vgic_target_oracle(irq); |
@@ -702,7 +702,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) | |||
702 | static inline void vgic_populate_lr(struct kvm_vcpu *vcpu, | 702 | static inline void vgic_populate_lr(struct kvm_vcpu *vcpu, |
703 | struct vgic_irq *irq, int lr) | 703 | struct vgic_irq *irq, int lr) |
704 | { | 704 | { |
705 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); | 705 | lockdep_assert_held(&irq->irq_lock); |
706 | 706 | ||
707 | if (kvm_vgic_global_state.type == VGIC_V2) | 707 | if (kvm_vgic_global_state.type == VGIC_V2) |
708 | vgic_v2_populate_lr(vcpu, irq, lr); | 708 | vgic_v2_populate_lr(vcpu, irq, lr); |
@@ -736,7 +736,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu, | |||
736 | 736 | ||
737 | *multi_sgi = false; | 737 | *multi_sgi = false; |
738 | 738 | ||
739 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); | 739 | lockdep_assert_held(&vgic_cpu->ap_list_lock); |
740 | 740 | ||
741 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 741 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
742 | int w; | 742 | int w; |
@@ -761,7 +761,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
761 | bool multi_sgi; | 761 | bool multi_sgi; |
762 | u8 prio = 0xff; | 762 | u8 prio = 0xff; |
763 | 763 | ||
764 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); | 764 | lockdep_assert_held(&vgic_cpu->ap_list_lock); |
765 | 765 | ||
766 | count = compute_ap_list_depth(vcpu, &multi_sgi); | 766 | count = compute_ap_list_depth(vcpu, &multi_sgi); |
767 | if (count > kvm_vgic_global_state.nr_lr || multi_sgi) | 767 | if (count > kvm_vgic_global_state.nr_lr || multi_sgi) |