aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-02-28 14:46:00 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-28 14:46:00 -0500
commitf89db789de2157441d3b5e879a742437ed69cbbc (patch)
treeab768dbb16c3c5c078d9721fee28d1b2b2508392
parent65314ed08e9c4a94ba85f7d52a7ad324050b152e (diff)
parent8312593a55941a0fae2b09731a4f91d87bd796db (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar: "Two documentation updates, plus a debugging annotation fix" * 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/crash: Update the stale comment in reserve_crashkernel() x86/irq, trace: Add __irq_entry annotation to x86's platform IRQ handlers Documentation, x86, resctrl: Recommend locking for resctrlfs
-rw-r--r--Documentation/x86/intel_rdt_ui.txt114
-rw-r--r--arch/x86/kernel/apic/apic.c8
-rw-r--r--arch/x86/kernel/apic/vector.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c6
-rw-r--r--arch/x86/kernel/cpu/mcheck/threshold.c4
-rw-r--r--arch/x86/kernel/irq.c4
-rw-r--r--arch/x86/kernel/irq_work.c5
-rw-r--r--arch/x86/kernel/setup.c4
-rw-r--r--arch/x86/kernel/smp.c15
10 files changed, 144 insertions, 22 deletions
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
index d918d268cd72..51cf6fa5591f 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -212,3 +212,117 @@ Finally we move core 4-7 over to the new group and make sure that the
212kernel and the tasks running there get 50% of the cache. 212kernel and the tasks running there get 50% of the cache.
213 213
214# echo C0 > p0/cpus 214# echo C0 > p0/cpus
215
2164) Locking between applications
217
218Certain operations on the resctrl filesystem, composed of read/writes
219to/from multiple files, must be atomic.
220
221As an example, the allocation of an exclusive reservation of L3 cache
222involves:
223
224 1. Read the cbmmasks from each directory
225 2. Find a contiguous set of bits in the global CBM bitmask that is clear
226 in any of the directory cbmmasks
227 3. Create a new directory
228 4. Set the bits found in step 2 to the new directory "schemata" file
229
230If two applications attempt to allocate space concurrently then they can
231end up allocating the same bits so the reservations are shared instead of
232exclusive.
233
234To coordinate atomic operations on the resctrlfs and to avoid the problem
235above, the following locking procedure is recommended:
236
237Locking is based on flock, which is available in libc and also as a shell
238script command
239
240Write lock:
241
242 A) Take flock(LOCK_EX) on /sys/fs/resctrl
243 B) Read/write the directory structure.
244 C) Release the lock with flock(LOCK_UN)
245
246Read lock:
247
248 A) Take flock(LOCK_SH) on /sys/fs/resctrl
249 B) If successful, read the directory structure.
250 C) Release the lock with flock(LOCK_UN)
251
252Example with bash:
253
254# Atomically read directory structure
255$ flock -s /sys/fs/resctrl/ find /sys/fs/resctrl
256
257# Read directory contents and create new subdirectory
258
259$ cat create-dir.sh
260find /sys/fs/resctrl/ > output.txt
261mask = function-of(output.txt)
262mkdir /sys/fs/resctrl/newres/
263echo mask > /sys/fs/resctrl/newres/schemata
264
265$ flock /sys/fs/resctrl/ ./create-dir.sh
266
267Example with C:
268
269/*
270 * Example code to take advisory locks
271 * before accessing resctrl filesystem
272 */
273#include <sys/file.h>
274#include <stdlib.h>
275
276void resctrl_take_shared_lock(int fd)
277{
278 int ret;
279
280 /* take shared lock on resctrl filesystem */
281 ret = flock(fd, LOCK_SH);
282 if (ret) {
283 perror("flock");
284 exit(-1);
285 }
286}
287
288void resctrl_take_exclusive_lock(int fd)
289{
290 int ret;
291
292	/* take exclusive lock on resctrl filesystem */
293 ret = flock(fd, LOCK_EX);
294 if (ret) {
295 perror("flock");
296 exit(-1);
297 }
298}
299
300void resctrl_release_lock(int fd)
301{
302 int ret;
303
304	/* release lock on resctrl filesystem */
305 ret = flock(fd, LOCK_UN);
306 if (ret) {
307 perror("flock");
308 exit(-1);
309 }
310}
311
312int main(void)
313{
314 int fd, ret;
315
316 fd = open("/sys/fs/resctrl", O_DIRECTORY);
317 if (fd == -1) {
318 perror("open");
319 exit(-1);
320 }
321 resctrl_take_shared_lock(fd);
322 /* code to read directory contents */
323 resctrl_release_lock(fd);
324
325 resctrl_take_exclusive_lock(fd);
326 /* code to read and write directory contents */
327 resctrl_release_lock(fd);
328}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 8567c851172c..4261b3282ad9 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1865,14 +1865,14 @@ static void __smp_spurious_interrupt(u8 vector)
1865 "should never happen.\n", vector, smp_processor_id()); 1865 "should never happen.\n", vector, smp_processor_id());
1866} 1866}
1867 1867
1868__visible void smp_spurious_interrupt(struct pt_regs *regs) 1868__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
1869{ 1869{
1870 entering_irq(); 1870 entering_irq();
1871 __smp_spurious_interrupt(~regs->orig_ax); 1871 __smp_spurious_interrupt(~regs->orig_ax);
1872 exiting_irq(); 1872 exiting_irq();
1873} 1873}
1874 1874
1875__visible void smp_trace_spurious_interrupt(struct pt_regs *regs) 1875__visible void __irq_entry smp_trace_spurious_interrupt(struct pt_regs *regs)
1876{ 1876{
1877 u8 vector = ~regs->orig_ax; 1877 u8 vector = ~regs->orig_ax;
1878 1878
@@ -1923,14 +1923,14 @@ static void __smp_error_interrupt(struct pt_regs *regs)
1923 1923
1924} 1924}
1925 1925
1926__visible void smp_error_interrupt(struct pt_regs *regs) 1926__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
1927{ 1927{
1928 entering_irq(); 1928 entering_irq();
1929 __smp_error_interrupt(regs); 1929 __smp_error_interrupt(regs);
1930 exiting_irq(); 1930 exiting_irq();
1931} 1931}
1932 1932
1933__visible void smp_trace_error_interrupt(struct pt_regs *regs) 1933__visible void __irq_entry smp_trace_error_interrupt(struct pt_regs *regs)
1934{ 1934{
1935 entering_irq(); 1935 entering_irq();
1936 trace_error_apic_entry(ERROR_APIC_VECTOR); 1936 trace_error_apic_entry(ERROR_APIC_VECTOR);
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 5d30c5e42bb1..f3557a1eb562 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -559,7 +559,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
559 __send_cleanup_vector(data); 559 __send_cleanup_vector(data);
560} 560}
561 561
562asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) 562asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
563{ 563{
564 unsigned vector, me; 564 unsigned vector, me;
565 565
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9e5427df3243..524cc5780a77 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -816,14 +816,14 @@ static inline void __smp_deferred_error_interrupt(void)
816 deferred_error_int_vector(); 816 deferred_error_int_vector();
817} 817}
818 818
819asmlinkage __visible void smp_deferred_error_interrupt(void) 819asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
820{ 820{
821 entering_irq(); 821 entering_irq();
822 __smp_deferred_error_interrupt(); 822 __smp_deferred_error_interrupt();
823 exiting_ack_irq(); 823 exiting_ack_irq();
824} 824}
825 825
826asmlinkage __visible void smp_trace_deferred_error_interrupt(void) 826asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
827{ 827{
828 entering_irq(); 828 entering_irq();
829 trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR); 829 trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 85469f84c921..d7cc190ae457 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -396,14 +396,16 @@ static inline void __smp_thermal_interrupt(void)
396 smp_thermal_vector(); 396 smp_thermal_vector();
397} 397}
398 398
399asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs) 399asmlinkage __visible void __irq_entry
400smp_thermal_interrupt(struct pt_regs *regs)
400{ 401{
401 entering_irq(); 402 entering_irq();
402 __smp_thermal_interrupt(); 403 __smp_thermal_interrupt();
403 exiting_ack_irq(); 404 exiting_ack_irq();
404} 405}
405 406
406asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs) 407asmlinkage __visible void __irq_entry
408smp_trace_thermal_interrupt(struct pt_regs *regs)
407{ 409{
408 entering_irq(); 410 entering_irq();
409 trace_thermal_apic_entry(THERMAL_APIC_VECTOR); 411 trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index 9beb092d68a5..bb0e75eed10a 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -23,14 +23,14 @@ static inline void __smp_threshold_interrupt(void)
23 mce_threshold_vector(); 23 mce_threshold_vector();
24} 24}
25 25
26asmlinkage __visible void smp_threshold_interrupt(void) 26asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
27{ 27{
28 entering_irq(); 28 entering_irq();
29 __smp_threshold_interrupt(); 29 __smp_threshold_interrupt();
30 exiting_ack_irq(); 30 exiting_ack_irq();
31} 31}
32 32
33asmlinkage __visible void smp_trace_threshold_interrupt(void) 33asmlinkage __visible void __irq_entry smp_trace_threshold_interrupt(void)
34{ 34{
35 entering_irq(); 35 entering_irq();
36 trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR); 36 trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 7c6e9ffe4424..4d8183b5f113 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -264,7 +264,7 @@ void __smp_x86_platform_ipi(void)
264 x86_platform_ipi_callback(); 264 x86_platform_ipi_callback();
265} 265}
266 266
267__visible void smp_x86_platform_ipi(struct pt_regs *regs) 267__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
268{ 268{
269 struct pt_regs *old_regs = set_irq_regs(regs); 269 struct pt_regs *old_regs = set_irq_regs(regs);
270 270
@@ -315,7 +315,7 @@ __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
315} 315}
316#endif 316#endif
317 317
318__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs) 318__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
319{ 319{
320 struct pt_regs *old_regs = set_irq_regs(regs); 320 struct pt_regs *old_regs = set_irq_regs(regs);
321 321
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index 3512ba607361..275487872be2 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -9,6 +9,7 @@
9#include <linux/hardirq.h> 9#include <linux/hardirq.h>
10#include <asm/apic.h> 10#include <asm/apic.h>
11#include <asm/trace/irq_vectors.h> 11#include <asm/trace/irq_vectors.h>
12#include <linux/interrupt.h>
12 13
13static inline void __smp_irq_work_interrupt(void) 14static inline void __smp_irq_work_interrupt(void)
14{ 15{
@@ -16,14 +17,14 @@ static inline void __smp_irq_work_interrupt(void)
16 irq_work_run(); 17 irq_work_run();
17} 18}
18 19
19__visible void smp_irq_work_interrupt(struct pt_regs *regs) 20__visible void __irq_entry smp_irq_work_interrupt(struct pt_regs *regs)
20{ 21{
21 ipi_entering_ack_irq(); 22 ipi_entering_ack_irq();
22 __smp_irq_work_interrupt(); 23 __smp_irq_work_interrupt();
23 exiting_irq(); 24 exiting_irq();
24} 25}
25 26
26__visible void smp_trace_irq_work_interrupt(struct pt_regs *regs) 27__visible void __irq_entry smp_trace_irq_work_interrupt(struct pt_regs *regs)
27{ 28{
28 ipi_entering_ack_irq(); 29 ipi_entering_ack_irq();
29 trace_irq_work_entry(IRQ_WORK_VECTOR); 30 trace_irq_work_entry(IRQ_WORK_VECTOR);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 69780edf0dde..4bf0c8926a1c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -575,7 +575,9 @@ static void __init reserve_crashkernel(void)
575 /* 0 means: find the address automatically */ 575 /* 0 means: find the address automatically */
576 if (crash_base <= 0) { 576 if (crash_base <= 0) {
577 /* 577 /*
578 * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX 578 * Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
579 * as old kexec-tools loads bzImage below that, unless
580 * "crashkernel=size[KMG],high" is specified.
579 */ 581 */
580 crash_base = memblock_find_in_range(CRASH_ALIGN, 582 crash_base = memblock_find_in_range(CRASH_ALIGN,
581 high ? CRASH_ADDR_HIGH_MAX 583 high ? CRASH_ADDR_HIGH_MAX
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 68f8cc222f25..d3c66a15bbde 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -259,7 +259,7 @@ static inline void __smp_reschedule_interrupt(void)
259 scheduler_ipi(); 259 scheduler_ipi();
260} 260}
261 261
262__visible void smp_reschedule_interrupt(struct pt_regs *regs) 262__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
263{ 263{
264 ack_APIC_irq(); 264 ack_APIC_irq();
265 __smp_reschedule_interrupt(); 265 __smp_reschedule_interrupt();
@@ -268,7 +268,7 @@ __visible void smp_reschedule_interrupt(struct pt_regs *regs)
268 */ 268 */
269} 269}
270 270
271__visible void smp_trace_reschedule_interrupt(struct pt_regs *regs) 271__visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs)
272{ 272{
273 /* 273 /*
274 * Need to call irq_enter() before calling the trace point. 274 * Need to call irq_enter() before calling the trace point.
@@ -292,14 +292,15 @@ static inline void __smp_call_function_interrupt(void)
292 inc_irq_stat(irq_call_count); 292 inc_irq_stat(irq_call_count);
293} 293}
294 294
295__visible void smp_call_function_interrupt(struct pt_regs *regs) 295__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
296{ 296{
297 ipi_entering_ack_irq(); 297 ipi_entering_ack_irq();
298 __smp_call_function_interrupt(); 298 __smp_call_function_interrupt();
299 exiting_irq(); 299 exiting_irq();
300} 300}
301 301
302__visible void smp_trace_call_function_interrupt(struct pt_regs *regs) 302__visible void __irq_entry
303smp_trace_call_function_interrupt(struct pt_regs *regs)
303{ 304{
304 ipi_entering_ack_irq(); 305 ipi_entering_ack_irq();
305 trace_call_function_entry(CALL_FUNCTION_VECTOR); 306 trace_call_function_entry(CALL_FUNCTION_VECTOR);
@@ -314,14 +315,16 @@ static inline void __smp_call_function_single_interrupt(void)
314 inc_irq_stat(irq_call_count); 315 inc_irq_stat(irq_call_count);
315} 316}
316 317
317__visible void smp_call_function_single_interrupt(struct pt_regs *regs) 318__visible void __irq_entry
319smp_call_function_single_interrupt(struct pt_regs *regs)
318{ 320{
319 ipi_entering_ack_irq(); 321 ipi_entering_ack_irq();
320 __smp_call_function_single_interrupt(); 322 __smp_call_function_single_interrupt();
321 exiting_irq(); 323 exiting_irq();
322} 324}
323 325
324__visible void smp_trace_call_function_single_interrupt(struct pt_regs *regs) 326__visible void __irq_entry
327smp_trace_call_function_single_interrupt(struct pt_regs *regs)
325{ 328{
326 ipi_entering_ack_irq(); 329 ipi_entering_ack_irq();
327 trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR); 330 trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);