author     Paul E. McKenney <paulmck@us.ibm.com>    2005-09-06 18:16:35 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-09-07 19:57:19 -0400
commit     19306059cd7fedaf96b4b0260a9a8a45e513c857 (patch)
tree       7c32d59c1a5830689d5f85a7f81e89e48d1097ae
parent     fe21773d655c2c64641ec2cef499289ea175c817 (diff)
[PATCH] NMI: Update NMI users of RCU to use new API
Uses of RCU for dynamically changeable NMI handlers need to use the new
rcu_dereference() and rcu_assign_pointer() facilities.  This change makes
it clear that these uses are safe from a memory-barrier viewpoint, but the
main purpose is to document exactly what operations are being protected
by RCU.  This has been tested on x86 and x86-64, which are the only
architectures affected by this change.

Signed-off-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--   Documentation/RCU/NMI-RCU.txt   112
-rw-r--r--   arch/i386/kernel/traps.c          4
-rw-r--r--   arch/x86_64/kernel/nmi.c          4
3 files changed, 116 insertions, 4 deletions
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
new file mode 100644
index 000000000000..d0634a5c3445
--- /dev/null
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -0,0 +1,112 @@
Using RCU to Protect Dynamic NMI Handlers


Although RCU is usually used to protect read-mostly data structures,
it is possible to use RCU to provide dynamic non-maskable interrupt
handlers, as well as dynamic irq handlers.  This document describes
how to do this, drawing loosely from Zwane Mwaikambo's NMI-timer
work in "arch/i386/oprofile/nmi_timer_int.c" and in
"arch/i386/kernel/traps.c".

The relevant pieces of code are listed below, each followed by a
brief explanation.

        static int dummy_nmi_callback(struct pt_regs *regs, int cpu)
        {
                return 0;
        }

The dummy_nmi_callback() function is a "dummy" NMI handler that does
nothing but return zero, indicating that it did not handle the NMI and
thus allowing do_nmi() to take the default machine-specific action.

        static nmi_callback_t nmi_callback = dummy_nmi_callback;

This nmi_callback variable is a global function pointer to the current
NMI handler.
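
The nmi_callback_t type is simply a pointer to a function with the same
signature as dummy_nmi_callback() above; its declaration looks roughly
like the following (a sketch, not copied from the architecture headers):

        typedef int (*nmi_callback_t)(struct pt_regs *regs, int cpu);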

        fastcall void do_nmi(struct pt_regs * regs, long error_code)
        {
                int cpu;

                nmi_enter();

                cpu = smp_processor_id();
                ++nmi_count(cpu);

                if (!rcu_dereference(nmi_callback)(regs, cpu))
                        default_do_nmi(regs);

                nmi_exit();
        }

The do_nmi() function processes each NMI.  It first disables preemption
in the same way that a hardware irq would, then increments the per-CPU
count of NMIs.  It then invokes the NMI handler stored in the nmi_callback
function pointer.  If this handler returns zero, do_nmi() invokes the
default_do_nmi() function to handle a machine-specific NMI.  Finally,
preemption is restored.
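
For example, a handler that claims the NMI would return nonzero so that
do_nmi() skips default_do_nmi().  (The following is a hypothetical
illustration; my_nmi_handler() is not taken from the kernel sources.)

        static int my_nmi_handler(struct pt_regs *regs, int cpu)
        {
                /* Respond to the NMI here. */
                return 1;  /* Nonzero: handled, so skip the default action. */
        }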

Strictly speaking, rcu_dereference() is not needed, since this code runs
only on i386, which does not need rcu_dereference() anyway.  However,
it is a good documentation aid, particularly for anyone attempting to
do something similar on Alpha.

Quick Quiz:  Why might the rcu_dereference() be necessary on Alpha,
             given that the code referenced by the pointer is read-only?


Back to the discussion of NMI and RCU...

        void set_nmi_callback(nmi_callback_t callback)
        {
                rcu_assign_pointer(nmi_callback, callback);
        }

The set_nmi_callback() function registers an NMI handler.  Note that any
data that is to be used by the callback must be initialized -before-
the call to set_nmi_callback().  On architectures that do not order
writes, the rcu_assign_pointer() ensures that the NMI handler sees the
initialized values.
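
For illustration, suppose the handler (such as the hypothetical
my_nmi_handler() above) uses a dynamically allocated structure.  Here
struct my_nmi_state, my_nmi_data, and init_my_nmi_state() are made-up
names; my_nmi_data matches the free-up example later in this document.
Registration might then look as follows:

        static struct my_nmi_state *my_nmi_data;

        void my_nmi_register(void)
        {
                my_nmi_data = kmalloc(sizeof(*my_nmi_data), GFP_KERNEL);
                init_my_nmi_state(my_nmi_data);   /* initialize -before-... */
                set_nmi_callback(my_nmi_handler); /* ...publication via rcu_assign_pointer(). */
        }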

        void unset_nmi_callback(void)
        {
                rcu_assign_pointer(nmi_callback, dummy_nmi_callback);
        }

This function unregisters an NMI handler, restoring the original
dummy_nmi_callback().  However, there may well be an NMI handler
currently executing on some other CPU.  We therefore cannot free
up any data structures used by the old NMI handler until execution
of it completes on all other CPUs.

One way to accomplish this is via synchronize_sched(), perhaps as
follows:

        unset_nmi_callback();
        synchronize_sched();
        kfree(my_nmi_data);

This works because synchronize_sched() blocks until all CPUs complete
any preemption-disabled segments of code that they were executing.
Since NMI handlers disable preemption, synchronize_sched() is guaranteed
not to return until all ongoing NMI handlers exit.  It is therefore safe
to free up the handler's data as soon as synchronize_sched() returns.
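
Continuing the hypothetical example from the set_nmi_callback()
discussion above, the complete teardown might be wrapped as:

        void my_nmi_unregister(void)
        {
                unset_nmi_callback();  /* No new invocations of the handler. */
                synchronize_sched();   /* Wait for in-flight NMI handlers to finish. */
                kfree(my_nmi_data);    /* Now safe to free the handler's data. */
                my_nmi_data = NULL;
        }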


Answer to Quick Quiz

        Why might the rcu_dereference() be necessary on Alpha, given
        that the code referenced by the pointer is read-only?

        Answer: The caller to set_nmi_callback() might well have
                initialized some data that is to be used by the
                new NMI handler.  In this case, the rcu_dereference()
                would be needed, because otherwise a CPU that received
                an NMI just after the new handler was set might see
                the pointer to the new NMI handler, but the old,
                pre-initialization contents of the handler's data.

        More important, the rcu_dereference() makes it clear
        to someone reading the code that the pointer is being
        protected by RCU.
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 54629bb5893a..029bf94cda7d 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -657,7 +657,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
 
         ++nmi_count(cpu);
 
-        if (!nmi_callback(regs, cpu))
+        if (!rcu_dereference(nmi_callback)(regs, cpu))
                 default_do_nmi(regs);
 
         nmi_exit();
@@ -665,7 +665,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
 
 void set_nmi_callback(nmi_callback_t callback)
 {
-        nmi_callback = callback;
+        rcu_assign_pointer(nmi_callback, callback);
 }
 EXPORT_SYMBOL_GPL(set_nmi_callback);
 
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 84cae81fff8b..caf164959e19 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -524,14 +524,14 @@ asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
 
         nmi_enter();
         add_pda(__nmi_count,1);
-        if (!nmi_callback(regs, cpu))
+        if (!rcu_dereference(nmi_callback)(regs, cpu))
                 default_do_nmi(regs);
         nmi_exit();
 }
 
 void set_nmi_callback(nmi_callback_t callback)
 {
-        nmi_callback = callback;
+        rcu_assign_pointer(nmi_callback, callback);
 }
 
 void unset_nmi_callback(void)