Diffstat (limited to 'Documentation/RCU/rcuref.txt')
-rw-r--r--  Documentation/RCU/rcuref.txt | 61
1 file changed, 59 insertions, 2 deletions

diff --git a/Documentation/RCU/rcuref.txt b/Documentation/RCU/rcuref.txt
index 4202ad093130..141d531aa14b 100644
--- a/Documentation/RCU/rcuref.txt
+++ b/Documentation/RCU/rcuref.txt
@@ -20,7 +20,7 @@ release_referenced()                  delete()
 {                                       {
     ...                                     write_lock(&list_lock);
     atomic_dec(&el->rc, relfunc)            ...
-    ...                                     delete_element
+    ...                                     remove_element
 }                                           write_unlock(&list_lock);
                                             ...
                                             if (atomic_dec_and_test(&el->rc))
@@ -52,7 +52,7 @@ release_referenced()                  delete()
 {                                       {
     ...                                     spin_lock(&list_lock);
     if (atomic_dec_and_test(&el->rc))       ...
-        call_rcu(&el->head, el_free);       delete_element
+        call_rcu(&el->head, el_free);       remove_element
     ...                                     spin_unlock(&list_lock);
 }                                           ...
                                             if (atomic_dec_and_test(&el->rc))
@@ -64,3 +64,60 @@ Sometimes, a reference to the element needs to be obtained in the
 update (write) stream. In such cases, atomic_inc_not_zero() might be
 overkill, since we hold the update-side spinlock. One might instead
 use atomic_inc() in such cases.
+
+It is not always convenient to deal with "FAIL" in the
+search_and_reference() code path. In such cases, the
+atomic_dec_and_test() may be moved from delete() to el_free()
+as follows:
+
+1.                                      2.
+add()                                   search_and_reference()
+{                                       {
+    alloc_object                            rcu_read_lock();
+    ...                                     search_for_element
+    atomic_set(&el->rc, 1);                 atomic_inc(&el->rc);
+    spin_lock(&list_lock);                  ...
+
+    add_element                             rcu_read_unlock();
+    ...                                 }
+    spin_unlock(&list_lock);            4.
+}                                       delete()
+3.                                      {
+release_referenced()                        spin_lock(&list_lock);
+{                                           ...
+    ...                                     remove_element
+    if (atomic_dec_and_test(&el->rc))       spin_unlock(&list_lock);
+        kfree(el);                          ...
+    ...                                     call_rcu(&el->head, el_free);
+}                                           ...
+5.                                      }
+void el_free(struct rcu_head *rhp)
+{
+    release_referenced();
+}
+
+The key point is that the initial reference added by add() is not removed
+until after a grace period has elapsed following removal. This means that
+search_and_reference() cannot find this element, which means that the value
+of el->rc cannot increase. Thus, once it reaches zero, there are no
+readers that can or ever will be able to reference the element. The
+element can therefore safely be freed. This in turn guarantees that if
+any reader finds the element, that reader may safely acquire a reference
+without checking the value of the reference counter.
+
+In cases where delete() can sleep, synchronize_rcu() can be called from
+delete(), so that el_free() can be subsumed into delete() as follows:
+
+4.
+delete()
+{
+    spin_lock(&list_lock);
+    ...
+    remove_element
+    spin_unlock(&list_lock);
+    ...
+    synchronize_rcu();
+    if (atomic_dec_and_test(&el->rc))
+        kfree(el);
+    ...
+}
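
For readers following the new text, below is a rough, self-contained C sketch
of the scheme this patch documents. It is not part of the patch: the element
type (struct el), the list head (el_list), the lock name (el_lock), and the
integer search key are made-up names used only to turn the two-column
pseudocode above into something closer to buildable kernel code.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical element type, for illustration only. */
struct el {
        struct list_head list;
        struct rcu_head head;
        atomic_t rc;
        int key;
};

static LIST_HEAD(el_list);
static DEFINE_SPINLOCK(el_lock);

/* 1. Publish a new element holding the initial reference (rc == 1). */
static int add(int key)
{
        struct el *el = kmalloc(sizeof(*el), GFP_KERNEL);

        if (!el)
                return -ENOMEM;
        el->key = key;
        atomic_set(&el->rc, 1);         /* Dropped only in el_free(). */
        spin_lock(&el_lock);
        list_add_rcu(&el->list, &el_list);
        spin_unlock(&el_lock);
        return 0;
}

/* 3. Drop a reference; free the element once the count reaches zero. */
static void release_referenced(struct el *el)
{
        if (atomic_dec_and_test(&el->rc))
                kfree(el);
}

/* 2. No "FAIL" path: any element still visible to readers has rc >= 1. */
static struct el *search_and_reference(int key)
{
        struct el *el;

        rcu_read_lock();
        list_for_each_entry_rcu(el, &el_list, list) {
                if (el->key == key) {
                        atomic_inc(&el->rc);    /* No zero check needed. */
                        rcu_read_unlock();
                        return el;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/* 5. Runs after a grace period; drops add()'s initial reference. */
static void el_free(struct rcu_head *rhp)
{
        release_referenced(container_of(rhp, struct el, head));
}

/* 4. Unlink the element, then hand the initial reference to el_free(). */
static void delete(struct el *el)
{
        spin_lock(&el_lock);
        list_del_rcu(&el->list);
        spin_unlock(&el_lock);
        call_rcu(&el->head, el_free);
}

The guarantee stated in the patch text falls out directly: because the
initial reference is not dropped until el_free() runs after a grace period,
any element search_and_reference() can still see has el->rc >= 1, so readers
may take a reference without checking for zero.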
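
A matching sketch for the sleepable delete() variant at the end of the patch,
reusing the same hypothetical names (the function name delete_sleepable is
likewise made up): waiting inline in synchronize_rcu() lets delete() drop the
initial reference itself, so no el_free() callback is needed.

/* 4. (sleepable variant) Wait for readers, then drop the initial reference. */
static void delete_sleepable(struct el *el)
{
        spin_lock(&el_lock);
        list_del_rcu(&el->list);
        spin_unlock(&el_lock);

        synchronize_rcu();      /* All pre-existing readers have finished. */

        if (atomic_dec_and_test(&el->rc))
                kfree(el);
}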