Diffstat (limited to 'Documentation/DocBook/kernel-locking.tmpl')
-rw-r--r--  Documentation/DocBook/kernel-locking.tmpl  16
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index a0d479d1e1dd..67e7ab41c0a6 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1645,7 +1645,9 @@ the amount of locking which needs to be done.
 all the readers who were traversing the list when we deleted the
 element are finished.  We use <function>call_rcu()</function> to
 register a callback which will actually destroy the object once
-the readers are finished.
+all pre-existing readers are finished.  Alternatively,
+<function>synchronize_rcu()</function> may be used to block until
+all pre-existing readers are finished.
 </para>
 <para>
 But how does Read Copy Update know when the readers are
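The two calls named in this hunk differ mainly in whether the updater blocks:
call_rcu() queues a callback and returns at once, while synchronize_rcu()
sleeps until every pre-existing reader has left its read-side critical
section.  The following is a minimal sketch, not part of the patch, assuming
the struct object / cache_lock example this document builds up; the helper
names obj_del_async() and obj_del_sync() are hypothetical.

/*
 * Illustrative only.  <linux/list.h> and <linux/rcupdate.h> provide the
 * list and RCU primitives; struct object, cache_lock, cache_num, and
 * object_put() come from the document's cache example.
 */

/* Asynchronous variant: as in the patched __cache_delete(), which runs
 * with cache_lock held; the callback frees the object later. */
static void obj_del_async(struct object *obj)
{
        list_del_rcu(&obj->list);
        cache_num--;
        call_rcu(&obj->rcu, cache_delete_rcu);
}

/* Synchronous variant: remove under the lock, then block until all
 * pre-existing readers are finished.  synchronize_rcu() sleeps, so it
 * may only be called after cache_lock (a spinlock) has been dropped. */
static void obj_del_sync(struct object *obj)
{
        unsigned long flags;

        spin_lock_irqsave(&cache_lock, flags);
        list_del_rcu(&obj->list);
        cache_num--;
        spin_unlock_irqrestore(&cache_lock, flags);

        synchronize_rcu();
        object_put(obj);        /* drop the reference the cache held */
}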
@@ -1714,7 +1716,7 @@ the amount of locking which needs to be done.
 -        object_put(obj);
 +        list_del_rcu(&amp;obj-&gt;list);
          cache_num--;
-+        call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu, obj);
++        call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu);
  }
 
  /* Must be holding cache_lock */
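The change above drops the third argument that the old three-argument
call_rcu() took: with the two-argument API the callback receives only the
struct rcu_head pointer, and the enclosing object is normally recovered with
container_of().  The patch does not show the callback body, so this is a
hedged sketch of what cache_delete_rcu() could look like under that
convention; the document's actual callback may instead keep a void *
parameter.

static void cache_delete_rcu(struct rcu_head *rcu)
{
        struct object *obj = container_of(rcu, struct object, rcu);

        /* Runs after a grace period: no pre-existing reader still sees obj. */
        object_put(obj);        /* drop the reference the cache held */
}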
@@ -1725,14 +1727,6 @@ the amount of locking which needs to be done.
 if (++cache_num > MAX_CACHE_SIZE) {
          struct object *i, *outcast = NULL;
          list_for_each_entry(i, &amp;cache, list) {
-@@ -85,6 +94,7 @@
-         obj-&gt;popularity = 0;
-         atomic_set(&amp;obj-&gt;refcnt, 1); /* The cache holds a reference */
-         spin_lock_init(&amp;obj-&gt;lock);
-+        INIT_RCU_HEAD(&amp;obj-&gt;rcu);
-
-         spin_lock_irqsave(&amp;cache_lock, flags);
-         __cache_add(obj);
 @@ -104,12 +114,11 @@
  struct object *cache_find(int id)
  {
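The eight lines removed above drop the inner hunk of the embedded example
whose only purpose was to add an INIT_RCU_HEAD() call: a struct rcu_head
embedded in an object needs no run-time initialization before it is handed
to call_rcu(), so only the field declaration is required.  A sketch of the
assumed layout, with fields abbreviated from the document's struct object:

struct object {
        struct list_head list;  /* RCU-protected list linkage */
        struct rcu_head rcu;    /* used by call_rcu(); no INIT_RCU_HEAD() needed */
        int popularity;
        atomic_t refcnt;
        spinlock_t lock;        /* protects the object's own fields */
};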
@@ -1769,7 +1763,7 @@ as it would be on UP.
 There is a furthur optimization possible here: remember our original
 cache code, where there were no reference counts and the caller simply
 held the lock whenever using the object?  This is still possible: if
-you hold the lock, noone can delete the object, so you don't need to
+you hold the lock, no one can delete the object, so you don't need to
 get and put the reference count.
 </para>
 
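As a final illustration of the optimization the last hunk describes: a caller
that already holds cache_lock cannot race with deletion, so it may use the
object directly without object_get()/object_put().  A hedged sketch, not part
of the patch; cache_touch() is a hypothetical helper and __cache_find() is
the example's lock-held lookup.

static void cache_touch(int id)
{
        struct object *obj;
        unsigned long flags;

        spin_lock_irqsave(&cache_lock, flags);
        obj = __cache_find(id);
        if (obj)
                obj->popularity++;      /* no get/put needed: deletion is
                                         * excluded while cache_lock is held */
        spin_unlock_irqrestore(&cache_lock, flags);
}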