diff options
author | Dmitry Kasatkin <dmitry.kasatkin@intel.com> | 2012-02-08 14:15:42 -0500 |
---|---|---|
committer | Mimi Zohar <zohar@linux.vnet.ibm.com> | 2012-09-07 14:57:46 -0400 |
commit | a10bf26b2f53242836e9362c6c9c857b627b82a9 (patch) | |
tree | 98c7b83684f1df42571013af4c0572c7eeea8e76 /security/integrity/iint.c | |
parent | bf2276d10ce58ff44ab8857266a6718024496af6 (diff) |
ima: replace iint spinlock with rwlock/read_lock
For performance, replace the iint spinlock with rwlock/read_lock.
Eric Paris questioned this change, from spinlocks to rwlocks, saying
"rwlocks have been shown to actually be slower on multi processor
systems in a number of cases due to the cache line bouncing required."
Based on performance measurements compiling the kernel on a cold
boot with multiple jobs with/without this patch, Dmitry Kasatkin
and I found that rwlocks performed better than spinlocks, but only
insignificantly. For example, with a total compilation time of around 6
minutes, the rwlock build was 1 - 3 seconds faster — consistently so
across runs.
Changelog v2:
- new patch taken from the 'allocating iint improvements' patch
Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@intel.com>
Signed-off-by: Mimi Zohar <zohar@us.ibm.com>
Diffstat (limited to 'security/integrity/iint.c')
-rw-r--r-- | security/integrity/iint.c | 16 |
1 file changed, 7 insertions, 9 deletions
diff --git a/security/integrity/iint.c b/security/integrity/iint.c index c91a436e13ac..d82a5a13d855 100644 --- a/security/integrity/iint.c +++ b/security/integrity/iint.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include "integrity.h" | 22 | #include "integrity.h" |
23 | 23 | ||
24 | static struct rb_root integrity_iint_tree = RB_ROOT; | 24 | static struct rb_root integrity_iint_tree = RB_ROOT; |
25 | static DEFINE_SPINLOCK(integrity_iint_lock); | 25 | static DEFINE_RWLOCK(integrity_iint_lock); |
26 | static struct kmem_cache *iint_cache __read_mostly; | 26 | static struct kmem_cache *iint_cache __read_mostly; |
27 | 27 | ||
28 | int iint_initialized; | 28 | int iint_initialized; |
@@ -35,8 +35,6 @@ static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode) | |||
35 | struct integrity_iint_cache *iint; | 35 | struct integrity_iint_cache *iint; |
36 | struct rb_node *n = integrity_iint_tree.rb_node; | 36 | struct rb_node *n = integrity_iint_tree.rb_node; |
37 | 37 | ||
38 | assert_spin_locked(&integrity_iint_lock); | ||
39 | |||
40 | while (n) { | 38 | while (n) { |
41 | iint = rb_entry(n, struct integrity_iint_cache, rb_node); | 39 | iint = rb_entry(n, struct integrity_iint_cache, rb_node); |
42 | 40 | ||
@@ -63,9 +61,9 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode) | |||
63 | if (!IS_IMA(inode)) | 61 | if (!IS_IMA(inode)) |
64 | return NULL; | 62 | return NULL; |
65 | 63 | ||
66 | spin_lock(&integrity_iint_lock); | 64 | read_lock(&integrity_iint_lock); |
67 | iint = __integrity_iint_find(inode); | 65 | iint = __integrity_iint_find(inode); |
68 | spin_unlock(&integrity_iint_lock); | 66 | read_unlock(&integrity_iint_lock); |
69 | 67 | ||
70 | return iint; | 68 | return iint; |
71 | } | 69 | } |
@@ -100,7 +98,7 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode) | |||
100 | if (!iint) | 98 | if (!iint) |
101 | return NULL; | 99 | return NULL; |
102 | 100 | ||
103 | spin_lock(&integrity_iint_lock); | 101 | write_lock(&integrity_iint_lock); |
104 | 102 | ||
105 | p = &integrity_iint_tree.rb_node; | 103 | p = &integrity_iint_tree.rb_node; |
106 | while (*p) { | 104 | while (*p) { |
@@ -119,7 +117,7 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode) | |||
119 | rb_link_node(node, parent, p); | 117 | rb_link_node(node, parent, p); |
120 | rb_insert_color(node, &integrity_iint_tree); | 118 | rb_insert_color(node, &integrity_iint_tree); |
121 | 119 | ||
122 | spin_unlock(&integrity_iint_lock); | 120 | write_unlock(&integrity_iint_lock); |
123 | return iint; | 121 | return iint; |
124 | } | 122 | } |
125 | 123 | ||
@@ -136,10 +134,10 @@ void integrity_inode_free(struct inode *inode) | |||
136 | if (!IS_IMA(inode)) | 134 | if (!IS_IMA(inode)) |
137 | return; | 135 | return; |
138 | 136 | ||
139 | spin_lock(&integrity_iint_lock); | 137 | write_lock(&integrity_iint_lock); |
140 | iint = __integrity_iint_find(inode); | 138 | iint = __integrity_iint_find(inode); |
141 | rb_erase(&iint->rb_node, &integrity_iint_tree); | 139 | rb_erase(&iint->rb_node, &integrity_iint_tree); |
142 | spin_unlock(&integrity_iint_lock); | 140 | write_unlock(&integrity_iint_lock); |
143 | 141 | ||
144 | iint_free(iint); | 142 | iint_free(iint); |
145 | } | 143 | } |