path: root/security/keys
author		David Howells <dhowells@redhat.com>	2011-08-22 09:09:36 -0400
committer	James Morris <jmorris@namei.org>	2011-08-22 19:57:37 -0400
commit		0c061b5707ab84ebfe8f18f1c9c3110ae5cd6073 (patch)
tree		cb6e83458126f3cc9ef9f5504937c8445f790b0f /security/keys
parent		d199798bdf969873f78d48140600ff0a98a87e69 (diff)
KEYS: Correctly destroy key payloads when their keytype is removed
unregister_key_type() has code to mark a key as dead and make it unavailable in
one loop and then destroy all those unavailable key payloads in the next loop.
However, the loop to mark keys dead renders the key undetectable to the second
loop by changing the key type pointer also.

Fix this by the following means:

 (1) The key code has two garbage collectors: one deletes unreferenced keys and
     the other alters keyrings to delete links to old dead, revoked and expired
     keys.  They can end up holding each other up as both want to scan the key
     serial tree under spinlock.  Combine these into a single routine.

 (2) Move the dead key marking, dead link removal and dead key removal into the
     garbage collector as a three phase process running over the three cycles
     of the normal garbage collection procedure.  This is tracked by the
     KEY_GC_REAPING_DEAD_1, _2 and _3 state flags.

     unregister_key_type() then just unlinks the key type from the list, wakes
     up the garbage collector and waits for the third phase to complete.

 (3) Downgrade the key_types_sem in unregister_key_type() once it has deleted
     the key type from the list so that it doesn't block the keyctl() syscall.

 (4) Dead keys that cannot be simply removed in the third phase have their
     payloads destroyed with the key's semaphore write-locked to prevent
     interference by the keyctl() syscall.  There should be no in-kernel users
     of dead keys of that type by the point of unregistration, though keyctl()
     may be holding a reference.

 (5) Only perform timer recalculation in the GC if the timer actually expired.
     If it didn't, we'll get another cycle when it goes off - and if the key
     that actually triggered it has been removed, it's not a problem.

 (6) Only garbage collect links if the timer expired or if we're doing dead key
     clean-up phase 2.

 (7) As only key_garbage_collector() is permitted to use rb_erase() on the key
     serial tree, it doesn't need to revalidate its cursor after dropping the
     spinlock as the node the cursor points to must still exist in the tree.

 (8) Drop the spinlock in the GC if there is contention on it or if we need to
     reschedule.  After dealing with that, take the spinlock again and resume
     scanning.

This has been tested in the following ways:

 (1) Run the keyutils testsuite against it.

 (2) Using the AF_RXRPC and RxKAD modules to test keytype removal:

     Load the rxrpc_s key type:

	# insmod /tmp/af-rxrpc.ko
	# insmod /tmp/rxkad.ko

     Create a key (http://people.redhat.com/~dhowells/rxrpc/listen.c):

	# /tmp/listen &
	[1] 8173

     Find the key:

	# grep rxrpc_s /proc/keys
	091086e1 I--Q--  1 perm 39390000  0  0 rxrpc_s  52:2

     Link it to a session keyring, preferably one with a higher serial number:

	# keyctl link 0x20e36251 @s

     Kill the process (the key should remain as it's linked to another place):

	# fg
	/tmp/listen
	^C

     Remove the key type:

	rmmod rxkad
	rmmod af-rxrpc

     This can be made a more effective test by altering the following part of
     the patch:

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
		/* Make sure everyone revalidates their keys if we marked a
		 * bunch as being dead and make sure all keyring ex-payloads
		 * are destroyed.
		 */
		kdebug("dead sync");
		synchronize_rcu();

     to call synchronize_rcu() in GC phase 1 instead.  That causes the
     keyring's old payload content to hang around for longer - until it's RCU
     destroyed, which usually happens after GC phase 3 is complete.  This
     allows the destroy_dead_key branch to be tested.

Reported-by: Benjamin Coddington <bcodding@gmail.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
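As a rough stand-alone illustration of how the three dead-key phases ride on
successive GC cycles (point (2) above), the following user-space sketch
exercises just the gc_state bookkeeping from gc.c.  The flag values mirror the
ones the patch adds; start_pass(), its keytype_being_removed argument and the
main() driver are invented for this example and are not part of the patch.

	#include <stdio.h>

	/* Flag values mirroring the patch; only the phase-advance logic is real. */
	#define KEY_GC_REAPING_DEAD_1	0x10	/* mark keys of the dead type */
	#define KEY_GC_REAPING_DEAD_2	0x20	/* reap links to dead keys */
	#define KEY_GC_REAPING_DEAD_3	0x40	/* destroy remaining dead keys */

	static unsigned char gc_state;

	/* Start one GC pass: carry forward only the dead-key phases and advance
	 * them one step (DEAD_1 -> DEAD_2 -> DEAD_3), as the patch does at the
	 * top of key_garbage_collector().
	 */
	static void start_pass(int keytype_being_removed)
	{
		gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
		gc_state <<= 1;
		if (keytype_being_removed)
			gc_state |= KEY_GC_REAPING_DEAD_1;
	}

	int main(void)
	{
		start_pass(1);	/* unregister_key_type() requested a reap */
		printf("cycle 1: gc_state=%#x (mark dead keys)\n", gc_state);
		start_pass(0);
		printf("cycle 2: gc_state=%#x (reap links to dead keys)\n", gc_state);
		start_pass(0);
		printf("cycle 3: gc_state=%#x (destroy dead keys, wake waiter)\n", gc_state);
		return 0;
	}

Built with any C compiler this prints 0x10, 0x20 and 0x40 in turn, which is the
ordering unregister_key_type() relies on when it sleeps in wait_on_bit() until
the third phase clears KEY_GC_REAPING_KEYTYPE.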
Diffstat (limited to 'security/keys')
-rw-r--r--  security/keys/gc.c        | 411
-rw-r--r--  security/keys/internal.h  |   4
-rw-r--r--  security/keys/key.c       |  51
3 files changed, 258 insertions, 208 deletions
diff --git a/security/keys/gc.c b/security/keys/gc.c
index d67e88b791f2..bf4d8da5a795 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -1,6 +1,6 @@
 /* Key garbage collector
  *
- * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -23,21 +23,31 @@ unsigned key_gc_delay = 5 * 60;
 /*
  * Reaper for unused keys.
  */
-static void key_gc_unused_keys(struct work_struct *work);
-DECLARE_WORK(key_gc_unused_work, key_gc_unused_keys);
+static void key_garbage_collector(struct work_struct *work);
+DECLARE_WORK(key_gc_work, key_garbage_collector);
 
 /*
  * Reaper for links from keyrings to dead keys.
  */
 static void key_gc_timer_func(unsigned long);
-static void key_gc_dead_links(struct work_struct *);
 static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
-static DECLARE_WORK(key_gc_work, key_gc_dead_links);
-static key_serial_t key_gc_cursor;	/* the last key the gc considered */
-static bool key_gc_again;
-static unsigned long key_gc_executing;
+
 static time_t key_gc_next_run = LONG_MAX;
-static time_t key_gc_new_timer;
+static struct key_type *key_gc_dead_keytype;
+
+static unsigned long key_gc_flags;
+#define KEY_GC_KEY_EXPIRED	0	/* A key expired and needs unlinking */
+#define KEY_GC_REAP_KEYTYPE	1	/* A keytype is being unregistered */
+#define KEY_GC_REAPING_KEYTYPE	2	/* Cleared when keytype reaped */
+
+
+/*
+ * Any key whose type gets unregistered will be re-typed to this if it can't be
+ * immediately unlinked.
+ */
+struct key_type key_type_dead = {
+	.name = "dead",
+};
 
 /*
  * Schedule a garbage collection run.
@@ -50,31 +60,75 @@ void key_schedule_gc(time_t gc_at)
 
 	kenter("%ld", gc_at - now);
 
-	if (gc_at <= now) {
+	if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
+		kdebug("IMMEDIATE");
 		queue_work(system_nrt_wq, &key_gc_work);
 	} else if (gc_at < key_gc_next_run) {
+		kdebug("DEFERRED");
+		key_gc_next_run = gc_at;
 		expires = jiffies + (gc_at - now) * HZ;
 		mod_timer(&key_gc_timer, expires);
 	}
 }
 
 /*
- * The garbage collector timer kicked off
+ * Some key's cleanup time was met after it expired, so we need to get the
+ * reaper to go through a cycle finding expired keys.
  */
 static void key_gc_timer_func(unsigned long data)
 {
 	kenter("");
 	key_gc_next_run = LONG_MAX;
+	set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
 	queue_work(system_nrt_wq, &key_gc_work);
 }
 
 /*
+ * wait_on_bit() sleep function for uninterruptible waiting
+ */
+static int key_gc_wait_bit(void *flags)
+{
+	schedule();
+	return 0;
+}
+
+/*
+ * Reap keys of dead type.
+ *
+ * We use three flags to make sure we see three complete cycles of the garbage
+ * collector: the first to mark keys of that type as being dead, the second to
+ * collect dead links and the third to clean up the dead keys.  We have to be
+ * careful as there may already be a cycle in progress.
+ *
+ * The caller must be holding key_types_sem.
+ */
+void key_gc_keytype(struct key_type *ktype)
+{
+	kenter("%s", ktype->name);
+
+	key_gc_dead_keytype = ktype;
+	set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+	smp_mb();
+	set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
+
+	kdebug("schedule");
+	queue_work(system_nrt_wq, &key_gc_work);
+
+	kdebug("sleep");
+	wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
+		    TASK_UNINTERRUPTIBLE);
+
+	key_gc_dead_keytype = NULL;
+	kleave("");
+}
+
+/*
  * Garbage collect pointers from a keyring.
  *
- * Return true if we altered the keyring.
+ * Not called with any locks held.  The keyring's key struct will not be
+ * deallocated under us as only our caller may deallocate it.
  */
-static bool key_gc_keyring(struct key *keyring, time_t limit)
-	__releases(key_serial_lock)
+static void key_gc_keyring(struct key *keyring, time_t limit)
 {
 	struct keyring_list *klist;
 	struct key *key;
@@ -101,134 +155,49 @@ static bool key_gc_keyring(struct key *keyring, time_t limit)
 unlock_dont_gc:
 	rcu_read_unlock();
 dont_gc:
-	kleave(" = false");
-	return false;
+	kleave(" [no gc]");
+	return;
 
 do_gc:
 	rcu_read_unlock();
-	key_gc_cursor = keyring->serial;
-	key_get(keyring);
-	spin_unlock(&key_serial_lock);
+
 	keyring_gc(keyring, limit);
-	key_put(keyring);
-	kleave(" = true");
-	return true;
+	kleave(" [gc]");
 }
 
 /*
- * Garbage collector for links to dead keys.
- *
- * This involves scanning the keyrings for dead, expired and revoked keys that
- * have overstayed their welcome
+ * Garbage collect an unreferenced, detached key
  */
-static void key_gc_dead_links(struct work_struct *work)
+static noinline void key_gc_unused_key(struct key *key)
 {
-	struct rb_node *rb;
-	key_serial_t cursor;
-	struct key *key, *xkey;
-	time_t new_timer = LONG_MAX, limit, now;
-
-	now = current_kernel_time().tv_sec;
-	kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);
-
-	if (test_and_set_bit(0, &key_gc_executing)) {
-		key_schedule_gc(current_kernel_time().tv_sec + 1);
-		kleave(" [busy; deferring]");
-		return;
-	}
-
-	limit = now;
-	if (limit > key_gc_delay)
-		limit -= key_gc_delay;
-	else
-		limit = key_gc_delay;
-
-	spin_lock(&key_serial_lock);
+	key_check(key);
 
-	if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
-		spin_unlock(&key_serial_lock);
-		clear_bit(0, &key_gc_executing);
-		return;
-	}
+	security_key_free(key);
 
-	cursor = key_gc_cursor;
-	if (cursor < 0)
-		cursor = 0;
-	if (cursor > 0)
-		new_timer = key_gc_new_timer;
-	else
-		key_gc_again = false;
-
-	/* find the first key above the cursor */
-	key = NULL;
-	rb = key_serial_tree.rb_node;
-	while (rb) {
-		xkey = rb_entry(rb, struct key, serial_node);
-		if (cursor < xkey->serial) {
-			key = xkey;
-			rb = rb->rb_left;
-		} else if (cursor > xkey->serial) {
-			rb = rb->rb_right;
-		} else {
-			rb = rb_next(rb);
-			if (!rb)
-				goto reached_the_end;
-			key = rb_entry(rb, struct key, serial_node);
-			break;
-		}
+	/* deal with the user's key tracking and quota */
+	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+		spin_lock(&key->user->lock);
+		key->user->qnkeys--;
+		key->user->qnbytes -= key->quotalen;
+		spin_unlock(&key->user->lock);
 	}
 
-	if (!key)
-		goto reached_the_end;
-
-	/* trawl through the keys looking for keyrings */
-	for (;;) {
-		if (key->expiry > limit && key->expiry < new_timer) {
-			kdebug("will expire %x in %ld",
-			       key_serial(key), key->expiry - limit);
-			new_timer = key->expiry;
-		}
+	atomic_dec(&key->user->nkeys);
+	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+		atomic_dec(&key->user->nikeys);
 
-		if (key->type == &key_type_keyring &&
-		    key_gc_keyring(key, limit))
-			/* the gc had to release our lock so that the keyring
-			 * could be modified, so we have to get it again */
-			goto gc_released_our_lock;
+	key_user_put(key->user);
 
-		rb = rb_next(&key->serial_node);
-		if (!rb)
-			goto reached_the_end;
-		key = rb_entry(rb, struct key, serial_node);
-	}
+	/* now throw away the key memory */
+	if (key->type->destroy)
+		key->type->destroy(key);
 
-gc_released_our_lock:
-	kdebug("gc_released_our_lock");
-	key_gc_new_timer = new_timer;
-	key_gc_again = true;
-	clear_bit(0, &key_gc_executing);
-	queue_work(system_nrt_wq, &key_gc_work);
-	kleave(" [continue]");
-	return;
+	kfree(key->description);
 
-	/* when we reach the end of the run, we set the timer for the next one */
-reached_the_end:
-	kdebug("reached_the_end");
-	spin_unlock(&key_serial_lock);
-	key_gc_new_timer = new_timer;
-	key_gc_cursor = 0;
-	clear_bit(0, &key_gc_executing);
-
-	if (key_gc_again) {
-		/* there may have been a key that expired whilst we were
-		 * scanning, so if we discarded any links we should do another
-		 * scan */
-		new_timer = now + 1;
-		key_schedule_gc(new_timer);
-	} else if (new_timer < LONG_MAX) {
-		new_timer += key_gc_delay;
-		key_schedule_gc(new_timer);
-	}
-	kleave(" [end]");
+#ifdef KEY_DEBUGGING
+	key->magic = KEY_DEBUG_MAGIC_X;
+#endif
+	kmem_cache_free(key_jar, key);
 }
 
 /*
@@ -238,60 +207,182 @@ reached_the_end:
  * all over the place.  key_put() schedules this rather than trying to do the
  * cleanup itself, which means key_put() doesn't have to sleep.
  */
-static void key_gc_unused_keys(struct work_struct *work)
+static void key_garbage_collector(struct work_struct *work)
 {
-	struct rb_node *_n;
+	static u8 gc_state;		/* Internal persistent state */
+#define KEY_GC_REAP_AGAIN	0x01	/* - Need another cycle */
+#define KEY_GC_REAPING_LINKS	0x02	/* - We need to reap links */
+#define KEY_GC_SET_TIMER	0x04	/* - We need to restart the timer */
+#define KEY_GC_REAPING_DEAD_1	0x10	/* - We need to mark dead keys */
+#define KEY_GC_REAPING_DEAD_2	0x20	/* - We need to reap dead key links */
+#define KEY_GC_REAPING_DEAD_3	0x40	/* - We need to reap dead keys */
+#define KEY_GC_FOUND_DEAD_KEY	0x80	/* - We found at least one dead key */
+
+	struct rb_node *cursor;
 	struct key *key;
+	time_t new_timer, limit;
+
+	kenter("[%lx,%x]", key_gc_flags, gc_state);
+
+	limit = current_kernel_time().tv_sec;
+	if (limit > key_gc_delay)
+		limit -= key_gc_delay;
+	else
+		limit = key_gc_delay;
+
+	/* Work out what we're going to be doing in this pass */
+	gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+	gc_state <<= 1;
+	if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+		gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
+
+	if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+		gc_state |= KEY_GC_REAPING_DEAD_1;
+	kdebug("new pass %x", gc_state);
+
+	new_timer = LONG_MAX;
 
-go_again:
-	/* look for a dead key in the tree */
+	/* As only this function is permitted to remove things from the key
+	 * serial tree, if cursor is non-NULL then it will always point to a
+	 * valid node in the tree - even if lock got dropped.
+	 */
 	spin_lock(&key_serial_lock);
+	cursor = rb_first(&key_serial_tree);
 
-	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
-		key = rb_entry(_n, struct key, serial_node);
+continue_scanning:
+	while (cursor) {
+		key = rb_entry(cursor, struct key, serial_node);
+		cursor = rb_next(cursor);
 
 		if (atomic_read(&key->usage) == 0)
-			goto found_dead_key;
+			goto found_unreferenced_key;
+
+		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
+			if (key->type == key_gc_dead_keytype) {
+				gc_state |= KEY_GC_FOUND_DEAD_KEY;
+				set_bit(KEY_FLAG_DEAD, &key->flags);
+				key->perm = 0;
+				goto skip_dead_key;
+			}
+		}
+
+		if (gc_state & KEY_GC_SET_TIMER) {
+			if (key->expiry > limit && key->expiry < new_timer) {
+				kdebug("will expire %x in %ld",
+				       key_serial(key), key->expiry - limit);
+				new_timer = key->expiry;
+			}
+		}
+
+		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
+			if (key->type == key_gc_dead_keytype)
+				gc_state |= KEY_GC_FOUND_DEAD_KEY;
+
+		if ((gc_state & KEY_GC_REAPING_LINKS) ||
+		    unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+			if (key->type == &key_type_keyring)
+				goto found_keyring;
+		}
+
+		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
+			if (key->type == key_gc_dead_keytype)
+				goto destroy_dead_key;
+
+	skip_dead_key:
+		if (spin_is_contended(&key_serial_lock) || need_resched())
+			goto contended;
 	}
 
+contended:
 	spin_unlock(&key_serial_lock);
-	return;
 
-found_dead_key:
-	/* we found a dead key - once we've removed it from the tree, we can
-	 * drop the lock */
-	rb_erase(&key->serial_node, &key_serial_tree);
-	spin_unlock(&key_serial_lock);
+maybe_resched:
+	if (cursor) {
+		cond_resched();
+		spin_lock(&key_serial_lock);
+		goto continue_scanning;
+	}
 
-	key_check(key);
+	/* We've completed the pass.  Set the timer if we need to and queue a
+	 * new cycle if necessary.  We keep executing cycles until we find one
+	 * where we didn't reap any keys.
+	 */
+	kdebug("pass complete");
 
-	security_key_free(key);
+	if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
+		new_timer += key_gc_delay;
+		key_schedule_gc(new_timer);
+	}
 
-	/* deal with the user's key tracking and quota */
-	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
-		spin_lock(&key->user->lock);
-		key->user->qnkeys--;
-		key->user->qnbytes -= key->quotalen;
-		spin_unlock(&key->user->lock);
+	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+		/* Make sure everyone revalidates their keys if we marked a
+		 * bunch as being dead and make sure all keyring ex-payloads
+		 * are destroyed.
+		 */
+		kdebug("dead sync");
+		synchronize_rcu();
 	}
 
-	atomic_dec(&key->user->nkeys);
-	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
-		atomic_dec(&key->user->nikeys);
+	if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
+				 KEY_GC_REAPING_DEAD_2))) {
+		if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
+			/* No remaining dead keys: short circuit the remaining
+			 * keytype reap cycles.
+			 */
+			kdebug("dead short");
+			gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
+			gc_state |= KEY_GC_REAPING_DEAD_3;
+		} else {
+			gc_state |= KEY_GC_REAP_AGAIN;
+		}
+	}
 
-	key_user_put(key->user);
+	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
+		kdebug("dead wake");
+		smp_mb();
+		clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+		wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
+	}
 
-	/* now throw away the key memory */
-	if (key->type->destroy)
-		key->type->destroy(key);
+	if (gc_state & KEY_GC_REAP_AGAIN)
+		queue_work(system_nrt_wq, &key_gc_work);
+	kleave(" [end %x]", gc_state);
+	return;
 
-	kfree(key->description);
+	/* We found an unreferenced key - once we've removed it from the tree,
+	 * we can safely drop the lock.
+	 */
+found_unreferenced_key:
+	kdebug("unrefd key %d", key->serial);
+	rb_erase(&key->serial_node, &key_serial_tree);
+	spin_unlock(&key_serial_lock);
 
-#ifdef KEY_DEBUGGING
-	key->magic = KEY_DEBUG_MAGIC_X;
-#endif
-	kmem_cache_free(key_jar, key);
+	key_gc_unused_key(key);
+	gc_state |= KEY_GC_REAP_AGAIN;
+	goto maybe_resched;
 
-	/* there may, of course, be more than one key to destroy */
-	goto go_again;
+	/* We found a keyring and we need to check the payload for links to
+	 * dead or expired keys.  We don't flag another reap immediately as we
+	 * have to wait for the old payload to be destroyed by RCU before we
+	 * can reap the keys to which it refers.
+	 */
+found_keyring:
+	spin_unlock(&key_serial_lock);
+	kdebug("scan keyring %d", key->serial);
+	key_gc_keyring(key, limit);
+	goto maybe_resched;
+
+	/* We found a dead key that is still referenced.  Reset its type and
+	 * destroy its payload with its semaphore held.
+	 */
+destroy_dead_key:
+	spin_unlock(&key_serial_lock);
+	kdebug("destroy key %d", key->serial);
+	down_write(&key->sem);
+	key->type = &key_type_dead;
+	if (key_gc_dead_keytype->destroy)
+		key_gc_dead_keytype->destroy(key);
+	memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
+	up_write(&key->sem);
+	goto maybe_resched;
 }
diff --git a/security/keys/internal.h b/security/keys/internal.h
index a7cd1a682321..c7a7caec4830 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -31,6 +31,7 @@
 	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
 #endif
 
+extern struct key_type key_type_dead;
 extern struct key_type key_type_user;
 
 /*****************************************************************************/
@@ -147,10 +148,11 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
 
 extern long join_session_keyring(const char *name);
 
-extern struct work_struct key_gc_unused_work;
+extern struct work_struct key_gc_work;
 extern unsigned key_gc_delay;
 extern void keyring_gc(struct key *keyring, time_t limit);
 extern void key_schedule_gc(time_t expiry_at);
+extern void key_gc_keytype(struct key_type *ktype);
 
 extern int key_task_permission(const key_ref_t key_ref,
 			       const struct cred *cred,
diff --git a/security/keys/key.c b/security/keys/key.c
index 1f3ed44a83c0..4414abddcb5b 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -39,11 +39,6 @@ static DECLARE_RWSEM(key_types_sem);
 /* We serialise key instantiation and link */
 DEFINE_MUTEX(key_construction_mutex);
 
-/* Any key who's type gets unegistered will be re-typed to this */
-static struct key_type key_type_dead = {
-	.name = "dead",
-};
-
 #ifdef KEY_DEBUGGING
 void __key_check(const struct key *key)
 {
@@ -602,7 +597,7 @@ void key_put(struct key *key)
 		key_check(key);
 
 		if (atomic_dec_and_test(&key->usage))
-			queue_work(system_nrt_wq, &key_gc_unused_work);
+			queue_work(system_nrt_wq, &key_gc_work);
 	}
 }
 EXPORT_SYMBOL(key_put);
@@ -980,49 +975,11 @@ EXPORT_SYMBOL(register_key_type);
  */
 void unregister_key_type(struct key_type *ktype)
 {
-	struct rb_node *_n;
-	struct key *key;
-
 	down_write(&key_types_sem);
-
-	/* withdraw the key type */
 	list_del_init(&ktype->link);
-
-	/* mark all the keys of this type dead */
-	spin_lock(&key_serial_lock);
-
-	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
-		key = rb_entry(_n, struct key, serial_node);
-
-		if (key->type == ktype) {
-			key->type = &key_type_dead;
-			set_bit(KEY_FLAG_DEAD, &key->flags);
-		}
-	}
-
-	spin_unlock(&key_serial_lock);
-
-	/* make sure everyone revalidates their keys */
-	synchronize_rcu();
-
-	/* we should now be able to destroy the payloads of all the keys of
-	 * this type with impunity */
-	spin_lock(&key_serial_lock);
-
-	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
-		key = rb_entry(_n, struct key, serial_node);
-
-		if (key->type == ktype) {
-			if (ktype->destroy)
-				ktype->destroy(key);
-			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
-		}
-	}
-
-	spin_unlock(&key_serial_lock);
-	up_write(&key_types_sem);
-
-	key_schedule_gc(0);
+	downgrade_write(&key_types_sem);
+	key_gc_keytype(ktype);
+	up_read(&key_types_sem);
 }
 EXPORT_SYMBOL(unregister_key_type);
 