Diffstat (limited to 'security')
-rw-r--r--  security/keys/key.c            94
-rw-r--r--  security/keys/keyctl.c         23
-rw-r--r--  security/keys/keyring.c       245
-rw-r--r--  security/keys/proc.c           21
-rw-r--r--  security/keys/process_keys.c   12
-rw-r--r--  security/keys/request_key.c    32
-rw-r--r--  security/keys/user_defined.c   85
7 files changed, 289 insertions, 223 deletions
diff --git a/security/keys/key.c b/security/keys/key.c
index 59402c843203..1fdfccb3fe43 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -294,7 +294,6 @@ struct key *key_alloc(struct key_type *type, const char *desc,
294 } 294 }
295 295
296 atomic_set(&key->usage, 1); 296 atomic_set(&key->usage, 1);
297 rwlock_init(&key->lock);
298 init_rwsem(&key->sem); 297 init_rwsem(&key->sem);
299 key->type = type; 298 key->type = type;
300 key->user = user; 299 key->user = user;
@@ -308,7 +307,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
308 key->payload.data = NULL; 307 key->payload.data = NULL;
309 308
310 if (!not_in_quota) 309 if (!not_in_quota)
311 key->flags |= KEY_FLAG_IN_QUOTA; 310 key->flags |= 1 << KEY_FLAG_IN_QUOTA;
312 311
313 memset(&key->type_data, 0, sizeof(key->type_data)); 312 memset(&key->type_data, 0, sizeof(key->type_data));
314 313
@@ -359,7 +358,7 @@ int key_payload_reserve(struct key *key, size_t datalen)
359 key_check(key); 358 key_check(key);
360 359
361 /* contemplate the quota adjustment */ 360 /* contemplate the quota adjustment */
362 if (delta != 0 && key->flags & KEY_FLAG_IN_QUOTA) { 361 if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
363 spin_lock(&key->user->lock); 362 spin_lock(&key->user->lock);
364 363
365 if (delta > 0 && 364 if (delta > 0 &&
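These first hunks change how key->flags is handled: instead of OR-ing mask constants into the field under the per-key rwlock, the flags become bit numbers manipulated with the atomic bitops, which is what allows key->lock to be deleted. A minimal sketch of the idiom with invented names (not the kernel's KEY_FLAG_* constants):

#include <linux/bitops.h>

/* illustrative flag bit numbers, not the kernel's KEY_FLAG_* values */
#define OBJ_READY	0
#define OBJ_IN_QUOTA	1

struct obj {
	unsigned long	flags;	/* a bitmap indexed by bit number, not a mask */
};

static void obj_init(struct obj *obj, int in_quota)
{
	obj->flags = 0;
	if (in_quota)		/* not yet visible to others: a plain OR is enough */
		obj->flags |= 1 << OBJ_IN_QUOTA;
}

static void obj_mark_ready(struct obj *obj)
{
	set_bit(OBJ_READY, &obj->flags);	/* atomic once other CPUs can see the object */
}

static int obj_is_ready(const struct obj *obj)
{
	return test_bit(OBJ_READY, &obj->flags);
}

This mirrors what the patched key_alloc() and key_payload_reserve() do: the 1 << KEY_FLAG_IN_QUOTA store happens before the key is published, and later accesses go through test_bit()/set_bit().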
@@ -405,23 +404,17 @@ static int __key_instantiate_and_link(struct key *key,
405 down_write(&key_construction_sem); 404 down_write(&key_construction_sem);
406 405
407 /* can't instantiate twice */ 406 /* can't instantiate twice */
408 if (!(key->flags & KEY_FLAG_INSTANTIATED)) { 407 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
409 /* instantiate the key */ 408 /* instantiate the key */
410 ret = key->type->instantiate(key, data, datalen); 409 ret = key->type->instantiate(key, data, datalen);
411 410
412 if (ret == 0) { 411 if (ret == 0) {
413 /* mark the key as being instantiated */ 412 /* mark the key as being instantiated */
414 write_lock(&key->lock);
415
416 atomic_inc(&key->user->nikeys); 413 atomic_inc(&key->user->nikeys);
417 key->flags |= KEY_FLAG_INSTANTIATED; 414 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
418 415
419 if (key->flags & KEY_FLAG_USER_CONSTRUCT) { 416 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
420 key->flags &= ~KEY_FLAG_USER_CONSTRUCT;
421 awaken = 1; 417 awaken = 1;
422 }
423
424 write_unlock(&key->lock);
425 418
426 /* and link it into the destination keyring */ 419 /* and link it into the destination keyring */
427 if (keyring) 420 if (keyring)
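In the old __key_instantiate_and_link() the USER_CONSTRUCT flag had to be tested and cleared under write_lock(&key->lock) so that exactly one path decided to wake the construction queue; test_and_clear_bit() provides the same test-and-consume semantics as a single atomic operation. A hedged sketch of the pattern, with made-up names:

#include <linux/bitops.h>
#include <linux/wait.h>

#define OBJ_UNDER_CONSTRUCTION	2	/* illustrative bit number */

static DECLARE_WAIT_QUEUE_HEAD(obj_construction_wq);

/* whichever caller actually clears the bit performs the wake-up; no lock needed */
static void obj_construction_done(unsigned long *flags)
{
	if (test_and_clear_bit(OBJ_UNDER_CONSTRUCTION, flags))
		wake_up_all(&obj_construction_wq);
}

The same replacement appears in key_negate_and_link() below.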
@@ -486,21 +479,17 @@ int key_negate_and_link(struct key *key,
486 down_write(&key_construction_sem); 479 down_write(&key_construction_sem);
487 480
488 /* can't instantiate twice */ 481 /* can't instantiate twice */
489 if (!(key->flags & KEY_FLAG_INSTANTIATED)) { 482 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
490 /* mark the key as being negatively instantiated */ 483 /* mark the key as being negatively instantiated */
491 write_lock(&key->lock);
492
493 atomic_inc(&key->user->nikeys); 484 atomic_inc(&key->user->nikeys);
494 key->flags |= KEY_FLAG_INSTANTIATED | KEY_FLAG_NEGATIVE; 485 set_bit(KEY_FLAG_NEGATIVE, &key->flags);
486 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
495 now = current_kernel_time(); 487 now = current_kernel_time();
496 key->expiry = now.tv_sec + timeout; 488 key->expiry = now.tv_sec + timeout;
497 489
498 if (key->flags & KEY_FLAG_USER_CONSTRUCT) { 490 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
499 key->flags &= ~KEY_FLAG_USER_CONSTRUCT;
500 awaken = 1; 491 awaken = 1;
501 }
502 492
503 write_unlock(&key->lock);
504 ret = 0; 493 ret = 0;
505 494
506 /* and link it into the destination keyring */ 495 /* and link it into the destination keyring */
@@ -553,8 +542,10 @@ static void key_cleanup(void *data)
553 rb_erase(&key->serial_node, &key_serial_tree); 542 rb_erase(&key->serial_node, &key_serial_tree);
554 spin_unlock(&key_serial_lock); 543 spin_unlock(&key_serial_lock);
555 544
545 key_check(key);
546
556 /* deal with the user's key tracking and quota */ 547 /* deal with the user's key tracking and quota */
557 if (key->flags & KEY_FLAG_IN_QUOTA) { 548 if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
558 spin_lock(&key->user->lock); 549 spin_lock(&key->user->lock);
559 key->user->qnkeys--; 550 key->user->qnkeys--;
560 key->user->qnbytes -= key->quotalen; 551 key->user->qnbytes -= key->quotalen;
@@ -562,7 +553,7 @@ static void key_cleanup(void *data)
562 } 553 }
563 554
564 atomic_dec(&key->user->nkeys); 555 atomic_dec(&key->user->nkeys);
565 if (key->flags & KEY_FLAG_INSTANTIATED) 556 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
566 atomic_dec(&key->user->nikeys); 557 atomic_dec(&key->user->nikeys);
567 558
568 key_user_put(key->user); 559 key_user_put(key->user);
@@ -631,9 +622,9 @@ struct key *key_lookup(key_serial_t id)
631 goto error; 622 goto error;
632 623
633 found: 624 found:
634 /* pretent doesn't exist if it's dead */ 625 /* pretend it doesn't exist if it's dead */
635 if (atomic_read(&key->usage) == 0 || 626 if (atomic_read(&key->usage) == 0 ||
636 (key->flags & KEY_FLAG_DEAD) || 627 test_bit(KEY_FLAG_DEAD, &key->flags) ||
637 key->type == &key_type_dead) 628 key->type == &key_type_dead)
638 goto not_found; 629 goto not_found;
639 630
@@ -708,12 +699,9 @@ static inline struct key *__key_update(struct key *key, const void *payload,
708 699
709 ret = key->type->update(key, payload, plen); 700 ret = key->type->update(key, payload, plen);
710 701
711 if (ret == 0) { 702 if (ret == 0)
712 /* updating a negative key instantiates it */ 703 /* updating a negative key instantiates it */
713 write_lock(&key->lock); 704 clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
714 key->flags &= ~KEY_FLAG_NEGATIVE;
715 write_unlock(&key->lock);
716 }
717 705
718 up_write(&key->sem); 706 up_write(&key->sem);
719 707
@@ -841,12 +829,9 @@ int key_update(struct key *key, const void *payload, size_t plen)
841 down_write(&key->sem); 829 down_write(&key->sem);
842 ret = key->type->update(key, payload, plen); 830 ret = key->type->update(key, payload, plen);
843 831
844 if (ret == 0) { 832 if (ret == 0)
845 /* updating a negative key instantiates it */ 833 /* updating a negative key instantiates it */
846 write_lock(&key->lock); 834 clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
847 key->flags &= ~KEY_FLAG_NEGATIVE;
848 write_unlock(&key->lock);
849 }
850 835
851 up_write(&key->sem); 836 up_write(&key->sem);
852 } 837 }
@@ -892,10 +877,7 @@ struct key *key_duplicate(struct key *source, const char *desc)
892 goto error2; 877 goto error2;
893 878
894 atomic_inc(&key->user->nikeys); 879 atomic_inc(&key->user->nikeys);
895 880 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
896 write_lock(&key->lock);
897 key->flags |= KEY_FLAG_INSTANTIATED;
898 write_unlock(&key->lock);
899 881
900 error_k: 882 error_k:
901 up_read(&key_types_sem); 883 up_read(&key_types_sem);
@@ -922,9 +904,7 @@ void key_revoke(struct key *key)
922 /* make sure no one's trying to change or use the key when we mark 904 /* make sure no one's trying to change or use the key when we mark
923 * it */ 905 * it */
924 down_write(&key->sem); 906 down_write(&key->sem);
925 write_lock(&key->lock); 907 set_bit(KEY_FLAG_REVOKED, &key->flags);
926 key->flags |= KEY_FLAG_REVOKED;
927 write_unlock(&key->lock);
928 up_write(&key->sem); 908 up_write(&key->sem);
929 909
930} /* end key_revoke() */ 910} /* end key_revoke() */
@@ -975,24 +955,33 @@ void unregister_key_type(struct key_type *ktype)
975 /* withdraw the key type */ 955 /* withdraw the key type */
976 list_del_init(&ktype->link); 956 list_del_init(&ktype->link);
977 957
978 /* need to withdraw all keys of this type */ 958 /* mark all the keys of this type dead */
979 spin_lock(&key_serial_lock); 959 spin_lock(&key_serial_lock);
980 960
981 for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { 961 for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
982 key = rb_entry(_n, struct key, serial_node); 962 key = rb_entry(_n, struct key, serial_node);
983 963
984 if (key->type != ktype) 964 if (key->type == ktype)
985 continue; 965 key->type = &key_type_dead;
966 }
967
968 spin_unlock(&key_serial_lock);
969
970 /* make sure everyone revalidates their keys */
971 synchronize_kernel();
972
973 /* we should now be able to destroy the payloads of all the keys of
974 * this type with impunity */
975 spin_lock(&key_serial_lock);
986 976
987 write_lock(&key->lock); 977 for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
988 key->type = &key_type_dead; 978 key = rb_entry(_n, struct key, serial_node);
989 write_unlock(&key->lock);
990 979
991 /* there shouldn't be anyone looking at the description or 980 if (key->type == ktype) {
992 * payload now */ 981 if (ktype->destroy)
993 if (ktype->destroy) 982 ktype->destroy(key);
994 ktype->destroy(key); 983 memset(&key->payload, 0xbd, sizeof(key->payload));
995 memset(&key->payload, 0xbd, sizeof(key->payload)); 984 }
996 } 985 }
997 986
998 spin_unlock(&key_serial_lock); 987 spin_unlock(&key_serial_lock);
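unregister_key_type() now follows the standard RCU retirement sequence: make the objects unreachable first (repoint every key of the dying type at key_type_dead under key_serial_lock), wait out a grace period with synchronize_kernel() (the older interface corresponding to today's synchronize_rcu()), and only then destroy the payloads, since no reader can still be traversing the old state. The same discipline in its smallest form, for a single published pointer (all names invented):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct blob {
	int payload;
};

static struct blob *published_blob;	/* readers fetch this with rcu_dereference() */

/* unpublish, wait for the readers, and only then destroy */
static void retire_published_blob(void)
{
	struct blob *old = published_blob;	/* update side, suitably serialised */

	rcu_assign_pointer(published_blob, NULL);
	synchronize_rcu();	/* every read-side section that could have seen 'old' has finished */
	kfree(old);
}

The two-pass loop over the key serial tree is the same idea applied to many keys at once: pass one is the unpublish step, the grace period sits in the middle, and pass two is the destroy step.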
@@ -1037,4 +1026,5 @@ void __init key_init(void)
1037 1026
1038 /* link the two root keyrings together */ 1027 /* link the two root keyrings together */
1039 key_link(&root_session_keyring, &root_user_keyring); 1028 key_link(&root_session_keyring, &root_user_keyring);
1029
1040} /* end key_init() */ 1030} /* end key_init() */
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index dc0011b3fac9..cedb7326de29 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -728,7 +728,6 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
728 /* make the changes with the locks held to prevent chown/chown races */ 728 /* make the changes with the locks held to prevent chown/chown races */
729 ret = -EACCES; 729 ret = -EACCES;
730 down_write(&key->sem); 730 down_write(&key->sem);
731 write_lock(&key->lock);
732 731
733 if (!capable(CAP_SYS_ADMIN)) { 732 if (!capable(CAP_SYS_ADMIN)) {
734 /* only the sysadmin can chown a key to some other UID */ 733 /* only the sysadmin can chown a key to some other UID */
@@ -755,7 +754,6 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
755 ret = 0; 754 ret = 0;
756 755
757 no_access: 756 no_access:
758 write_unlock(&key->lock);
759 up_write(&key->sem); 757 up_write(&key->sem);
760 key_put(key); 758 key_put(key);
761 error: 759 error:
@@ -784,26 +782,19 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
784 goto error; 782 goto error;
785 } 783 }
786 784
787 /* make the changes with the locks held to prevent chown/chmod 785 /* make the changes with the locks held to prevent chown/chmod races */
788 * races */
789 ret = -EACCES; 786 ret = -EACCES;
790 down_write(&key->sem); 787 down_write(&key->sem);
791 write_lock(&key->lock);
792 788
793 /* if we're not the sysadmin, we can only chmod a key that we 789 /* if we're not the sysadmin, we can only change a key that we own */
794 * own */ 790 if (capable(CAP_SYS_ADMIN) || key->uid == current->fsuid) {
795 if (!capable(CAP_SYS_ADMIN) && key->uid != current->fsuid) 791 key->perm = perm;
796 goto no_access; 792 ret = 0;
797 793 }
798 /* changing the permissions mask */
799 key->perm = perm;
800 ret = 0;
801 794
802 no_access:
803 write_unlock(&key->lock);
804 up_write(&key->sem); 795 up_write(&key->sem);
805 key_put(key); 796 key_put(key);
806 error: 797error:
807 return ret; 798 return ret;
808 799
809} /* end keyctl_setperm_key() */ 800} /* end keyctl_setperm_key() */
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e2ab4f8e7481..c9a5de197487 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -132,10 +132,17 @@ static int keyring_duplicate(struct key *keyring, const struct key *source)
132 (PAGE_SIZE - sizeof(*klist)) / sizeof(struct key); 132 (PAGE_SIZE - sizeof(*klist)) / sizeof(struct key);
133 133
134 ret = 0; 134 ret = 0;
135 sklist = source->payload.subscriptions;
136 135
137 if (sklist && sklist->nkeys > 0) { 136 /* find out how many keys are currently linked */
137 rcu_read_lock();
138 sklist = rcu_dereference(source->payload.subscriptions);
139 max = 0;
140 if (sklist)
138 max = sklist->nkeys; 141 max = sklist->nkeys;
142 rcu_read_unlock();
143
144 /* allocate a new payload and stuff load with key links */
145 if (max > 0) {
139 BUG_ON(max > limit); 146 BUG_ON(max > limit);
140 147
141 max = (max + 3) & ~3; 148 max = (max + 3) & ~3;
@@ -148,6 +155,10 @@ static int keyring_duplicate(struct key *keyring, const struct key *source)
148 if (!klist) 155 if (!klist)
149 goto error; 156 goto error;
150 157
158 /* set links */
159 rcu_read_lock();
160 sklist = rcu_dereference(source->payload.subscriptions);
161
151 klist->maxkeys = max; 162 klist->maxkeys = max;
152 klist->nkeys = sklist->nkeys; 163 klist->nkeys = sklist->nkeys;
153 memcpy(klist->keys, 164 memcpy(klist->keys,
@@ -157,7 +168,9 @@ static int keyring_duplicate(struct key *keyring, const struct key *source)
157 for (loop = klist->nkeys - 1; loop >= 0; loop--) 168 for (loop = klist->nkeys - 1; loop >= 0; loop--)
158 atomic_inc(&klist->keys[loop]->usage); 169 atomic_inc(&klist->keys[loop]->usage);
159 170
160 keyring->payload.subscriptions = klist; 171 rcu_read_unlock();
172
173 rcu_assign_pointer(keyring->payload.subscriptions, klist);
161 ret = 0; 174 ret = 0;
162 } 175 }
163 176
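keyring_duplicate() now samples the source keyring's subscription list under rcu_read_lock()/rcu_dereference() instead of relying on the deleted rwlock, and it drops the read-side section before the allocation (which may sleep) and re-enters it to copy the links. The reader-side shape, reduced to its essentials (the struct here only mimics struct keyring_list):

#include <linux/rcupdate.h>

struct toy_keylist {
	unsigned short	maxkeys;
	unsigned short	nkeys;
	void		*keys[0];	/* flexible array, like struct keyring_list */
};

static struct toy_keylist *toy_subscriptions;	/* writers publish with rcu_assign_pointer() */

/* snapshot how many entries are linked right now */
static unsigned int toy_count_links(void)
{
	struct toy_keylist *klist;
	unsigned int n = 0;

	rcu_read_lock();
	klist = rcu_dereference(toy_subscriptions);
	if (klist)
		n = klist->nkeys;
	rcu_read_unlock();

	return n;	/* only a snapshot; the list may change immediately afterwards */
}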
@@ -192,7 +205,7 @@ static void keyring_destroy(struct key *keyring)
192 write_unlock(&keyring_name_lock); 205 write_unlock(&keyring_name_lock);
193 } 206 }
194 207
195 klist = keyring->payload.subscriptions; 208 klist = rcu_dereference(keyring->payload.subscriptions);
196 if (klist) { 209 if (klist) {
197 for (loop = klist->nkeys - 1; loop >= 0; loop--) 210 for (loop = klist->nkeys - 1; loop >= 0; loop--)
198 key_put(klist->keys[loop]); 211 key_put(klist->keys[loop]);
@@ -216,17 +229,20 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
216 seq_puts(m, "[anon]"); 229 seq_puts(m, "[anon]");
217 } 230 }
218 231
219 klist = keyring->payload.subscriptions; 232 rcu_read_lock();
233 klist = rcu_dereference(keyring->payload.subscriptions);
220 if (klist) 234 if (klist)
221 seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys); 235 seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys);
222 else 236 else
223 seq_puts(m, ": empty"); 237 seq_puts(m, ": empty");
238 rcu_read_unlock();
224 239
225} /* end keyring_describe() */ 240} /* end keyring_describe() */
226 241
227/*****************************************************************************/ 242/*****************************************************************************/
228/* 243/*
229 * read a list of key IDs from the keyring's contents 244 * read a list of key IDs from the keyring's contents
245 * - the keyring's semaphore is read-locked
230 */ 246 */
231static long keyring_read(const struct key *keyring, 247static long keyring_read(const struct key *keyring,
232 char __user *buffer, size_t buflen) 248 char __user *buffer, size_t buflen)
@@ -237,7 +253,7 @@ static long keyring_read(const struct key *keyring,
237 int loop, ret; 253 int loop, ret;
238 254
239 ret = 0; 255 ret = 0;
240 klist = keyring->payload.subscriptions; 256 klist = rcu_dereference(keyring->payload.subscriptions);
241 257
242 if (klist) { 258 if (klist) {
243 /* calculate how much data we could return */ 259 /* calculate how much data we could return */
@@ -320,7 +336,7 @@ struct key *keyring_search_aux(struct key *keyring,
320 key_match_func_t match) 336 key_match_func_t match)
321{ 337{
322 struct { 338 struct {
323 struct key *keyring; 339 struct keyring_list *keylist;
324 int kix; 340 int kix;
325 } stack[KEYRING_SEARCH_MAX_DEPTH]; 341 } stack[KEYRING_SEARCH_MAX_DEPTH];
326 342
@@ -328,10 +344,12 @@ struct key *keyring_search_aux(struct key *keyring,
328 struct timespec now; 344 struct timespec now;
329 struct key *key; 345 struct key *key;
330 long err; 346 long err;
331 int sp, psp, kix; 347 int sp, kix;
332 348
333 key_check(keyring); 349 key_check(keyring);
334 350
351 rcu_read_lock();
352
335 /* top keyring must have search permission to begin the search */ 353 /* top keyring must have search permission to begin the search */
336 key = ERR_PTR(-EACCES); 354 key = ERR_PTR(-EACCES);
337 if (!key_permission(keyring, KEY_SEARCH)) 355 if (!key_permission(keyring, KEY_SEARCH))
@@ -347,11 +365,10 @@ struct key *keyring_search_aux(struct key *keyring,
347 365
348 /* start processing a new keyring */ 366 /* start processing a new keyring */
349 descend: 367 descend:
350 read_lock(&keyring->lock); 368 if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
351 if (keyring->flags & KEY_FLAG_REVOKED)
352 goto not_this_keyring; 369 goto not_this_keyring;
353 370
354 keylist = keyring->payload.subscriptions; 371 keylist = rcu_dereference(keyring->payload.subscriptions);
355 if (!keylist) 372 if (!keylist)
356 goto not_this_keyring; 373 goto not_this_keyring;
357 374
@@ -364,7 +381,7 @@ struct key *keyring_search_aux(struct key *keyring,
364 continue; 381 continue;
365 382
366 /* skip revoked keys and expired keys */ 383 /* skip revoked keys and expired keys */
367 if (key->flags & KEY_FLAG_REVOKED) 384 if (test_bit(KEY_FLAG_REVOKED, &key->flags))
368 continue; 385 continue;
369 386
370 if (key->expiry && now.tv_sec >= key->expiry) 387 if (key->expiry && now.tv_sec >= key->expiry)
@@ -379,7 +396,7 @@ struct key *keyring_search_aux(struct key *keyring,
379 continue; 396 continue;
380 397
381 /* we set a different error code if we find a negative key */ 398 /* we set a different error code if we find a negative key */
382 if (key->flags & KEY_FLAG_NEGATIVE) { 399 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
383 err = -ENOKEY; 400 err = -ENOKEY;
384 continue; 401 continue;
385 } 402 }
@@ -390,48 +407,37 @@ struct key *keyring_search_aux(struct key *keyring,
390 /* search through the keyrings nested in this one */ 407 /* search through the keyrings nested in this one */
391 kix = 0; 408 kix = 0;
392 ascend: 409 ascend:
393 while (kix < keylist->nkeys) { 410 for (; kix < keylist->nkeys; kix++) {
394 key = keylist->keys[kix]; 411 key = keylist->keys[kix];
395 if (key->type != &key_type_keyring) 412 if (key->type != &key_type_keyring)
396 goto next; 413 continue;
397 414
398 /* recursively search nested keyrings 415 /* recursively search nested keyrings
399 * - only search keyrings for which we have search permission 416 * - only search keyrings for which we have search permission
400 */ 417 */
401 if (sp >= KEYRING_SEARCH_MAX_DEPTH) 418 if (sp >= KEYRING_SEARCH_MAX_DEPTH)
402 goto next; 419 continue;
403 420
404 if (!key_permission(key, KEY_SEARCH)) 421 if (!key_permission(key, KEY_SEARCH))
405 goto next; 422 continue;
406
407 /* evade loops in the keyring tree */
408 for (psp = 0; psp < sp; psp++)
409 if (stack[psp].keyring == keyring)
410 goto next;
411 423
412 /* stack the current position */ 424 /* stack the current position */
413 stack[sp].keyring = keyring; 425 stack[sp].keylist = keylist;
414 stack[sp].kix = kix; 426 stack[sp].kix = kix;
415 sp++; 427 sp++;
416 428
417 /* begin again with the new keyring */ 429 /* begin again with the new keyring */
418 keyring = key; 430 keyring = key;
419 goto descend; 431 goto descend;
420
421 next:
422 kix++;
423 } 432 }
424 433
425 /* the keyring we're looking at was disqualified or didn't contain a 434 /* the keyring we're looking at was disqualified or didn't contain a
426 * matching key */ 435 * matching key */
427 not_this_keyring: 436 not_this_keyring:
428 read_unlock(&keyring->lock);
429
430 if (sp > 0) { 437 if (sp > 0) {
431 /* resume the processing of a keyring higher up in the tree */ 438 /* resume the processing of a keyring higher up in the tree */
432 sp--; 439 sp--;
433 keyring = stack[sp].keyring; 440 keylist = stack[sp].keylist;
434 keylist = keyring->payload.subscriptions;
435 kix = stack[sp].kix + 1; 441 kix = stack[sp].kix + 1;
436 goto ascend; 442 goto ascend;
437 } 443 }
@@ -442,16 +448,9 @@ struct key *keyring_search_aux(struct key *keyring,
442 /* we found a viable match */ 448 /* we found a viable match */
443 found: 449 found:
444 atomic_inc(&key->usage); 450 atomic_inc(&key->usage);
445 read_unlock(&keyring->lock);
446
447 /* unwind the keyring stack */
448 while (sp > 0) {
449 sp--;
450 read_unlock(&stack[sp].keyring->lock);
451 }
452
453 key_check(key); 451 key_check(key);
454 error: 452 error:
453 rcu_read_unlock();
455 return key; 454 return key;
456 455
457} /* end keyring_search_aux() */ 456} /* end keyring_search_aux() */
@@ -489,7 +488,9 @@ struct key *__keyring_search_one(struct key *keyring,
489 struct key *key; 488 struct key *key;
490 int loop; 489 int loop;
491 490
492 klist = keyring->payload.subscriptions; 491 rcu_read_lock();
492
493 klist = rcu_dereference(keyring->payload.subscriptions);
493 if (klist) { 494 if (klist) {
494 for (loop = 0; loop < klist->nkeys; loop++) { 495 for (loop = 0; loop < klist->nkeys; loop++) {
495 key = klist->keys[loop]; 496 key = klist->keys[loop];
@@ -497,7 +498,7 @@ struct key *__keyring_search_one(struct key *keyring,
497 if (key->type == ktype && 498 if (key->type == ktype &&
498 key->type->match(key, description) && 499 key->type->match(key, description) &&
499 key_permission(key, perm) && 500 key_permission(key, perm) &&
500 !(key->flags & KEY_FLAG_REVOKED) 501 !test_bit(KEY_FLAG_REVOKED, &key->flags)
501 ) 502 )
502 goto found; 503 goto found;
503 } 504 }
@@ -509,6 +510,7 @@ struct key *__keyring_search_one(struct key *keyring,
509 found: 510 found:
510 atomic_inc(&key->usage); 511 atomic_inc(&key->usage);
511 error: 512 error:
513 rcu_read_unlock();
512 return key; 514 return key;
513 515
514} /* end __keyring_search_one() */ 516} /* end __keyring_search_one() */
@@ -540,7 +542,7 @@ struct key *find_keyring_by_name(const char *name, key_serial_t bound)
540 &keyring_name_hash[bucket], 542 &keyring_name_hash[bucket],
541 type_data.link 543 type_data.link
542 ) { 544 ) {
543 if (keyring->flags & KEY_FLAG_REVOKED) 545 if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
544 continue; 546 continue;
545 547
546 if (strcmp(keyring->description, name) != 0) 548 if (strcmp(keyring->description, name) != 0)
@@ -579,7 +581,7 @@ struct key *find_keyring_by_name(const char *name, key_serial_t bound)
579static int keyring_detect_cycle(struct key *A, struct key *B) 581static int keyring_detect_cycle(struct key *A, struct key *B)
580{ 582{
581 struct { 583 struct {
582 struct key *subtree; 584 struct keyring_list *keylist;
583 int kix; 585 int kix;
584 } stack[KEYRING_SEARCH_MAX_DEPTH]; 586 } stack[KEYRING_SEARCH_MAX_DEPTH];
585 587
@@ -587,20 +589,21 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
587 struct key *subtree, *key; 589 struct key *subtree, *key;
588 int sp, kix, ret; 590 int sp, kix, ret;
589 591
592 rcu_read_lock();
593
590 ret = -EDEADLK; 594 ret = -EDEADLK;
591 if (A == B) 595 if (A == B)
592 goto error; 596 goto cycle_detected;
593 597
594 subtree = B; 598 subtree = B;
595 sp = 0; 599 sp = 0;
596 600
597 /* start processing a new keyring */ 601 /* start processing a new keyring */
598 descend: 602 descend:
599 read_lock(&subtree->lock); 603 if (test_bit(KEY_FLAG_REVOKED, &subtree->flags))
600 if (subtree->flags & KEY_FLAG_REVOKED)
601 goto not_this_keyring; 604 goto not_this_keyring;
602 605
603 keylist = subtree->payload.subscriptions; 606 keylist = rcu_dereference(subtree->payload.subscriptions);
604 if (!keylist) 607 if (!keylist)
605 goto not_this_keyring; 608 goto not_this_keyring;
606 kix = 0; 609 kix = 0;
@@ -619,7 +622,7 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
619 goto too_deep; 622 goto too_deep;
620 623
621 /* stack the current position */ 624 /* stack the current position */
622 stack[sp].subtree = subtree; 625 stack[sp].keylist = keylist;
623 stack[sp].kix = kix; 626 stack[sp].kix = kix;
624 sp++; 627 sp++;
625 628
@@ -632,13 +635,10 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
632 /* the keyring we're looking at was disqualified or didn't contain a 635 /* the keyring we're looking at was disqualified or didn't contain a
633 * matching key */ 636 * matching key */
634 not_this_keyring: 637 not_this_keyring:
635 read_unlock(&subtree->lock);
636
637 if (sp > 0) { 638 if (sp > 0) {
638 /* resume the checking of a keyring higher up in the tree */ 639 /* resume the checking of a keyring higher up in the tree */
639 sp--; 640 sp--;
640 subtree = stack[sp].subtree; 641 keylist = stack[sp].keylist;
641 keylist = subtree->payload.subscriptions;
642 kix = stack[sp].kix + 1; 642 kix = stack[sp].kix + 1;
643 goto ascend; 643 goto ascend;
644 } 644 }
@@ -646,30 +646,36 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
646 ret = 0; /* no cycles detected */ 646 ret = 0; /* no cycles detected */
647 647
648 error: 648 error:
649 rcu_read_unlock();
649 return ret; 650 return ret;
650 651
651 too_deep: 652 too_deep:
652 ret = -ELOOP; 653 ret = -ELOOP;
653 goto error_unwind; 654 goto error;
655
654 cycle_detected: 656 cycle_detected:
655 ret = -EDEADLK; 657 ret = -EDEADLK;
656 error_unwind:
657 read_unlock(&subtree->lock);
658
659 /* unwind the keyring stack */
660 while (sp > 0) {
661 sp--;
662 read_unlock(&stack[sp].subtree->lock);
663 }
664
665 goto error; 658 goto error;
666 659
667} /* end keyring_detect_cycle() */ 660} /* end keyring_detect_cycle() */
668 661
669/*****************************************************************************/ 662/*****************************************************************************/
670/* 663/*
664 * dispose of a keyring list after the RCU grace period
665 */
666static void keyring_link_rcu_disposal(struct rcu_head *rcu)
667{
668 struct keyring_list *klist =
669 container_of(rcu, struct keyring_list, rcu);
670
671 kfree(klist);
672
673} /* end keyring_link_rcu_disposal() */
674
675/*****************************************************************************/
676/*
671 * link a key into to a keyring 677 * link a key into to a keyring
672 * - must be called with the keyring's semaphore held 678 * - must be called with the keyring's semaphore write-locked
673 */ 679 */
674int __key_link(struct key *keyring, struct key *key) 680int __key_link(struct key *keyring, struct key *key)
675{ 681{
@@ -679,7 +685,7 @@ int __key_link(struct key *keyring, struct key *key)
679 int ret; 685 int ret;
680 686
681 ret = -EKEYREVOKED; 687 ret = -EKEYREVOKED;
682 if (keyring->flags & KEY_FLAG_REVOKED) 688 if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
683 goto error; 689 goto error;
684 690
685 ret = -ENOTDIR; 691 ret = -ENOTDIR;
@@ -710,9 +716,10 @@ int __key_link(struct key *keyring, struct key *key)
710 /* there's sufficient slack space to add directly */ 716 /* there's sufficient slack space to add directly */
711 atomic_inc(&key->usage); 717 atomic_inc(&key->usage);
712 718
713 write_lock(&keyring->lock); 719 klist->keys[klist->nkeys] = key;
714 klist->keys[klist->nkeys++] = key; 720 smp_wmb();
715 write_unlock(&keyring->lock); 721 klist->nkeys++;
722 smp_wmb();
716 723
717 ret = 0; 724 ret = 0;
718 } 725 }
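When there is slack left in the existing array, __key_link() now appends without taking any lock that readers would notice: the new key pointer is stored into the spare slot first, smp_wmb() orders that store, and only then is nkeys incremented, so a concurrent reader that observes the larger count is guaranteed to find the slot filled in. A sketch of that publication order together with a matching read side (names invented; writers are assumed to be serialised by something like the keyring semaphore):

#include <linux/kernel.h>
#include <asm/barrier.h>

struct slots {
	unsigned short	nkeys;
	unsigned short	maxkeys;
	void		*keys[0];
};

/* writer: fill the slot, then extend the count over it */
static void slots_append(struct slots *s, void *elem)
{
	s->keys[s->nkeys] = elem;	/* 1: store the element               */
	smp_wmb();			/* 2: order the store before...       */
	s->nkeys++;			/* 3: ...making the count cover it    */
}

/* reader: read the count first, then the data; the barrier pairs with the
 * writer's smp_wmb() so keys[0..n-1] are valid.  The caller must keep 's'
 * alive, e.g. under the RCU read lock. */
static void *slots_last(struct slots *s)
{
	unsigned short n = s->nkeys;

	smp_rmb();
	return n ? s->keys[n - 1] : NULL;
}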
@@ -723,6 +730,8 @@ int __key_link(struct key *keyring, struct key *key)
723 max += klist->maxkeys; 730 max += klist->maxkeys;
724 731
725 ret = -ENFILE; 732 ret = -ENFILE;
733 if (max > 65535)
734 goto error3;
726 size = sizeof(*klist) + sizeof(*key) * max; 735 size = sizeof(*klist) + sizeof(*key) * max;
727 if (size > PAGE_SIZE) 736 if (size > PAGE_SIZE)
728 goto error3; 737 goto error3;
@@ -743,14 +752,13 @@ int __key_link(struct key *keyring, struct key *key)
743 752
744 /* add the key into the new space */ 753 /* add the key into the new space */
745 atomic_inc(&key->usage); 754 atomic_inc(&key->usage);
746
747 write_lock(&keyring->lock);
748 keyring->payload.subscriptions = nklist;
749 nklist->keys[nklist->nkeys++] = key; 755 nklist->keys[nklist->nkeys++] = key;
750 write_unlock(&keyring->lock); 756
757 rcu_assign_pointer(keyring->payload.subscriptions, nklist);
751 758
752 /* dispose of the old keyring list */ 759 /* dispose of the old keyring list */
753 kfree(klist); 760 if (klist)
761 call_rcu(&klist->rcu, keyring_link_rcu_disposal);
754 762
755 ret = 0; 763 ret = 0;
756 } 764 }
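When the array has to grow, the update takes the copy-and-replace route instead: build a larger list, publish it with rcu_assign_pointer(), and hand the old one to call_rcu() so that it is only kfree()d once every reader that might still hold a pointer to it has left its read-side section. A reduced sketch of that writer path (names invented; as in the patch, the rcu_head lives inside the list structure itself and writers are serialised by the keyring semaphore):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct toy_list {
	struct rcu_head	rcu;		/* destructor linkage for call_rcu() */
	unsigned short	maxkeys;
	unsigned short	nkeys;
	void		*keys[0];
};

static struct toy_list *toy_list_head;	/* readers use rcu_dereference() */

static void toy_list_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct toy_list, rcu));
}

/* grow by copying; 'old' may still be in use by readers, so defer its free */
static int toy_list_grow(struct toy_list *old, unsigned short newmax, void *extra)
{
	struct toy_list *new;

	new = kmalloc(sizeof(*new) + sizeof(void *) * newmax, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->maxkeys = newmax;			/* caller ensures newmax > old->nkeys */
	new->nkeys = old ? old->nkeys : 0;
	if (old)
		memcpy(new->keys, old->keys, sizeof(void *) * old->nkeys);
	new->keys[new->nkeys++] = extra;

	rcu_assign_pointer(toy_list_head, new);		/* publish the replacement */
	if (old)
		call_rcu(&old->rcu, toy_list_free_rcu);	/* free after the grace period */
	return 0;
}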
@@ -791,11 +799,26 @@ EXPORT_SYMBOL(key_link);
791 799
792/*****************************************************************************/ 800/*****************************************************************************/
793/* 801/*
802 * dispose of a keyring list after the RCU grace period, freeing the unlinked
803 * key
804 */
805static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
806{
807 struct keyring_list *klist =
808 container_of(rcu, struct keyring_list, rcu);
809
810 key_put(klist->keys[klist->delkey]);
811 kfree(klist);
812
813} /* end keyring_unlink_rcu_disposal() */
814
815/*****************************************************************************/
816/*
794 * unlink the first link to a key from a keyring 817 * unlink the first link to a key from a keyring
795 */ 818 */
796int key_unlink(struct key *keyring, struct key *key) 819int key_unlink(struct key *keyring, struct key *key)
797{ 820{
798 struct keyring_list *klist; 821 struct keyring_list *klist, *nklist;
799 int loop, ret; 822 int loop, ret;
800 823
801 key_check(keyring); 824 key_check(keyring);
@@ -819,31 +842,45 @@ int key_unlink(struct key *keyring, struct key *key)
819 ret = -ENOENT; 842 ret = -ENOENT;
820 goto error; 843 goto error;
821 844
822 key_is_present: 845key_is_present:
846 /* we need to copy the key list for RCU purposes */
847 nklist = kmalloc(sizeof(*klist) + sizeof(*key) * klist->maxkeys,
848 GFP_KERNEL);
849 if (!nklist)
850 goto nomem;
851 nklist->maxkeys = klist->maxkeys;
852 nklist->nkeys = klist->nkeys - 1;
853
854 if (loop > 0)
855 memcpy(&nklist->keys[0],
856 &klist->keys[0],
857 loop * sizeof(klist->keys[0]));
858
859 if (loop < nklist->nkeys)
860 memcpy(&nklist->keys[loop],
861 &klist->keys[loop + 1],
862 (nklist->nkeys - loop) * sizeof(klist->keys[0]));
863
823 /* adjust the user's quota */ 864 /* adjust the user's quota */
824 key_payload_reserve(keyring, 865 key_payload_reserve(keyring,
825 keyring->datalen - KEYQUOTA_LINK_BYTES); 866 keyring->datalen - KEYQUOTA_LINK_BYTES);
826 867
827 /* shuffle down the key pointers 868 rcu_assign_pointer(keyring->payload.subscriptions, nklist);
828 * - it might be worth shrinking the allocated memory, but that runs
829 * the risk of ENOMEM as we would have to copy
830 */
831 write_lock(&keyring->lock);
832 869
833 klist->nkeys--; 870 up_write(&keyring->sem);
834 if (loop < klist->nkeys)
835 memcpy(&klist->keys[loop],
836 &klist->keys[loop + 1],
837 (klist->nkeys - loop) * sizeof(struct key *));
838 871
839 write_unlock(&keyring->lock); 872 /* schedule for later cleanup */
873 klist->delkey = loop;
874 call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
840 875
841 up_write(&keyring->sem);
842 key_put(key);
843 ret = 0; 876 ret = 0;
844 877
845 error: 878error:
846 return ret; 879 return ret;
880nomem:
881 ret = -ENOMEM;
882 up_write(&keyring->sem);
883 goto error;
847 884
848} /* end key_unlink() */ 885} /* end key_unlink() */
849 886
@@ -851,13 +888,32 @@ EXPORT_SYMBOL(key_unlink);
851 888
852/*****************************************************************************/ 889/*****************************************************************************/
853/* 890/*
891 * dispose of a keyring list after the RCU grace period, releasing the keys it
892 * links to
893 */
894static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
895{
896 struct keyring_list *klist;
897 int loop;
898
899 klist = container_of(rcu, struct keyring_list, rcu);
900
901 for (loop = klist->nkeys - 1; loop >= 0; loop--)
902 key_put(klist->keys[loop]);
903
904 kfree(klist);
905
906} /* end keyring_clear_rcu_disposal() */
907
908/*****************************************************************************/
909/*
854 * clear the specified process keyring 910 * clear the specified process keyring
855 * - implements keyctl(KEYCTL_CLEAR) 911 * - implements keyctl(KEYCTL_CLEAR)
856 */ 912 */
857int keyring_clear(struct key *keyring) 913int keyring_clear(struct key *keyring)
858{ 914{
859 struct keyring_list *klist; 915 struct keyring_list *klist;
860 int loop, ret; 916 int ret;
861 917
862 ret = -ENOTDIR; 918 ret = -ENOTDIR;
863 if (keyring->type == &key_type_keyring) { 919 if (keyring->type == &key_type_keyring) {
@@ -870,20 +926,15 @@ int keyring_clear(struct key *keyring)
870 key_payload_reserve(keyring, 926 key_payload_reserve(keyring,
871 sizeof(struct keyring_list)); 927 sizeof(struct keyring_list));
872 928
873 write_lock(&keyring->lock); 929 rcu_assign_pointer(keyring->payload.subscriptions,
874 keyring->payload.subscriptions = NULL; 930 NULL);
875 write_unlock(&keyring->lock);
876 } 931 }
877 932
878 up_write(&keyring->sem); 933 up_write(&keyring->sem);
879 934
880 /* free the keys after the locks have been dropped */ 935 /* free the keys after the locks have been dropped */
881 if (klist) { 936 if (klist)
882 for (loop = klist->nkeys - 1; loop >= 0; loop--) 937 call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
883 key_put(klist->keys[loop]);
884
885 kfree(klist);
886 }
887 938
888 ret = 0; 939 ret = 0;
889 } 940 }
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 91343b85c39c..c55cf1fd0826 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -140,7 +140,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
140 140
141 now = current_kernel_time(); 141 now = current_kernel_time();
142 142
143 read_lock(&key->lock); 143 rcu_read_lock();
144 144
145 /* come up with a suitable timeout value */ 145 /* come up with a suitable timeout value */
146 if (key->expiry == 0) { 146 if (key->expiry == 0) {
@@ -164,14 +164,17 @@ static int proc_keys_show(struct seq_file *m, void *v)
164 sprintf(xbuf, "%luw", timo / (60*60*24*7)); 164 sprintf(xbuf, "%luw", timo / (60*60*24*7));
165 } 165 }
166 166
167#define showflag(KEY, LETTER, FLAG) \
168 (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
169
167 seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %06x %5d %5d %-9.9s ", 170 seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %06x %5d %5d %-9.9s ",
168 key->serial, 171 key->serial,
169 key->flags & KEY_FLAG_INSTANTIATED ? 'I' : '-', 172 showflag(key, 'I', KEY_FLAG_INSTANTIATED),
170 key->flags & KEY_FLAG_REVOKED ? 'R' : '-', 173 showflag(key, 'R', KEY_FLAG_REVOKED),
171 key->flags & KEY_FLAG_DEAD ? 'D' : '-', 174 showflag(key, 'D', KEY_FLAG_DEAD),
172 key->flags & KEY_FLAG_IN_QUOTA ? 'Q' : '-', 175 showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
173 key->flags & KEY_FLAG_USER_CONSTRUCT ? 'U' : '-', 176 showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
174 key->flags & KEY_FLAG_NEGATIVE ? 'N' : '-', 177 showflag(key, 'N', KEY_FLAG_NEGATIVE),
175 atomic_read(&key->usage), 178 atomic_read(&key->usage),
176 xbuf, 179 xbuf,
177 key->perm, 180 key->perm,
@@ -179,11 +182,13 @@ static int proc_keys_show(struct seq_file *m, void *v)
179 key->gid, 182 key->gid,
180 key->type->name); 183 key->type->name);
181 184
185#undef showflag
186
182 if (key->type->describe) 187 if (key->type->describe)
183 key->type->describe(key, m); 188 key->type->describe(key, m);
184 seq_putc(m, '\n'); 189 seq_putc(m, '\n');
185 190
186 read_unlock(&key->lock); 191 rcu_read_unlock();
187 192
188 return 0; 193 return 0;
189 194
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 2eb0e471cd40..059c350cac46 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -38,10 +38,9 @@ struct key root_user_keyring = {
38 .serial = 2, 38 .serial = 2,
39 .type = &key_type_keyring, 39 .type = &key_type_keyring,
40 .user = &root_key_user, 40 .user = &root_key_user,
41 .lock = RW_LOCK_UNLOCKED,
42 .sem = __RWSEM_INITIALIZER(root_user_keyring.sem), 41 .sem = __RWSEM_INITIALIZER(root_user_keyring.sem),
43 .perm = KEY_USR_ALL, 42 .perm = KEY_USR_ALL,
44 .flags = KEY_FLAG_INSTANTIATED, 43 .flags = 1 << KEY_FLAG_INSTANTIATED,
45 .description = "_uid.0", 44 .description = "_uid.0",
46#ifdef KEY_DEBUGGING 45#ifdef KEY_DEBUGGING
47 .magic = KEY_DEBUG_MAGIC, 46 .magic = KEY_DEBUG_MAGIC,
@@ -54,10 +53,9 @@ struct key root_session_keyring = {
54 .serial = 1, 53 .serial = 1,
55 .type = &key_type_keyring, 54 .type = &key_type_keyring,
56 .user = &root_key_user, 55 .user = &root_key_user,
57 .lock = RW_LOCK_UNLOCKED,
58 .sem = __RWSEM_INITIALIZER(root_session_keyring.sem), 56 .sem = __RWSEM_INITIALIZER(root_session_keyring.sem),
59 .perm = KEY_USR_ALL, 57 .perm = KEY_USR_ALL,
60 .flags = KEY_FLAG_INSTANTIATED, 58 .flags = 1 << KEY_FLAG_INSTANTIATED,
61 .description = "_uid_ses.0", 59 .description = "_uid_ses.0",
62#ifdef KEY_DEBUGGING 60#ifdef KEY_DEBUGGING
63 .magic = KEY_DEBUG_MAGIC, 61 .magic = KEY_DEBUG_MAGIC,
@@ -349,9 +347,7 @@ void key_fsuid_changed(struct task_struct *tsk)
349 /* update the ownership of the thread keyring */ 347 /* update the ownership of the thread keyring */
350 if (tsk->thread_keyring) { 348 if (tsk->thread_keyring) {
351 down_write(&tsk->thread_keyring->sem); 349 down_write(&tsk->thread_keyring->sem);
352 write_lock(&tsk->thread_keyring->lock);
353 tsk->thread_keyring->uid = tsk->fsuid; 350 tsk->thread_keyring->uid = tsk->fsuid;
354 write_unlock(&tsk->thread_keyring->lock);
355 up_write(&tsk->thread_keyring->sem); 351 up_write(&tsk->thread_keyring->sem);
356 } 352 }
357 353
@@ -366,9 +362,7 @@ void key_fsgid_changed(struct task_struct *tsk)
366 /* update the ownership of the thread keyring */ 362 /* update the ownership of the thread keyring */
367 if (tsk->thread_keyring) { 363 if (tsk->thread_keyring) {
368 down_write(&tsk->thread_keyring->sem); 364 down_write(&tsk->thread_keyring->sem);
369 write_lock(&tsk->thread_keyring->lock);
370 tsk->thread_keyring->gid = tsk->fsgid; 365 tsk->thread_keyring->gid = tsk->fsgid;
371 write_unlock(&tsk->thread_keyring->lock);
372 up_write(&tsk->thread_keyring->sem); 366 up_write(&tsk->thread_keyring->sem);
373 } 367 }
374 368
@@ -588,7 +582,7 @@ struct key *lookup_user_key(key_serial_t id, int create, int partial,
588 } 582 }
589 583
590 ret = -EIO; 584 ret = -EIO;
591 if (!partial && !(key->flags & KEY_FLAG_INSTANTIATED)) 585 if (!partial && !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
592 goto invalid_key; 586 goto invalid_key;
593 587
594 ret = -EACCES; 588 ret = -EACCES;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 9705b1aeba5d..1f6c0940297f 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -105,7 +105,7 @@ static struct key *__request_key_construction(struct key_type *type,
105 struct key_construction cons; 105 struct key_construction cons;
106 struct timespec now; 106 struct timespec now;
107 struct key *key; 107 struct key *key;
108 int ret, negative; 108 int ret, negated;
109 109
110 /* create a key and add it to the queue */ 110 /* create a key and add it to the queue */
111 key = key_alloc(type, description, 111 key = key_alloc(type, description,
@@ -113,9 +113,7 @@ static struct key *__request_key_construction(struct key_type *type,
113 if (IS_ERR(key)) 113 if (IS_ERR(key))
114 goto alloc_failed; 114 goto alloc_failed;
115 115
116 write_lock(&key->lock); 116 set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
117 key->flags |= KEY_FLAG_USER_CONSTRUCT;
118 write_unlock(&key->lock);
119 117
120 cons.key = key; 118 cons.key = key;
121 list_add_tail(&cons.link, &key->user->consq); 119 list_add_tail(&cons.link, &key->user->consq);
@@ -130,7 +128,7 @@ static struct key *__request_key_construction(struct key_type *type,
130 128
131 /* if the key wasn't instantiated, then we want to give an error */ 129 /* if the key wasn't instantiated, then we want to give an error */
132 ret = -ENOKEY; 130 ret = -ENOKEY;
133 if (!(key->flags & KEY_FLAG_INSTANTIATED)) 131 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
134 goto request_failed; 132 goto request_failed;
135 133
136 down_write(&key_construction_sem); 134 down_write(&key_construction_sem);
@@ -139,7 +137,7 @@ static struct key *__request_key_construction(struct key_type *type,
139 137
140 /* also give an error if the key was negatively instantiated */ 138 /* also give an error if the key was negatively instantiated */
141 check_not_negative: 139 check_not_negative:
142 if (key->flags & KEY_FLAG_NEGATIVE) { 140 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
143 key_put(key); 141 key_put(key);
144 key = ERR_PTR(-ENOKEY); 142 key = ERR_PTR(-ENOKEY);
145 } 143 }
@@ -152,24 +150,23 @@ static struct key *__request_key_construction(struct key_type *type,
152 * - remove from construction queue 150 * - remove from construction queue
153 * - mark the key as dead 151 * - mark the key as dead
154 */ 152 */
155 negative = 0; 153 negated = 0;
156 down_write(&key_construction_sem); 154 down_write(&key_construction_sem);
157 155
158 list_del(&cons.link); 156 list_del(&cons.link);
159 157
160 write_lock(&key->lock);
161 key->flags &= ~KEY_FLAG_USER_CONSTRUCT;
162
163 /* check it didn't get instantiated between the check and the down */ 158 /* check it didn't get instantiated between the check and the down */
164 if (!(key->flags & KEY_FLAG_INSTANTIATED)) { 159 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
165 key->flags |= KEY_FLAG_INSTANTIATED | KEY_FLAG_NEGATIVE; 160 set_bit(KEY_FLAG_NEGATIVE, &key->flags);
166 negative = 1; 161 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
162 negated = 1;
167 } 163 }
168 164
169 write_unlock(&key->lock); 165 clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
166
170 up_write(&key_construction_sem); 167 up_write(&key_construction_sem);
171 168
172 if (!negative) 169 if (!negated)
173 goto check_not_negative; /* surprisingly, the key got 170 goto check_not_negative; /* surprisingly, the key got
174 * instantiated */ 171 * instantiated */
175 172
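__request_key_construction() keeps its check/act structure, but the state transitions are now plain bit operations, and the interesting part is the re-check: after the upcall fails, KEY_FLAG_INSTANTIATED is tested again under key_construction_sem, because another thread may have instantiated the key between the unlocked test and taking the semaphore. That double-check in miniature (flag numbers and names invented):

#include <linux/bitops.h>
#include <linux/rwsem.h>

#define OBJ_INSTANTIATED	0	/* illustrative bit numbers */
#define OBJ_NEGATIVE		1

static DECLARE_RWSEM(construction_sem);

/* returns 1 if we negatively instantiated the object, 0 if someone
 * beat us to instantiating it for real */
static int obj_negate_if_still_empty(unsigned long *flags)
{
	int negated = 0;

	down_write(&construction_sem);
	if (!test_bit(OBJ_INSTANTIATED, flags)) {	/* re-check under the semaphore */
		set_bit(OBJ_NEGATIVE, flags);
		set_bit(OBJ_INSTANTIATED, flags);	/* same order as the patch */
		negated = 1;
	}
	up_write(&construction_sem);

	return negated;
}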
@@ -250,7 +247,7 @@ static struct key *request_key_construction(struct key_type *type,
250 247
251 for (;;) { 248 for (;;) {
252 set_current_state(TASK_UNINTERRUPTIBLE); 249 set_current_state(TASK_UNINTERRUPTIBLE);
253 if (!(ckey->flags & KEY_FLAG_USER_CONSTRUCT)) 250 if (!test_bit(KEY_FLAG_USER_CONSTRUCT, &ckey->flags))
254 break; 251 break;
255 schedule(); 252 schedule();
256 } 253 }
@@ -339,7 +336,8 @@ int key_validate(struct key *key)
339 if (key) { 336 if (key) {
340 /* check it's still accessible */ 337 /* check it's still accessible */
341 ret = -EKEYREVOKED; 338 ret = -EKEYREVOKED;
342 if (key->flags & (KEY_FLAG_REVOKED | KEY_FLAG_DEAD)) 339 if (test_bit(KEY_FLAG_REVOKED, &key->flags) ||
340 test_bit(KEY_FLAG_DEAD, &key->flags))
343 goto error; 341 goto error;
344 342
345 /* check it hasn't expired */ 343 /* check it hasn't expired */
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 8d65b3a28129..c33d3614a0db 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -42,12 +42,19 @@ struct key_type key_type_user = {
42 .read = user_read, 42 .read = user_read,
43}; 43};
44 44
45struct user_key_payload {
46 struct rcu_head rcu; /* RCU destructor */
47 unsigned short datalen; /* length of this data */
48 char data[0]; /* actual data */
49};
50
45/*****************************************************************************/ 51/*****************************************************************************/
46/* 52/*
47 * instantiate a user defined key 53 * instantiate a user defined key
48 */ 54 */
49static int user_instantiate(struct key *key, const void *data, size_t datalen) 55static int user_instantiate(struct key *key, const void *data, size_t datalen)
50{ 56{
57 struct user_key_payload *upayload;
51 int ret; 58 int ret;
52 59
53 ret = -EINVAL; 60 ret = -EINVAL;
@@ -58,13 +65,15 @@ static int user_instantiate(struct key *key, const void *data, size_t datalen)
58 if (ret < 0) 65 if (ret < 0)
59 goto error; 66 goto error;
60 67
61 /* attach the data */
62 ret = -ENOMEM; 68 ret = -ENOMEM;
63 key->payload.data = kmalloc(datalen, GFP_KERNEL); 69 upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
64 if (!key->payload.data) 70 if (!upayload)
65 goto error; 71 goto error;
66 72
67 memcpy(key->payload.data, data, datalen); 73 /* attach the data */
74 upayload->datalen = datalen;
75 memcpy(upayload->data, data, datalen);
76 rcu_assign_pointer(key->payload.data, upayload);
68 ret = 0; 77 ret = 0;
69 78
70 error: 79 error:
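User-defined keys gain a proper payload structure: struct user_key_payload bundles the rcu_head needed for deferred freeing, the data length, and the data itself as a flexible array member, so one kmalloc(sizeof(*upayload) + datalen) carries everything and the whole payload can later be swapped with a single pointer assignment. A minimal sketch of building and publishing such a payload (hypothetical names; user_instantiate() above is the real thing):

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct toy_payload {
	struct rcu_head	rcu;		/* for call_rcu() when the payload is replaced */
	unsigned short	datalen;
	char		data[0];	/* flexible array member */
};

static struct toy_payload *toy_current;	/* readers use rcu_dereference() */

static int toy_set_payload(const void *data, size_t datalen)
{
	struct toy_payload *p;

	if (datalen == 0 || datalen > 32767)	/* same bound the key code enforces */
		return -EINVAL;

	p = kmalloc(sizeof(*p) + datalen, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->datalen = datalen;
	memcpy(p->data, data, datalen);

	rcu_assign_pointer(toy_current, p);	/* contents filled in before publication */
	return 0;
}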
@@ -75,18 +84,25 @@ static int user_instantiate(struct key *key, const void *data, size_t datalen)
75/*****************************************************************************/ 84/*****************************************************************************/
76/* 85/*
77 * duplicate a user defined key 86 * duplicate a user defined key
87 * - both keys' semaphores are locked against further modification
88 * - the new key cannot yet be accessed
78 */ 89 */
79static int user_duplicate(struct key *key, const struct key *source) 90static int user_duplicate(struct key *key, const struct key *source)
80{ 91{
92 struct user_key_payload *upayload, *spayload;
81 int ret; 93 int ret;
82 94
83 /* just copy the payload */ 95 /* just copy the payload */
84 ret = -ENOMEM; 96 ret = -ENOMEM;
85 key->payload.data = kmalloc(source->datalen, GFP_KERNEL); 97 upayload = kmalloc(sizeof(*upayload) + source->datalen, GFP_KERNEL);
98 if (upayload) {
99 spayload = rcu_dereference(source->payload.data);
100 BUG_ON(source->datalen != spayload->datalen);
86 101
87 if (key->payload.data) { 102 upayload->datalen = key->datalen = spayload->datalen;
88 key->datalen = source->datalen; 103 memcpy(upayload->data, spayload->data, key->datalen);
89 memcpy(key->payload.data, source->payload.data, source->datalen); 104
105 key->payload.data = upayload;
90 ret = 0; 106 ret = 0;
91 } 107 }
92 108
@@ -96,40 +112,54 @@ static int user_duplicate(struct key *key, const struct key *source)
96 112
97/*****************************************************************************/ 113/*****************************************************************************/
98/* 114/*
115 * dispose of the old data from an updated user defined key
116 */
117static void user_update_rcu_disposal(struct rcu_head *rcu)
118{
119 struct user_key_payload *upayload;
120
121 upayload = container_of(rcu, struct user_key_payload, rcu);
122
123 kfree(upayload);
124
125} /* end user_update_rcu_disposal() */
126
127/*****************************************************************************/
128/*
99 * update a user defined key 129 * update a user defined key
130 * - the key's semaphore is write-locked
100 */ 131 */
101static int user_update(struct key *key, const void *data, size_t datalen) 132static int user_update(struct key *key, const void *data, size_t datalen)
102{ 133{
103 void *new, *zap; 134 struct user_key_payload *upayload, *zap;
104 int ret; 135 int ret;
105 136
106 ret = -EINVAL; 137 ret = -EINVAL;
107 if (datalen <= 0 || datalen > 32767 || !data) 138 if (datalen <= 0 || datalen > 32767 || !data)
108 goto error; 139 goto error;
109 140
110 /* copy the data */ 141 /* construct a replacement payload */
111 ret = -ENOMEM; 142 ret = -ENOMEM;
112 new = kmalloc(datalen, GFP_KERNEL); 143 upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
113 if (!new) 144 if (!upayload)
114 goto error; 145 goto error;
115 146
116 memcpy(new, data, datalen); 147 upayload->datalen = datalen;
148 memcpy(upayload->data, data, datalen);
117 149
118 /* check the quota and attach the new data */ 150 /* check the quota and attach the new data */
119 zap = new; 151 zap = upayload;
120 write_lock(&key->lock);
121 152
122 ret = key_payload_reserve(key, datalen); 153 ret = key_payload_reserve(key, datalen);
123 154
124 if (ret == 0) { 155 if (ret == 0) {
125 /* attach the new data, displacing the old */ 156 /* attach the new data, displacing the old */
126 zap = key->payload.data; 157 zap = key->payload.data;
127 key->payload.data = new; 158 rcu_assign_pointer(key->payload.data, upayload);
128 key->expiry = 0; 159 key->expiry = 0;
129 } 160 }
130 161
131 write_unlock(&key->lock); 162 call_rcu(&zap->rcu, user_update_rcu_disposal);
132 kfree(zap);
133 163
134 error: 164 error:
135 return ret; 165 return ret;
@@ -152,13 +182,15 @@ static int user_match(const struct key *key, const void *description)
152 */ 182 */
153static void user_destroy(struct key *key) 183static void user_destroy(struct key *key)
154{ 184{
155 kfree(key->payload.data); 185 struct user_key_payload *upayload = key->payload.data;
186
187 kfree(upayload);
156 188
157} /* end user_destroy() */ 189} /* end user_destroy() */
158 190
159/*****************************************************************************/ 191/*****************************************************************************/
160/* 192/*
161 * describe the user 193 * describe the user key
162 */ 194 */
163static void user_describe(const struct key *key, struct seq_file *m) 195static void user_describe(const struct key *key, struct seq_file *m)
164{ 196{
@@ -171,18 +203,23 @@ static void user_describe(const struct key *key, struct seq_file *m)
171/*****************************************************************************/ 203/*****************************************************************************/
172/* 204/*
173 * read the key data 205 * read the key data
206 * - the key's semaphore is read-locked
174 */ 207 */
175static long user_read(const struct key *key, 208static long user_read(const struct key *key,
176 char __user *buffer, size_t buflen) 209 char __user *buffer, size_t buflen)
177{ 210{
178 long ret = key->datalen; 211 struct user_key_payload *upayload;
212 long ret;
213
214 upayload = rcu_dereference(key->payload.data);
215 ret = upayload->datalen;
179 216
180 /* we can return the data as is */ 217 /* we can return the data as is */
181 if (buffer && buflen > 0) { 218 if (buffer && buflen > 0) {
182 if (buflen > key->datalen) 219 if (buflen > upayload->datalen)
183 buflen = key->datalen; 220 buflen = upayload->datalen;
184 221
185 if (copy_to_user(buffer, key->payload.data, buflen) != 0) 222 if (copy_to_user(buffer, upayload->data, buflen) != 0)
186 ret = -EFAULT; 223 ret = -EFAULT;
187 } 224 }
188 225
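user_read() is worth a second look: it runs with the key's semaphore read-locked (as the added comment says), and it is that semaphore, not an rcu_read_lock() section, which keeps the payload alive across copy_to_user(), since the copy may sleep. rcu_dereference() is still used for the pointer fetch; a present-day kernel would probably spell it rcu_dereference_protected() or rcu_dereference_check() to document the locking to the checkers. A sketch of the same arrangement with invented names:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>

struct toy_payload {
	unsigned short	datalen;
	char		data[0];
};

/* caller holds the owning object's semaphore for read; updaters replace
 * *pp with it held for write, so the payload cannot vanish under us even
 * though copy_to_user() may sleep */
static long toy_read(struct toy_payload **pp, char __user *buffer, size_t buflen)
{
	struct toy_payload *p = rcu_dereference(*pp);
	long ret = p->datalen;

	if (buffer && buflen > 0) {
		if (buflen > p->datalen)
			buflen = p->datalen;
		if (copy_to_user(buffer, p->data, buflen) != 0)
			ret = -EFAULT;
	}
	return ret;
}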