aboutsummaryrefslogtreecommitdiffstats
path: root/fs/afs
diff options
context:
space:
mode:
authorDavid Howells <dhowells@redhat.com>2019-04-25 09:26:50 -0400
committerDavid Howells <dhowells@redhat.com>2019-04-25 09:26:50 -0400
commitd46966013bb4f1fbed7cf995828523a3d1f265c6 (patch)
tree10566311df9ba7fa1149c801620b40f4d5b13532 /fs/afs
parent4be5975aea154e164696128d049dec9ed341585c (diff)
afs: Add file locking tracepoints
Add two tracepoints for monitoring AFS file locking. Firstly, add one that follows the operational part: echo 1 >/sys/kernel/debug/tracing/events/afs/afs_flock_op/enable And add a second that more follows the event-driven part: echo 1 >/sys/kernel/debug/tracing/events/afs/afs_flock_ev/enable Individual file_lock structs seen by afs are tagged with debugging IDs that are displayed in the trace log to make it easier to see what's going on, especially as setting the first lock always seems to involve copying the file_lock twice. Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'fs/afs')
-rw-r--r--fs/afs/flock.c72
1 file changed, 62 insertions(+), 10 deletions(-)
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 827d5a711088..3e06a560f66b 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -32,6 +32,8 @@ static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_sta
32 vnode->lock_state = state; 32 vnode->lock_state = state;
33} 33}
34 34
35static atomic_t afs_file_lock_debug_id;
36
35/* 37/*
36 * if the callback is broken on this vnode, then the lock may now be available 38 * if the callback is broken on this vnode, then the lock may now be available
37 */ 39 */
@@ -45,6 +47,7 @@ void afs_lock_may_be_available(struct afs_vnode *vnode)
45 spin_lock(&vnode->lock); 47 spin_lock(&vnode->lock);
46 if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB) 48 if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
47 afs_next_locker(vnode, 0); 49 afs_next_locker(vnode, 0);
50 trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
48 spin_unlock(&vnode->lock); 51 spin_unlock(&vnode->lock);
49} 52}
50 53
@@ -78,6 +81,7 @@ void afs_lock_op_done(struct afs_call *call)
78 81
79 if (call->error == 0) { 82 if (call->error == 0) {
80 spin_lock(&vnode->lock); 83 spin_lock(&vnode->lock);
84 trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
81 vnode->locked_at = call->reply_time; 85 vnode->locked_at = call->reply_time;
82 afs_schedule_lock_extension(vnode); 86 afs_schedule_lock_extension(vnode);
83 spin_unlock(&vnode->lock); 87 spin_unlock(&vnode->lock);
@@ -100,6 +104,7 @@ static void afs_grant_locks(struct afs_vnode *vnode)
100 104
101 list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks); 105 list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
102 p->fl_u.afs.state = AFS_LOCK_GRANTED; 106 p->fl_u.afs.state = AFS_LOCK_GRANTED;
107 trace_afs_flock_op(vnode, p, afs_flock_op_grant);
103 wake_up(&p->fl_wait); 108 wake_up(&p->fl_wait);
104 } 109 }
105} 110}
@@ -142,9 +147,11 @@ static void afs_next_locker(struct afs_vnode *vnode, int error)
142 if (next) { 147 if (next) {
143 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); 148 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
144 next->fl_u.afs.state = AFS_LOCK_YOUR_TRY; 149 next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
150 trace_afs_flock_op(vnode, next, afs_flock_op_wake);
145 wake_up(&next->fl_wait); 151 wake_up(&next->fl_wait);
146 } else { 152 } else {
147 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE); 153 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
154 trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
148 } 155 }
149 156
150 _leave(""); 157 _leave("");
@@ -264,8 +271,8 @@ again:
264 _debug("wstate %u for %p", vnode->lock_state, vnode); 271 _debug("wstate %u for %p", vnode->lock_state, vnode);
265 switch (vnode->lock_state) { 272 switch (vnode->lock_state) {
266 case AFS_VNODE_LOCK_NEED_UNLOCK: 273 case AFS_VNODE_LOCK_NEED_UNLOCK:
267 _debug("unlock");
268 afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING); 274 afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
275 trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
269 spin_unlock(&vnode->lock); 276 spin_unlock(&vnode->lock);
270 277
271 /* attempt to release the server lock; if it fails, we just 278 /* attempt to release the server lock; if it fails, we just
@@ -291,6 +298,7 @@ again:
291 298
292 key = key_get(vnode->lock_key); 299 key = key_get(vnode->lock_key);
293 afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING); 300 afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
301 trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
294 spin_unlock(&vnode->lock); 302 spin_unlock(&vnode->lock);
295 303
296 ret = afs_extend_lock(vnode, key); /* RPC */ 304 ret = afs_extend_lock(vnode, key); /* RPC */
@@ -349,6 +357,7 @@ static void afs_defer_unlock(struct afs_vnode *vnode)
349 cancel_delayed_work(&vnode->lock_work); 357 cancel_delayed_work(&vnode->lock_work);
350 358
351 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK); 359 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
360 trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
352 queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0); 361 queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
353 } 362 }
354} 363}
@@ -421,6 +430,8 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
421 if (ret < 0) 430 if (ret < 0)
422 return ret; 431 return ret;
423 432
433 trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);
434
424 spin_lock(&vnode->lock); 435 spin_lock(&vnode->lock);
425 list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); 436 list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
426 437
@@ -457,7 +468,7 @@ try_to_lock:
457 * though we don't wait for the reply (it's not too bad a problem - the 468 * though we don't wait for the reply (it's not too bad a problem - the
458 * lock will expire in 5 mins anyway). 469 * lock will expire in 5 mins anyway).
459 */ 470 */
460 _debug("not locked"); 471 trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
461 vnode->lock_key = key_get(key); 472 vnode->lock_key = key_get(key);
462 vnode->lock_type = type; 473 vnode->lock_type = type;
463 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); 474 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
@@ -473,12 +484,14 @@ try_to_lock:
473 case -EPERM: 484 case -EPERM:
474 case -EACCES: 485 case -EACCES:
475 fl->fl_u.afs.state = ret; 486 fl->fl_u.afs.state = ret;
487 trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
476 list_del_init(&fl->fl_u.afs.link); 488 list_del_init(&fl->fl_u.afs.link);
477 afs_next_locker(vnode, ret); 489 afs_next_locker(vnode, ret);
478 goto error_unlock; 490 goto error_unlock;
479 491
480 default: 492 default:
481 fl->fl_u.afs.state = ret; 493 fl->fl_u.afs.state = ret;
494 trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
482 list_del_init(&fl->fl_u.afs.link); 495 list_del_init(&fl->fl_u.afs.link);
483 afs_next_locker(vnode, 0); 496 afs_next_locker(vnode, 0);
484 goto error_unlock; 497 goto error_unlock;
@@ -488,14 +501,13 @@ try_to_lock:
488 * will have to retry. The server will break the outstanding 501 * will have to retry. The server will break the outstanding
489 * callbacks on a file when a lock is released. 502 * callbacks on a file when a lock is released.
490 */ 503 */
491 _debug("would block");
492 ASSERT(list_empty(&vnode->granted_locks)); 504 ASSERT(list_empty(&vnode->granted_locks));
493 ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link); 505 ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
494 goto lock_is_contended; 506 goto lock_is_contended;
495 507
496 case 0: 508 case 0:
497 _debug("acquired");
498 afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED); 509 afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
510 trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
499 afs_grant_locks(vnode); 511 afs_grant_locks(vnode);
500 goto vnode_is_locked_u; 512 goto vnode_is_locked_u;
501 } 513 }
@@ -507,7 +519,9 @@ vnode_is_locked:
507 ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED); 519 ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);
508 520
509 /* ... but the VFS still needs to distribute access on this client. */ 521 /* ... but the VFS still needs to distribute access on this client. */
522 trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
510 ret = locks_lock_file_wait(file, fl); 523 ret = locks_lock_file_wait(file, fl);
524 trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
511 if (ret < 0) 525 if (ret < 0)
512 goto vfs_rejected_lock; 526 goto vfs_rejected_lock;
513 527
@@ -528,6 +542,7 @@ lock_is_contended:
528 } 542 }
529 543
530 afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB); 544 afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
545 trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
531 queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5); 546 queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);
532 547
533need_to_wait: 548need_to_wait:
@@ -538,10 +553,10 @@ need_to_wait:
538 */ 553 */
539 spin_unlock(&vnode->lock); 554 spin_unlock(&vnode->lock);
540 555
541 _debug("sleep"); 556 trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
542 ret = wait_event_interruptible(fl->fl_wait, 557 ret = wait_event_interruptible(fl->fl_wait,
543 fl->fl_u.afs.state != AFS_LOCK_PENDING); 558 fl->fl_u.afs.state != AFS_LOCK_PENDING);
544 _debug("wait = %d", ret); 559 trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);
545 560
546 if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) { 561 if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
547 spin_lock(&vnode->lock); 562 spin_lock(&vnode->lock);
@@ -602,6 +617,8 @@ static int afs_do_unlk(struct file *file, struct file_lock *fl)
602 617
603 _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); 618 _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
604 619
620 trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);
621
605 /* Flush all pending writes before doing anything with locks. */ 622 /* Flush all pending writes before doing anything with locks. */
606 vfs_fsync(file, 0); 623 vfs_fsync(file, 0);
607 624
@@ -655,6 +672,8 @@ error:
655int afs_lock(struct file *file, int cmd, struct file_lock *fl) 672int afs_lock(struct file *file, int cmd, struct file_lock *fl)
656{ 673{
657 struct afs_vnode *vnode = AFS_FS_I(locks_inode(file)); 674 struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
675 enum afs_flock_operation op;
676 int ret;
658 677
659 _enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}", 678 _enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
660 vnode->fid.vid, vnode->fid.vnode, cmd, 679 vnode->fid.vid, vnode->fid.vnode, cmd,
@@ -667,9 +686,23 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
667 686
668 if (IS_GETLK(cmd)) 687 if (IS_GETLK(cmd))
669 return afs_do_getlk(file, fl); 688 return afs_do_getlk(file, fl);
689
690 fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
691 trace_afs_flock_op(vnode, fl, afs_flock_op_lock);
692
670 if (fl->fl_type == F_UNLCK) 693 if (fl->fl_type == F_UNLCK)
671 return afs_do_unlk(file, fl); 694 ret = afs_do_unlk(file, fl);
672 return afs_do_setlk(file, fl); 695 else
696 ret = afs_do_setlk(file, fl);
697
698 switch (ret) {
699 case 0: op = afs_flock_op_return_ok; break;
700 case -EAGAIN: op = afs_flock_op_return_eagain; break;
701 case -EDEADLK: op = afs_flock_op_return_edeadlk; break;
702 default: op = afs_flock_op_return_error; break;
703 }
704 trace_afs_flock_op(vnode, fl, op);
705 return ret;
673} 706}
674 707
675/* 708/*
@@ -678,6 +711,8 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
678int afs_flock(struct file *file, int cmd, struct file_lock *fl) 711int afs_flock(struct file *file, int cmd, struct file_lock *fl)
679{ 712{
680 struct afs_vnode *vnode = AFS_FS_I(locks_inode(file)); 713 struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
714 enum afs_flock_operation op;
715 int ret;
681 716
682 _enter("{%llx:%llu},%d,{t=%x,fl=%x}", 717 _enter("{%llx:%llu},%d,{t=%x,fl=%x}",
683 vnode->fid.vid, vnode->fid.vnode, cmd, 718 vnode->fid.vid, vnode->fid.vnode, cmd,
@@ -693,10 +728,23 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl)
693 if (!(fl->fl_flags & FL_FLOCK)) 728 if (!(fl->fl_flags & FL_FLOCK))
694 return -ENOLCK; 729 return -ENOLCK;
695 730
731 fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
732 trace_afs_flock_op(vnode, fl, afs_flock_op_flock);
733
696 /* we're simulating flock() locks using posix locks on the server */ 734 /* we're simulating flock() locks using posix locks on the server */
697 if (fl->fl_type == F_UNLCK) 735 if (fl->fl_type == F_UNLCK)
698 return afs_do_unlk(file, fl); 736 ret = afs_do_unlk(file, fl);
699 return afs_do_setlk(file, fl); 737 else
738 ret = afs_do_setlk(file, fl);
739
740 switch (ret) {
741 case 0: op = afs_flock_op_return_ok; break;
742 case -EAGAIN: op = afs_flock_op_return_eagain; break;
743 case -EDEADLK: op = afs_flock_op_return_edeadlk; break;
744 default: op = afs_flock_op_return_error; break;
745 }
746 trace_afs_flock_op(vnode, fl, op);
747 return ret;
700} 748}
701 749
702/* 750/*
@@ -711,7 +759,10 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
711 759
712 _enter(""); 760 _enter("");
713 761
762 new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
763
714 spin_lock(&vnode->lock); 764 spin_lock(&vnode->lock);
765 trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
715 list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link); 766 list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
716 spin_unlock(&vnode->lock); 767 spin_unlock(&vnode->lock);
717} 768}
@@ -728,6 +779,7 @@ static void afs_fl_release_private(struct file_lock *fl)
728 779
729 spin_lock(&vnode->lock); 780 spin_lock(&vnode->lock);
730 781
782 trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
731 list_del_init(&fl->fl_u.afs.link); 783 list_del_init(&fl->fl_u.afs.link);
732 if (list_empty(&vnode->granted_locks)) 784 if (list_empty(&vnode->granted_locks))
733 afs_defer_unlock(vnode); 785 afs_defer_unlock(vnode);