author		Sage Weil <sage@newdream.net>	2010-06-09 19:47:10 -0400
committer	Sage Weil <sage@newdream.net>	2010-06-10 16:30:07 -0400
commit		3d7ded4d81d807c2f75f310a8d74a5d72be13a1b (patch)
tree		c633b938f72dcd39c84f2430c7d4025331ab4369 /fs/ceph/caps.c
parent		9dbd412f56c453f15014396c6024b895c1485ccb (diff)
ceph: release cap on import if we don't have the inode
If we get an IMPORT that gives us a cap, but we don't have the inode, queue a release (and try to send it immediately) so that the MDS doesn't get stuck waiting for us.

Signed-off-by: Sage Weil <sage@newdream.net>
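In outline, the change factors the release-queuing code out of ceph_queue_caps_release() into a new __queue_cap_release() helper and calls it from ceph_handle_caps() when an IMPORT names an inode we don't have in cache. The missing-inode path (shown in full in the diff below) becomes roughly:

	if (!inode) {
		dout(" i don't have ino %llx\n", vino.ino);

		/* queue a release for the cap the MDS thinks we hold */
		if (op == CEPH_CAP_OP_IMPORT)
			__queue_cap_release(session, vino.ino, cap_id, mseq, seq);

		/* push any completed release messages so the MDS can move on */
		ceph_send_cap_releases(mdsc, session);
		goto done;
	}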
Diffstat (limited to 'fs/ceph/caps.c')
-rw-r--r--	fs/ceph/caps.c	90
1 file changed, 55 insertions(+), 35 deletions(-)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index da2a0e3cb200..7c692e746237 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -981,6 +981,46 @@ static int send_cap_msg(struct ceph_mds_session *session,
 	return 0;
 }
 
+static void __queue_cap_release(struct ceph_mds_session *session,
+				u64 ino, u64 cap_id, u32 migrate_seq,
+				u32 issue_seq)
+{
+	struct ceph_msg *msg;
+	struct ceph_mds_cap_release *head;
+	struct ceph_mds_cap_item *item;
+
+	spin_lock(&session->s_cap_lock);
+	BUG_ON(!session->s_num_cap_releases);
+	msg = list_first_entry(&session->s_cap_releases,
+			       struct ceph_msg, list_head);
+
+	dout(" adding %llx release to mds%d msg %p (%d left)\n",
+	     ino, session->s_mds, msg, session->s_num_cap_releases);
+
+	BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
+	head = msg->front.iov_base;
+	head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
+	item = msg->front.iov_base + msg->front.iov_len;
+	item->ino = cpu_to_le64(ino);
+	item->cap_id = cpu_to_le64(cap_id);
+	item->migrate_seq = cpu_to_le32(migrate_seq);
+	item->seq = cpu_to_le32(issue_seq);
+
+	session->s_num_cap_releases--;
+
+	msg->front.iov_len += sizeof(*item);
+	if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
+		dout(" release msg %p full\n", msg);
+		list_move_tail(&msg->list_head, &session->s_cap_releases_done);
+	} else {
+		dout(" release msg %p at %d/%d (%d)\n", msg,
+		     (int)le32_to_cpu(head->num),
+		     (int)CEPH_CAPS_PER_RELEASE,
+		     (int)msg->front.iov_len);
+	}
+	spin_unlock(&session->s_cap_lock);
+}
+
 /*
  * Queue cap releases when an inode is dropped from our cache.  Since
  * inode is about to be destroyed, there is no need for i_lock.
@@ -994,41 +1034,9 @@ void ceph_queue_caps_release(struct inode *inode)
 	while (p) {
 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
 		struct ceph_mds_session *session = cap->session;
-		struct ceph_msg *msg;
-		struct ceph_mds_cap_release *head;
-		struct ceph_mds_cap_item *item;
 
-		spin_lock(&session->s_cap_lock);
-		BUG_ON(!session->s_num_cap_releases);
-		msg = list_first_entry(&session->s_cap_releases,
-				       struct ceph_msg, list_head);
-
-		dout(" adding %p release to mds%d msg %p (%d left)\n",
-		     inode, session->s_mds, msg, session->s_num_cap_releases);
-
-		BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
-		head = msg->front.iov_base;
-		head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
-		item = msg->front.iov_base + msg->front.iov_len;
-		item->ino = cpu_to_le64(ceph_ino(inode));
-		item->cap_id = cpu_to_le64(cap->cap_id);
-		item->migrate_seq = cpu_to_le32(cap->mseq);
-		item->seq = cpu_to_le32(cap->issue_seq);
-
-		session->s_num_cap_releases--;
-
-		msg->front.iov_len += sizeof(*item);
-		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
-			dout(" release msg %p full\n", msg);
-			list_move_tail(&msg->list_head,
-				       &session->s_cap_releases_done);
-		} else {
-			dout(" release msg %p at %d/%d (%d)\n", msg,
-			     (int)le32_to_cpu(head->num),
-			     (int)CEPH_CAPS_PER_RELEASE,
-			     (int)msg->front.iov_len);
-		}
-		spin_unlock(&session->s_cap_lock);
+		__queue_cap_release(session, ceph_ino(inode), cap->cap_id,
+				    cap->mseq, cap->issue_seq);
 		p = rb_next(p);
 		__ceph_remove_cap(cap);
 	}
@@ -2655,7 +2663,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 	struct ceph_mds_caps *h;
 	int mds = session->s_mds;
 	int op;
-	u32 seq;
+	u32 seq, mseq;
 	struct ceph_vino vino;
 	u64 cap_id;
 	u64 size, max_size;
@@ -2675,6 +2683,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 	vino.snap = CEPH_NOSNAP;
 	cap_id = le64_to_cpu(h->cap_id);
 	seq = le32_to_cpu(h->seq);
+	mseq = le32_to_cpu(h->migrate_seq);
 	size = le64_to_cpu(h->size);
 	max_size = le64_to_cpu(h->max_size);
 
@@ -2689,6 +2698,17 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 	     vino.snap, inode);
 	if (!inode) {
 		dout(" i don't have ino %llx\n", vino.ino);
+
+		if (op == CEPH_CAP_OP_IMPORT)
+			__queue_cap_release(session, vino.ino, cap_id,
+					    mseq, seq);
+
+		/*
+		 * send any full release message to try to move things
+		 * along for the mds (who clearly thinks we still have this
+		 * cap).
+		 */
+		ceph_send_cap_releases(mdsc, session);
 		goto done;
 	}
 