Diffstat (limited to 'fs/ceph/mds_client.c')
 -rw-r--r--  fs/ceph/mds_client.c  385
 1 file changed, 237 insertions(+), 148 deletions(-)

diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 24561a557e01..885aa5710cfd 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -40,7 +40,7 @@
 static void __wake_requests(struct ceph_mds_client *mdsc,
                             struct list_head *head);
 
-const static struct ceph_connection_operations mds_con_ops;
+static const struct ceph_connection_operations mds_con_ops;
 
 
 /*
@@ -665,10 +665,10 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
         struct ceph_msg *msg;
         struct ceph_mds_session_head *h;
 
-        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
-        if (IS_ERR(msg)) {
+        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS);
+        if (!msg) {
                 pr_err("create_session_msg ENOMEM creating msg\n");
-                return ERR_PTR(PTR_ERR(msg));
+                return NULL;
         }
         h = msg->front.iov_base;
         h->op = cpu_to_le32(op);
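Note: the hunk above is part of a series-wide convention change — ceph_msg_new() now returns NULL on allocation failure instead of an ERR_PTR-encoded errno, so callers test !msg rather than IS_ERR(msg). A minimal userspace sketch of the two conventions, with the ERR_PTR helpers reimplemented for the demo (illustrative only, not the kernel's definitions):

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    /* kernel-style pointer-encoded errors, redone for userspace */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* old convention: encode the reason for failure in the pointer */
    static void *msg_new_errptr(size_t len)
    {
            void *p = malloc(len);
            return p ? p : ERR_PTR(-ENOMEM);
    }

    /* new convention: the only failure mode is ENOMEM, so NULL suffices */
    static void *msg_new_null(size_t len)
    {
            return malloc(len);
    }

    int main(void)
    {
            void *m = msg_new_errptr(64);
            if (IS_ERR(m))                  /* caller must decode the pointer */
                    return (int)-PTR_ERR(m);
            free(m);

            m = msg_new_null(64);
            if (!m)                         /* caller just tests for NULL */
                    return ENOMEM;
            free(m);
            return 0;
    }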
@@ -687,7 +687,6 @@ static int __open_session(struct ceph_mds_client *mdsc,
         struct ceph_msg *msg;
         int mstate;
         int mds = session->s_mds;
-        int err = 0;
 
         /* wait for mds to go active? */
         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
@@ -698,13 +697,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
 
         /* send connect message */
         msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
-        if (IS_ERR(msg)) {
-                err = PTR_ERR(msg);
-                goto out;
-        }
+        if (!msg)
+                return -ENOMEM;
         ceph_con_send(&session->s_con, msg);
-
-out:
         return 0;
 }
 
@@ -804,12 +799,49 @@ out:
 }
 
 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                                   void *arg)
 {
         struct ceph_inode_info *ci = ceph_inode(inode);
+        int drop = 0;
+
         dout("removing cap %p, ci is %p, inode is %p\n",
              cap, ci, &ci->vfs_inode);
-        ceph_remove_cap(cap);
+        spin_lock(&inode->i_lock);
+        __ceph_remove_cap(cap);
+        if (!__ceph_is_any_real_caps(ci)) {
+                struct ceph_mds_client *mdsc =
+                        &ceph_sb_to_client(inode->i_sb)->mdsc;
+
+                spin_lock(&mdsc->cap_dirty_lock);
+                if (!list_empty(&ci->i_dirty_item)) {
+                        pr_info(" dropping dirty %s state for %p %lld\n",
+                                ceph_cap_string(ci->i_dirty_caps),
+                                inode, ceph_ino(inode));
+                        ci->i_dirty_caps = 0;
+                        list_del_init(&ci->i_dirty_item);
+                        drop = 1;
+                }
+                if (!list_empty(&ci->i_flushing_item)) {
+                        pr_info(" dropping dirty+flushing %s state for %p %lld\n",
+                                ceph_cap_string(ci->i_flushing_caps),
+                                inode, ceph_ino(inode));
+                        ci->i_flushing_caps = 0;
+                        list_del_init(&ci->i_flushing_item);
+                        mdsc->num_cap_flushing--;
+                        drop = 1;
+                }
+                if (drop && ci->i_wrbuffer_ref) {
+                        pr_info(" dropping dirty data for %p %lld\n",
+                                inode, ceph_ino(inode));
+                        ci->i_wrbuffer_ref = 0;
+                        ci->i_wrbuffer_ref_head = 0;
+                        drop++;
+                }
+                spin_unlock(&mdsc->cap_dirty_lock);
+        }
+        spin_unlock(&inode->i_lock);
+        while (drop--)
+                iput(inode);
         return 0;
 }
 
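Note on the new remove_session_caps_cb above: each piece of per-inode state it tears down (dirty caps, flushing caps, dirty buffered data) pins the inode with a reference, and iput() can sleep, so the callback counts the references to drop while holding the spinlocks and only calls iput() after unlocking. A small runnable model of that defer-the-puts pattern, with a pthread mutex standing in for the spinlock and made-up field names:

    #include <pthread.h>
    #include <stdio.h>

    struct obj {
            pthread_mutex_t lock;
            int refcount;
            int dirty;
            int flushing;
    };

    /* may sleep in the real kernel; must not run under the lock */
    static void put_obj(struct obj *o)
    {
            printf("dropped a ref, %d left\n", --o->refcount);
    }

    static void drop_state(struct obj *o)
    {
            int drop = 0;

            pthread_mutex_lock(&o->lock);
            if (o->dirty) {                 /* dirty state holds one reference */
                    o->dirty = 0;
                    drop++;
            }
            if (o->flushing) {              /* so does flushing state */
                    o->flushing = 0;
                    drop++;
            }
            pthread_mutex_unlock(&o->lock);

            while (drop--)                  /* put references only after unlock */
                    put_obj(o);
    }

    int main(void)
    {
            struct obj o = { PTHREAD_MUTEX_INITIALIZER, 2, 1, 1 };

            drop_state(&o);
            return 0;
    }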
@@ -821,6 +853,7 @@ static void remove_session_caps(struct ceph_mds_session *session)
         dout("remove_session_caps on %p\n", session);
         iterate_session_caps(session, remove_session_caps_cb, NULL);
         BUG_ON(session->s_nr_caps > 0);
+        BUG_ON(!list_empty(&session->s_cap_flushing));
         cleanup_cap_releases(session);
 }
 
@@ -883,8 +916,8 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
                ceph_mds_state_name(state));
         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
                                  ++session->s_renew_seq);
-        if (IS_ERR(msg))
-                return PTR_ERR(msg);
+        if (!msg)
+                return -ENOMEM;
         ceph_con_send(&session->s_con, msg);
         return 0;
 }
@@ -931,17 +964,15 @@ static int request_close_session(struct ceph_mds_client *mdsc,
                                  struct ceph_mds_session *session)
 {
         struct ceph_msg *msg;
-        int err = 0;
 
         dout("request_close_session mds%d state %s seq %lld\n",
              session->s_mds, session_state_name(session->s_state),
              session->s_seq);
         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
-        if (IS_ERR(msg))
-                err = PTR_ERR(msg);
-        else
-                ceph_con_send(&session->s_con, msg);
-        return err;
+        if (!msg)
+                return -ENOMEM;
+        ceph_con_send(&session->s_con, msg);
+        return 0;
 }
 
 /*
@@ -1059,7 +1090,7 @@ static int add_cap_releases(struct ceph_mds_client *mdsc,
         while (session->s_num_cap_releases < session->s_nr_caps + extra) {
                 spin_unlock(&session->s_cap_lock);
                 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
-                                   0, 0, NULL);
+                                   GFP_NOFS);
                 if (!msg)
                         goto out_unlocked;
                 dout("add_cap_releases %p msg %p now %d\n", session, msg,
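Several hunks in this diff (here and in create_session_msg, create_request_message, send_mds_reconnect, and ceph_mdsc_lease_send_msg) pass GFP_NOFS to ceph_msg_new: under memory pressure a GFP_KERNEL allocation may recurse into filesystem writeback to reclaim pages, which can deadlock if the allocating thread already holds filesystem locks, and GFP_NOFS forbids that recursion. A toy userspace model of the hazard — the flag names echo the kernel's, everything else is invented for the demo:

    #include <stdio.h>

    enum gfp { GFP_KERNEL, GFP_NOFS };      /* names echo the kernel flags */

    static int in_fs;                       /* "currently inside fs code" */

    static void fs_writeback(void)
    {
            /* takes fs locks in a real kernel; unsafe from inside the fs */
            puts("reclaim: writing back filesystem pages");
    }

    static void *toy_alloc(enum gfp flags)
    {
            if (in_fs && flags == GFP_KERNEL) {
                    puts("deadlock hazard: reclaim re-entered the fs");
                    return NULL;
            }
            if (flags == GFP_KERNEL)
                    fs_writeback();         /* reclaim may enter the fs */
            return &in_fs;                  /* stand-in for a real page */
    }

    int main(void)
    {
            in_fs = 1;                      /* e.g. building an MDS message */
            toy_alloc(GFP_NOFS);            /* safe: no fs re-entry */
            toy_alloc(GFP_KERNEL);          /* demonstrates the hazard */
            return 0;
    }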
@@ -1151,10 +1182,8 @@ static void send_cap_releases(struct ceph_mds_client *mdsc,
         struct ceph_msg *msg;
 
         dout("send_cap_releases mds%d\n", session->s_mds);
-        while (1) {
-                spin_lock(&session->s_cap_lock);
-                if (list_empty(&session->s_cap_releases_done))
-                        break;
+        spin_lock(&session->s_cap_lock);
+        while (!list_empty(&session->s_cap_releases_done)) {
                 msg = list_first_entry(&session->s_cap_releases_done,
                                        struct ceph_msg, list_head);
                 list_del_init(&msg->list_head);
@@ -1162,10 +1191,49 @@ static void send_cap_releases(struct ceph_mds_client *mdsc,
                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
                 ceph_con_send(&session->s_con, msg);
+                spin_lock(&session->s_cap_lock);
         }
         spin_unlock(&session->s_cap_lock);
 }
 
+static void discard_cap_releases(struct ceph_mds_client *mdsc,
+                                 struct ceph_mds_session *session)
+{
+        struct ceph_msg *msg;
+        struct ceph_mds_cap_release *head;
+        unsigned num;
+
+        dout("discard_cap_releases mds%d\n", session->s_mds);
+        spin_lock(&session->s_cap_lock);
+
+        /* zero out the in-progress message */
+        msg = list_first_entry(&session->s_cap_releases,
+                               struct ceph_msg, list_head);
+        head = msg->front.iov_base;
+        num = le32_to_cpu(head->num);
+        dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num);
+        head->num = cpu_to_le32(0);
+        session->s_num_cap_releases += num;
+
+        /* requeue completed messages */
+        while (!list_empty(&session->s_cap_releases_done)) {
+                msg = list_first_entry(&session->s_cap_releases_done,
+                                       struct ceph_msg, list_head);
+                list_del_init(&msg->list_head);
+
+                head = msg->front.iov_base;
+                num = le32_to_cpu(head->num);
+                dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg,
+                     num);
+                session->s_num_cap_releases += num;
+                head->num = cpu_to_le32(0);
+                msg->front.iov_len = sizeof(*head);
+                list_add(&msg->list_head, &session->s_cap_releases);
+        }
+
+        spin_unlock(&session->s_cap_lock);
+}
+
 /*
  * requests
  */
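Two patterns are worth noting in the rewritten send_cap_releases and the new discard_cap_releases above: the send loop holds s_cap_lock only while manipulating the list and re-acquires it after each ceph_con_send(), which may block, while the discard path moves completed messages back onto the in-progress list under the same lock. A compact runnable model of the drain-with-unlock loop (pthread mutex in place of the spinlock, invented message type):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct msg {
            struct msg *next;
            int id;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct msg *done;                /* list of completed messages */

    static void send_msg(struct msg *m)     /* may block; call without lock */
    {
            printf("sending msg %d\n", m->id);
            free(m);
    }

    static void send_all(void)
    {
            pthread_mutex_lock(&lock);
            while (done) {
                    struct msg *m = done;   /* pop under the lock */
                    done = m->next;
                    pthread_mutex_unlock(&lock);
                    send_msg(m);            /* blocking work, lock dropped */
                    pthread_mutex_lock(&lock);  /* retake before next test */
            }
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++) {
                    struct msg *m = malloc(sizeof(*m));
                    m->id = i;
                    m->next = done;
                    done = m;
            }
            send_all();
            return 0;
    }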
@@ -1181,6 +1249,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
         if (!req)
                 return ERR_PTR(-ENOMEM);
 
+        mutex_init(&req->r_fill_mutex);
         req->r_started = jiffies;
         req->r_resend_mds = -1;
         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
@@ -1251,7 +1320,7 @@ retry:
                 len += 1 + temp->d_name.len;
                 temp = temp->d_parent;
                 if (temp == NULL) {
-                        pr_err("build_path_dentry corrupt dentry %p\n", dentry);
+                        pr_err("build_path corrupt dentry %p\n", dentry);
                         return ERR_PTR(-EINVAL);
                 }
         }
@@ -1267,7 +1336,7 @@ retry:
                 struct inode *inode = temp->d_inode;
 
                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
-                        dout("build_path_dentry path+%d: %p SNAPDIR\n",
+                        dout("build_path path+%d: %p SNAPDIR\n",
                              pos, temp);
                 } else if (stop_on_nosnap && inode &&
                            ceph_snap(inode) == CEPH_NOSNAP) {
@@ -1278,20 +1347,18 @@ retry:
                                 break;
                         strncpy(path + pos, temp->d_name.name,
                                 temp->d_name.len);
-                        dout("build_path_dentry path+%d: %p '%.*s'\n",
-                             pos, temp, temp->d_name.len, path + pos);
                 }
                 if (pos)
                         path[--pos] = '/';
                 temp = temp->d_parent;
                 if (temp == NULL) {
-                        pr_err("build_path_dentry corrupt dentry\n");
+                        pr_err("build_path corrupt dentry\n");
                         kfree(path);
                         return ERR_PTR(-EINVAL);
                 }
         }
         if (pos != 0) {
-                pr_err("build_path_dentry did not end path lookup where "
+                pr_err("build_path did not end path lookup where "
                        "expected, namelen is %d, pos is %d\n", len, pos);
                 /* presumably this is only possible if racing with a
                    rename of one of the parent directories (we can not
@@ -1303,7 +1370,7 @@ retry:
 
         *base = ceph_ino(temp->d_inode);
         *plen = len;
-        dout("build_path_dentry on %p %d built %llx '%.*s'\n",
+        dout("build_path on %p %d built %llx '%.*s'\n",
              dentry, atomic_read(&dentry->d_count), *base, len, path);
         return path;
 }
@@ -1426,9 +1493,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
         if (req->r_old_dentry_drop)
                 len += req->r_old_dentry->d_name.len;
 
-        msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
-        if (IS_ERR(msg))
+        msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS);
+        if (!msg) {
+                msg = ERR_PTR(-ENOMEM);
                 goto out_free2;
+        }
 
         msg->hdr.tid = cpu_to_le64(req->r_tid);
 
@@ -1517,9 +1586,9 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
         }
         msg = create_request_message(mdsc, req, mds);
         if (IS_ERR(msg)) {
-                req->r_reply = ERR_PTR(PTR_ERR(msg));
+                req->r_err = PTR_ERR(msg);
                 complete_request(mdsc, req);
-                return -PTR_ERR(msg);
+                return PTR_ERR(msg);
         }
         req->r_request = msg;
 
@@ -1552,7 +1621,7 @@ static int __do_request(struct ceph_mds_client *mdsc,
         int mds = -1;
         int err = -EAGAIN;
 
-        if (req->r_reply)
+        if (req->r_err || req->r_got_result)
                 goto out;
 
         if (req->r_timeout &&
@@ -1609,7 +1678,7 @@ out:
         return err;
 
 finish:
-        req->r_reply = ERR_PTR(err);
+        req->r_err = err;
         complete_request(mdsc, req);
         goto out;
 }
@@ -1630,10 +1699,9 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
 
 /*
  * Wake up threads with requests pending for @mds, so that they can
- * resubmit their requests to a possibly different mds.  If @all is set,
- * wake up if their requests has been forwarded to @mds, too.
+ * resubmit their requests to a possibly different mds.
  */
-static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all)
+static void kick_requests(struct ceph_mds_client *mdsc, int mds)
 {
         struct ceph_mds_request *req;
         struct rb_node *p;
@@ -1689,64 +1757,78 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
         __register_request(mdsc, req, dir);
         __do_request(mdsc, req);
 
-        /* wait */
-        if (!req->r_reply) {
-                mutex_unlock(&mdsc->mutex);
-                if (req->r_timeout) {
-                        err = (long)wait_for_completion_interruptible_timeout(
-                                &req->r_completion, req->r_timeout);
-                        if (err == 0)
-                                req->r_reply = ERR_PTR(-EIO);
-                        else if (err < 0)
-                                req->r_reply = ERR_PTR(err);
-                } else {
-                        err = wait_for_completion_interruptible(
-                                &req->r_completion);
-                        if (err)
-                                req->r_reply = ERR_PTR(err);
-                }
-                mutex_lock(&mdsc->mutex);
+        if (req->r_err) {
+                err = req->r_err;
+                __unregister_request(mdsc, req);
+                dout("do_request early error %d\n", err);
+                goto out;
         }
 
-        if (IS_ERR(req->r_reply)) {
-                err = PTR_ERR(req->r_reply);
-                req->r_reply = NULL;
+        /* wait */
+        mutex_unlock(&mdsc->mutex);
+        dout("do_request waiting\n");
+        if (req->r_timeout) {
+                err = (long)wait_for_completion_interruptible_timeout(
+                        &req->r_completion, req->r_timeout);
+                if (err == 0)
+                        err = -EIO;
+        } else {
+                err = wait_for_completion_interruptible(&req->r_completion);
+        }
+        dout("do_request waited, got %d\n", err);
+        mutex_lock(&mdsc->mutex);
 
-                if (err == -ERESTARTSYS) {
-                        /* aborted */
-                        req->r_aborted = true;
+        /* only abort if we didn't race with a real reply */
+        if (req->r_got_result) {
+                err = le32_to_cpu(req->r_reply_info.head->result);
+        } else if (err < 0) {
+                dout("aborted request %lld with %d\n", req->r_tid, err);
 
-                        if (req->r_locked_dir &&
-                            (req->r_op & CEPH_MDS_OP_WRITE)) {
-                                struct ceph_inode_info *ci =
-                                        ceph_inode(req->r_locked_dir);
+                /*
+                 * ensure we aren't running concurrently with
+                 * ceph_fill_trace or ceph_readdir_prepopulate, which
+                 * rely on locks (dir mutex) held by our caller.
+                 */
+                mutex_lock(&req->r_fill_mutex);
+                req->r_err = err;
+                req->r_aborted = true;
+                mutex_unlock(&req->r_fill_mutex);
 
-                                dout("aborted, clearing I_COMPLETE on %p\n",
-                                     req->r_locked_dir);
-                                spin_lock(&req->r_locked_dir->i_lock);
-                                ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
-                                ci->i_release_count++;
-                                spin_unlock(&req->r_locked_dir->i_lock);
-                        }
-                } else {
-                        /* clean up this request */
-                        __unregister_request(mdsc, req);
-                        if (!list_empty(&req->r_unsafe_item))
-                                list_del_init(&req->r_unsafe_item);
-                        complete(&req->r_safe_completion);
-                }
-        } else if (req->r_err) {
-                err = req->r_err;
+                if (req->r_locked_dir &&
+                    (req->r_op & CEPH_MDS_OP_WRITE))
+                        ceph_invalidate_dir_request(req);
         } else {
-                err = le32_to_cpu(req->r_reply_info.head->result);
+                err = req->r_err;
         }
-        mutex_unlock(&mdsc->mutex);
 
+out:
+        mutex_unlock(&mdsc->mutex);
         dout("do_request %p done, result %d\n", req, err);
         return err;
 }
 
 /*
+ * Invalidate dir I_COMPLETE, dentry lease state on an aborted MDS
+ * namespace request.
+ */
+void ceph_invalidate_dir_request(struct ceph_mds_request *req)
+{
+        struct inode *inode = req->r_locked_dir;
+        struct ceph_inode_info *ci = ceph_inode(inode);
+
+        dout("invalidate_dir_request %p (I_COMPLETE, lease(s))\n", inode);
+        spin_lock(&inode->i_lock);
+        ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
+        ci->i_release_count++;
+        spin_unlock(&inode->i_lock);
+
+        if (req->r_dentry)
+                ceph_invalidate_dentry_lease(req->r_dentry);
+        if (req->r_old_dentry)
+                ceph_invalidate_dentry_lease(req->r_old_dentry);
+}
+
+/*
  * Handle mds reply.
  *
  * We take the session mutex and parse and process the reply immediately.
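The ceph_mdsc_do_request rewrite above replaces the old r_reply/ERR_PTR bookkeeping with an explicit r_err/r_got_result pair plus r_fill_mutex, which serializes an aborting waiter against the reply path: the waiter sets r_aborted under the mutex, and handle_reply takes the same mutex around ceph_fill_trace()/ceph_readdir_prepopulate(), so a late reply can never populate caches that depend on directory locks the caller has already given up. A two-thread userspace model of that handshake (hypothetical names, pthread mutex in place of the kernel mutex):

    #include <pthread.h>
    #include <stdio.h>

    struct request {                /* invented stand-in for ceph_mds_request */
            pthread_mutex_t fill_mutex;
            int aborted;
            int got_result;
    };

    static void *reply_path(void *arg)
    {
            struct request *req = arg;

            pthread_mutex_lock(&req->fill_mutex);
            if (!req->aborted) {    /* fill caches only if still wanted */
                    req->got_result = 1;
                    puts("reply: trace filled");
            } else {
                    puts("reply: arrived after abort, dropped");
            }
            pthread_mutex_unlock(&req->fill_mutex);
            return NULL;
    }

    int main(void)
    {
            struct request req = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
            pthread_t t;

            /* the waiter gives up: flag the abort under the same mutex */
            pthread_mutex_lock(&req.fill_mutex);
            req.aborted = 1;
            pthread_mutex_unlock(&req.fill_mutex);

            pthread_create(&t, NULL, reply_path, &req);
            pthread_join(t, NULL);
            return 0;
    }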
@@ -1797,6 +1879,12 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                 mutex_unlock(&mdsc->mutex);
                 goto out;
         }
+        if (req->r_got_safe && !head->safe) {
+                pr_warning("got unsafe after safe on %llu from mds%d\n",
+                           tid, mds);
+                mutex_unlock(&mdsc->mutex);
+                goto out;
+        }
 
         result = le32_to_cpu(head->result);
 
@@ -1838,11 +1926,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                         mutex_unlock(&mdsc->mutex);
                         goto out;
                 }
-        }
-
-        BUG_ON(req->r_reply);
-
-        if (!head->safe) {
+        } else {
                 req->r_got_unsafe = true;
                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
         }
@@ -1871,21 +1955,30 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
         }
 
         /* insert trace into our cache */
+        mutex_lock(&req->r_fill_mutex);
         err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
         if (err == 0) {
                 if (result == 0 && rinfo->dir_nr)
                         ceph_readdir_prepopulate(req, req->r_session);
                 ceph_unreserve_caps(&req->r_caps_reservation);
         }
+        mutex_unlock(&req->r_fill_mutex);
 
         up_read(&mdsc->snap_rwsem);
 out_err:
-        if (err) {
-                req->r_err = err;
+        mutex_lock(&mdsc->mutex);
+        if (!req->r_aborted) {
+                if (err) {
+                        req->r_err = err;
+                } else {
+                        req->r_reply = msg;
+                        ceph_msg_get(msg);
+                        req->r_got_result = true;
+                }
         } else {
-                req->r_reply = msg;
-                ceph_msg_get(msg);
+                dout("reply arrived after request %lld was aborted\n", tid);
         }
+        mutex_unlock(&mdsc->mutex);
 
         add_cap_releases(mdsc, req->r_session, -1);
         mutex_unlock(&session->s_mutex);
@@ -1984,6 +2077,8 @@ static void handle_session(struct ceph_mds_session *session,
 
         switch (op) {
         case CEPH_SESSION_OPEN:
+                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
+                        pr_info("mds%d reconnect success\n", session->s_mds);
                 session->s_state = CEPH_MDS_SESSION_OPEN;
                 renewed_caps(mdsc, session, 0);
                 wake = 1;
@@ -1997,10 +2092,12 @@ static void handle_session(struct ceph_mds_session *session,
                 break;
 
         case CEPH_SESSION_CLOSE:
+                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
+                        pr_info("mds%d reconnect denied\n", session->s_mds);
                 remove_session_caps(session);
                 wake = 1; /* for good measure */
                 complete(&mdsc->session_close_waiters);
-                kick_requests(mdsc, mds, 0);        /* cur only */
+                kick_requests(mdsc, mds);
                 break;
 
         case CEPH_SESSION_STALE:
@@ -2132,54 +2229,44 @@
  *
  * called with mdsc->mutex held.
  */
-static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
+static void send_mds_reconnect(struct ceph_mds_client *mdsc,
+                               struct ceph_mds_session *session)
 {
-        struct ceph_mds_session *session = NULL;
         struct ceph_msg *reply;
         struct rb_node *p;
+        int mds = session->s_mds;
         int err = -ENOMEM;
         struct ceph_pagelist *pagelist;
 
-        pr_info("reconnect to recovering mds%d\n", mds);
+        pr_info("mds%d reconnect start\n", mds);
 
         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
         if (!pagelist)
                 goto fail_nopagelist;
         ceph_pagelist_init(pagelist);
 
-        reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, 0, 0, NULL);
-        if (IS_ERR(reply)) {
-                err = PTR_ERR(reply);
+        reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS);
+        if (!reply)
                 goto fail_nomsg;
-        }
-
-        /* find session */
-        session = __ceph_lookup_mds_session(mdsc, mds);
-        mutex_unlock(&mdsc->mutex);    /* drop lock for duration */
 
-        if (session) {
-                mutex_lock(&session->s_mutex);
+        mutex_lock(&session->s_mutex);
+        session->s_state = CEPH_MDS_SESSION_RECONNECTING;
+        session->s_seq = 0;
 
-                session->s_state = CEPH_MDS_SESSION_RECONNECTING;
-                session->s_seq = 0;
+        ceph_con_open(&session->s_con,
+                      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
 
-                ceph_con_open(&session->s_con,
-                              ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
-
-                /* replay unsafe requests */
-                replay_unsafe_requests(mdsc, session);
-        } else {
-                dout("no session for mds%d, will send short reconnect\n",
-                     mds);
-        }
+        /* replay unsafe requests */
+        replay_unsafe_requests(mdsc, session);
 
         down_read(&mdsc->snap_rwsem);
 
-        if (!session)
-                goto send;
         dout("session %p state %s\n", session,
              session_state_name(session->s_state));
 
+        /* drop old cap expires; we're about to reestablish that state */
+        discard_cap_releases(mdsc, session);
+
         /* traverse this session's caps */
         err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
         if (err)
@@ -2208,36 +2295,29 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
                 goto fail;
         }
 
-send:
         reply->pagelist = pagelist;
         reply->hdr.data_len = cpu_to_le32(pagelist->length);
         reply->nr_pages = calc_pages_for(0, pagelist->length);
         ceph_con_send(&session->s_con, reply);
 
-        session->s_state = CEPH_MDS_SESSION_OPEN;
         mutex_unlock(&session->s_mutex);
 
         mutex_lock(&mdsc->mutex);
         __wake_requests(mdsc, &session->s_waiting);
         mutex_unlock(&mdsc->mutex);
 
-        ceph_put_mds_session(session);
-
         up_read(&mdsc->snap_rwsem);
-        mutex_lock(&mdsc->mutex);
         return;
 
 fail:
         ceph_msg_put(reply);
         up_read(&mdsc->snap_rwsem);
         mutex_unlock(&session->s_mutex);
-        ceph_put_mds_session(session);
 fail_nomsg:
         ceph_pagelist_release(pagelist);
         kfree(pagelist);
 fail_nopagelist:
         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
-        mutex_lock(&mdsc->mutex);
         return;
 }
 
@@ -2290,7 +2370,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                         }
 
                         /* kick any requests waiting on the recovering mds */
-                        kick_requests(mdsc, i, 1);
+                        kick_requests(mdsc, i);
                 } else if (oldstate == newstate) {
                         continue;  /* nothing new with this mds */
                 }
@@ -2299,22 +2379,21 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                  * send reconnect?
                  */
                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
-                    newstate >= CEPH_MDS_STATE_RECONNECT)
-                        send_mds_reconnect(mdsc, i);
+                    newstate >= CEPH_MDS_STATE_RECONNECT) {
+                        mutex_unlock(&mdsc->mutex);
+                        send_mds_reconnect(mdsc, s);
+                        mutex_lock(&mdsc->mutex);
+                }
 
                 /*
-                 * kick requests on any mds that has gone active.
-                 *
-                 * kick requests on cur or forwarder: we may have sent
-                 * the request to mds1, mds1 told us it forwarded it
-                 * to mds2, but then we learn mds1 failed and can't be
-                 * sure it successfully forwarded our request before
-                 * it died.
+                 * kick request on any mds that has gone active.
                  */
                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
                     newstate >= CEPH_MDS_STATE_ACTIVE) {
-                        pr_info("mds%d reconnect completed\n", s->s_mds);
-                        kick_requests(mdsc, i, 1);
+                        if (oldstate != CEPH_MDS_STATE_CREATING &&
+                            oldstate != CEPH_MDS_STATE_STARTING)
+                                pr_info("mds%d recovery completed\n", s->s_mds);
+                        kick_requests(mdsc, i);
                         ceph_kick_flushing_caps(mdsc, s);
                         wake_up_session_caps(s, 1);
                 }
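check_new_map now drops mdsc->mutex around send_mds_reconnect(), which takes session->s_mutex internally; releasing the outer map lock before the call keeps the lock acquisition order consistent and avoids holding it across a potentially long reconnect. A runnable sketch of the unlock-call-relock shape (userspace mutexes, illustrative names):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;

    static void reconnect_session(int i)
    {
            pthread_mutex_lock(&session_lock);  /* inner lock taken alone */
            printf("reconnecting session %d\n", i);
            pthread_mutex_unlock(&session_lock);
    }

    static void scan_map(void)
    {
            pthread_mutex_lock(&map_lock);
            for (int i = 0; i < 2; i++) {
                    /* drop the outer lock before calling into session code */
                    pthread_mutex_unlock(&map_lock);
                    reconnect_session(i);
                    pthread_mutex_lock(&map_lock);  /* retake to continue */
            }
            pthread_mutex_unlock(&map_lock);
    }

    int main(void)
    {
            scan_map();
            return 0;
    }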
@@ -2457,8 +2536,8 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
         dnamelen = dentry->d_name.len;
         len += dnamelen;
 
-        msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
-        if (IS_ERR(msg))
+        msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS);
+        if (!msg)
                 return;
         lease = msg->front.iov_base;
         lease->action = action;
@@ -2603,7 +2682,9 @@ static void delayed_work(struct work_struct *work)
                 else
                         ceph_con_keepalive(&s->s_con);
                 add_cap_releases(mdsc, s, -1);
-                send_cap_releases(mdsc, s);
+                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
+                    s->s_state == CEPH_MDS_SESSION_HUNG)
+                        send_cap_releases(mdsc, s);
                 mutex_unlock(&s->s_mutex);
                 ceph_put_mds_session(s);
 
@@ -2620,6 +2701,9 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
         mdsc->client = client;
         mutex_init(&mdsc->mutex);
         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
+        if (mdsc->mdsmap == NULL)
+                return -ENOMEM;
+
         init_completion(&mdsc->safe_umount_waiters);
         init_completion(&mdsc->session_close_waiters);
         INIT_LIST_HEAD(&mdsc->waiting_for_map);
@@ -2645,6 +2729,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
         init_waitqueue_head(&mdsc->cap_flushing_wq);
         spin_lock_init(&mdsc->dentry_lru_lock);
         INIT_LIST_HEAD(&mdsc->dentry_lru);
+
         return 0;
 }
 
@@ -2740,6 +2825,9 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 {
         u64 want_tid, want_flush;
 
+        if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+                return;
+
         dout("sync\n");
         mutex_lock(&mdsc->mutex);
         want_tid = mdsc->last_tid;
@@ -2922,9 +3010,10 @@ static void con_put(struct ceph_connection *con)
 static void peer_reset(struct ceph_connection *con)
 {
         struct ceph_mds_session *s = con->private;
+        struct ceph_mds_client *mdsc = s->s_mdsc;
 
-        pr_err("mds%d gave us the boot.  IMPLEMENT RECONNECT.\n",
-               s->s_mds);
+        pr_warning("mds%d closed our session\n", s->s_mds);
+        send_mds_reconnect(mdsc, s);
 }
 
 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
@@ -3031,7 +3120,7 @@ static int invalidate_authorizer(struct ceph_connection *con)
         return ceph_monc_validate_auth(&mdsc->client->monc);
 }
 
-const static struct ceph_connection_operations mds_con_ops = {
+static const struct ceph_connection_operations mds_con_ops = {
         .get = con_get,
         .put = con_put,
         .dispatch = dispatch,
