diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-09 15:39:10 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-09 15:39:10 -0400 |
commit | 9a5889ae1ce41f376e6a5b56e17e0c5a755fda80 (patch) | |
tree | 0eaadb5530d5b82460e0bfb0b4403e080d7b1b8f /net | |
parent | e3a0dd98e1ddfd135b7ef889fcc0269e8c2ca445 (diff) | |
parent | 8b8cf8917f9b5d74e04f281272d8719ce335a497 (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client
Pull Ceph updates from Sage Weil:
"There is some follow-on RBD cleanup after the last window's code drop,
a series from Yan fixing multi-mds behavior in cephfs, and then a
sprinkling of bug fixes all around. Some warnings, sleeping while
atomic, a null dereference, and cleanups"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client: (36 commits)
libceph: fix invalid unsigned->signed conversion for timespec encoding
libceph: call r_unsafe_callback when unsafe reply is received
ceph: fix race between cap issue and revoke
ceph: fix cap revoke race
ceph: fix pending vmtruncate race
ceph: avoid accessing invalid memory
libceph: Fix NULL pointer dereference in auth client code
ceph: Reconstruct the func ceph_reserve_caps.
ceph: Free mdsc if alloc mdsc->mdsmap failed.
ceph: remove sb_start/end_write in ceph_aio_write.
ceph: avoid meaningless calling ceph_caps_revoking if sync_mode == WB_SYNC_ALL.
ceph: fix sleeping function called from invalid context.
ceph: move inode to proper flushing list when auth MDS changes
rbd: fix a couple warnings
ceph: clear migrate seq when MDS restarts
ceph: check migrate seq before changing auth cap
ceph: fix race between page writeback and truncate
ceph: reset iov_len when discarding cap release messages
ceph: fix cap release race
libceph: fix truncate size calculation
...
Diffstat (limited to 'net')
-rw-r--r-- | net/ceph/auth_none.c | 6 | ||||
-rw-r--r-- | net/ceph/osd_client.c | 63 |
2 files changed, 38 insertions, 31 deletions
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c index 925ca583c09c..8c93fa8d81bc 100644 --- a/net/ceph/auth_none.c +++ b/net/ceph/auth_none.c | |||
@@ -39,6 +39,11 @@ static int should_authenticate(struct ceph_auth_client *ac) | |||
39 | return xi->starting; | 39 | return xi->starting; |
40 | } | 40 | } |
41 | 41 | ||
42 | static int build_request(struct ceph_auth_client *ac, void *buf, void *end) | ||
43 | { | ||
44 | return 0; | ||
45 | } | ||
46 | |||
42 | /* | 47 | /* |
43 | * the generic auth code decode the global_id, and we carry no actual | 48 | * the generic auth code decode the global_id, and we carry no actual |
44 | * authenticate state, so nothing happens here. | 49 | * authenticate state, so nothing happens here. |
@@ -106,6 +111,7 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = { | |||
106 | .destroy = destroy, | 111 | .destroy = destroy, |
107 | .is_authenticated = is_authenticated, | 112 | .is_authenticated = is_authenticated, |
108 | .should_authenticate = should_authenticate, | 113 | .should_authenticate = should_authenticate, |
114 | .build_request = build_request, | ||
109 | .handle_reply = handle_reply, | 115 | .handle_reply = handle_reply, |
110 | .create_authorizer = ceph_auth_none_create_authorizer, | 116 | .create_authorizer = ceph_auth_none_create_authorizer, |
111 | .destroy_authorizer = ceph_auth_none_destroy_authorizer, | 117 | .destroy_authorizer = ceph_auth_none_destroy_authorizer, |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 3a246a6cab47..dd47889adc4a 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -733,12 +733,14 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | |||
733 | 733 | ||
734 | object_size = le32_to_cpu(layout->fl_object_size); | 734 | object_size = le32_to_cpu(layout->fl_object_size); |
735 | object_base = off - objoff; | 735 | object_base = off - objoff; |
736 | if (truncate_size <= object_base) { | 736 | if (!(truncate_seq == 1 && truncate_size == -1ULL)) { |
737 | truncate_size = 0; | 737 | if (truncate_size <= object_base) { |
738 | } else { | 738 | truncate_size = 0; |
739 | truncate_size -= object_base; | 739 | } else { |
740 | if (truncate_size > object_size) | 740 | truncate_size -= object_base; |
741 | truncate_size = object_size; | 741 | if (truncate_size > object_size) |
742 | truncate_size = object_size; | ||
743 | } | ||
742 | } | 744 | } |
743 | 745 | ||
744 | osd_req_op_extent_init(req, 0, opcode, objoff, objlen, | 746 | osd_req_op_extent_init(req, 0, opcode, objoff, objlen, |
@@ -1174,6 +1176,7 @@ static void __register_linger_request(struct ceph_osd_client *osdc, | |||
1174 | struct ceph_osd_request *req) | 1176 | struct ceph_osd_request *req) |
1175 | { | 1177 | { |
1176 | dout("__register_linger_request %p\n", req); | 1178 | dout("__register_linger_request %p\n", req); |
1179 | ceph_osdc_get_request(req); | ||
1177 | list_add_tail(&req->r_linger_item, &osdc->req_linger); | 1180 | list_add_tail(&req->r_linger_item, &osdc->req_linger); |
1178 | if (req->r_osd) | 1181 | if (req->r_osd) |
1179 | list_add_tail(&req->r_linger_osd, | 1182 | list_add_tail(&req->r_linger_osd, |
@@ -1196,6 +1199,7 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc, | |||
1196 | if (list_empty(&req->r_osd_item)) | 1199 | if (list_empty(&req->r_osd_item)) |
1197 | req->r_osd = NULL; | 1200 | req->r_osd = NULL; |
1198 | } | 1201 | } |
1202 | ceph_osdc_put_request(req); | ||
1199 | } | 1203 | } |
1200 | 1204 | ||
1201 | void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc, | 1205 | void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc, |
@@ -1203,9 +1207,8 @@ void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc, | |||
1203 | { | 1207 | { |
1204 | mutex_lock(&osdc->request_mutex); | 1208 | mutex_lock(&osdc->request_mutex); |
1205 | if (req->r_linger) { | 1209 | if (req->r_linger) { |
1206 | __unregister_linger_request(osdc, req); | ||
1207 | req->r_linger = 0; | 1210 | req->r_linger = 0; |
1208 | ceph_osdc_put_request(req); | 1211 | __unregister_linger_request(osdc, req); |
1209 | } | 1212 | } |
1210 | mutex_unlock(&osdc->request_mutex); | 1213 | mutex_unlock(&osdc->request_mutex); |
1211 | } | 1214 | } |
@@ -1217,11 +1220,6 @@ void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, | |||
1217 | if (!req->r_linger) { | 1220 | if (!req->r_linger) { |
1218 | dout("set_request_linger %p\n", req); | 1221 | dout("set_request_linger %p\n", req); |
1219 | req->r_linger = 1; | 1222 | req->r_linger = 1; |
1220 | /* | ||
1221 | * caller is now responsible for calling | ||
1222 | * unregister_linger_request | ||
1223 | */ | ||
1224 | ceph_osdc_get_request(req); | ||
1225 | } | 1223 | } |
1226 | } | 1224 | } |
1227 | EXPORT_SYMBOL(ceph_osdc_set_request_linger); | 1225 | EXPORT_SYMBOL(ceph_osdc_set_request_linger); |
@@ -1339,10 +1337,6 @@ static void __send_request(struct ceph_osd_client *osdc, | |||
1339 | 1337 | ||
1340 | ceph_msg_get(req->r_request); /* send consumes a ref */ | 1338 | ceph_msg_get(req->r_request); /* send consumes a ref */ |
1341 | 1339 | ||
1342 | /* Mark the request unsafe if this is the first time it's being sent. */ | ||
1343 | |||
1344 | if (!req->r_sent && req->r_unsafe_callback) | ||
1345 | req->r_unsafe_callback(req, true); | ||
1346 | req->r_sent = req->r_osd->o_incarnation; | 1340 | req->r_sent = req->r_osd->o_incarnation; |
1347 | 1341 | ||
1348 | ceph_con_send(&req->r_osd->o_con, req->r_request); | 1342 | ceph_con_send(&req->r_osd->o_con, req->r_request); |
@@ -1433,8 +1427,6 @@ static void handle_osds_timeout(struct work_struct *work) | |||
1433 | 1427 | ||
1434 | static void complete_request(struct ceph_osd_request *req) | 1428 | static void complete_request(struct ceph_osd_request *req) |
1435 | { | 1429 | { |
1436 | if (req->r_unsafe_callback) | ||
1437 | req->r_unsafe_callback(req, false); | ||
1438 | complete_all(&req->r_safe_completion); /* fsync waiter */ | 1430 | complete_all(&req->r_safe_completion); /* fsync waiter */ |
1439 | } | 1431 | } |
1440 | 1432 | ||
@@ -1526,6 +1518,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, | |||
1526 | for (i = 0; i < numops; i++) | 1518 | for (i = 0; i < numops; i++) |
1527 | req->r_reply_op_result[i] = ceph_decode_32(&p); | 1519 | req->r_reply_op_result[i] = ceph_decode_32(&p); |
1528 | 1520 | ||
1521 | already_completed = req->r_got_reply; | ||
1522 | |||
1529 | if (!req->r_got_reply) { | 1523 | if (!req->r_got_reply) { |
1530 | 1524 | ||
1531 | req->r_result = result; | 1525 | req->r_result = result; |
@@ -1556,19 +1550,23 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, | |||
1556 | ((flags & CEPH_OSD_FLAG_WRITE) == 0)) | 1550 | ((flags & CEPH_OSD_FLAG_WRITE) == 0)) |
1557 | __unregister_request(osdc, req); | 1551 | __unregister_request(osdc, req); |
1558 | 1552 | ||
1559 | already_completed = req->r_completed; | ||
1560 | req->r_completed = 1; | ||
1561 | mutex_unlock(&osdc->request_mutex); | 1553 | mutex_unlock(&osdc->request_mutex); |
1562 | if (already_completed) | ||
1563 | goto done; | ||
1564 | 1554 | ||
1565 | if (req->r_callback) | 1555 | if (!already_completed) { |
1566 | req->r_callback(req, msg); | 1556 | if (req->r_unsafe_callback && |
1567 | else | 1557 | result >= 0 && !(flags & CEPH_OSD_FLAG_ONDISK)) |
1568 | complete_all(&req->r_completion); | 1558 | req->r_unsafe_callback(req, true); |
1559 | if (req->r_callback) | ||
1560 | req->r_callback(req, msg); | ||
1561 | else | ||
1562 | complete_all(&req->r_completion); | ||
1563 | } | ||
1569 | 1564 | ||
1570 | if (flags & CEPH_OSD_FLAG_ONDISK) | 1565 | if (flags & CEPH_OSD_FLAG_ONDISK) { |
1566 | if (req->r_unsafe_callback && already_completed) | ||
1567 | req->r_unsafe_callback(req, false); | ||
1571 | complete_request(req); | 1568 | complete_request(req); |
1569 | } | ||
1572 | 1570 | ||
1573 | done: | 1571 | done: |
1574 | dout("req=%p req->r_linger=%d\n", req, req->r_linger); | 1572 | dout("req=%p req->r_linger=%d\n", req, req->r_linger); |
@@ -1633,8 +1631,10 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend) | |||
1633 | dout("%p tid %llu restart on osd%d\n", | 1631 | dout("%p tid %llu restart on osd%d\n", |
1634 | req, req->r_tid, | 1632 | req, req->r_tid, |
1635 | req->r_osd ? req->r_osd->o_osd : -1); | 1633 | req->r_osd ? req->r_osd->o_osd : -1); |
1634 | ceph_osdc_get_request(req); | ||
1636 | __unregister_request(osdc, req); | 1635 | __unregister_request(osdc, req); |
1637 | __register_linger_request(osdc, req); | 1636 | __register_linger_request(osdc, req); |
1637 | ceph_osdc_put_request(req); | ||
1638 | continue; | 1638 | continue; |
1639 | } | 1639 | } |
1640 | 1640 | ||
@@ -2123,7 +2123,6 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, | |||
2123 | __register_request(osdc, req); | 2123 | __register_request(osdc, req); |
2124 | req->r_sent = 0; | 2124 | req->r_sent = 0; |
2125 | req->r_got_reply = 0; | 2125 | req->r_got_reply = 0; |
2126 | req->r_completed = 0; | ||
2127 | rc = __map_request(osdc, req, 0); | 2126 | rc = __map_request(osdc, req, 0); |
2128 | if (rc < 0) { | 2127 | if (rc < 0) { |
2129 | if (nofail) { | 2128 | if (nofail) { |
@@ -2456,8 +2455,10 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, | |||
2456 | ceph_msg_revoke_incoming(req->r_reply); | 2455 | ceph_msg_revoke_incoming(req->r_reply); |
2457 | 2456 | ||
2458 | if (front > req->r_reply->front.iov_len) { | 2457 | if (front > req->r_reply->front.iov_len) { |
2459 | pr_warning("get_reply front %d > preallocated %d\n", | 2458 | pr_warning("get_reply front %d > preallocated %d (%u#%llu)\n", |
2460 | front, (int)req->r_reply->front.iov_len); | 2459 | front, (int)req->r_reply->front.iov_len, |
2460 | (unsigned int)con->peer_name.type, | ||
2461 | le64_to_cpu(con->peer_name.num)); | ||
2461 | m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false); | 2462 | m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false); |
2462 | if (!m) | 2463 | if (!m) |
2463 | goto out; | 2464 | goto out; |