author	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-20 17:00:13 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-20 17:00:13 -0500
commit	40889e8d9fc6355980cf2bc94ef4356c10dec4ec (patch)
tree	c03f4e218477052c665cd9b01352f83e32c4a593 /net
parent	1ca22254b32657d65315af261ae0e699b8427fb7 (diff)
parent	c3e946ce7276faf0b302acd25c7b874edbeba661 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client
Pull Ceph update from Sage Weil:
 "There are a few different groups of commits here. The largest is
  Alex's ongoing work to enable the coming RBD features (cloning,
  striping). There is some cleanup in libceph that goes along with it.

  Cyril and David have fixed some problems with NFS reexport (leaking
  dentries and page locks), and there is a batch of patches from Yan
  fixing problems with the fs client when running against a clustered
  MDS. There are a few bug fixes mixed in for good measure, many of
  which will be going to the stable trees once they're upstream.

  My apologies for the late pull. There is still a gremlin in the rbd
  map/unmap code and I was hoping to include the fix for that as well,
  but we haven't been able to confirm the fix is correct yet; I'll
  send that in a separate pull once it's nailed down."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client: (68 commits)
  rbd: get rid of rbd_{get,put}_dev()
  libceph: register request before unregister linger
  libceph: don't use rb_init_node() in ceph_osdc_alloc_request()
  libceph: init event->node in ceph_osdc_create_event()
  libceph: init osd->o_node in create_osd()
  libceph: report connection fault with warning
  libceph: socket can close in any connection state
  rbd: don't use ENOTSUPP
  rbd: remove linger unconditionally
  rbd: get rid of RBD_MAX_SEG_NAME_LEN
  libceph: avoid using freed osd in __kick_osd_requests()
  ceph: don't reference req after put
  rbd: do not allow remove of mounted-on image
  libceph: Unlock unprocessed pages in start_read() error path
  ceph: call handle_cap_grant() for cap import message
  ceph: Fix __ceph_do_pending_vmtruncate
  ceph: Don't add dirty inode to dirty list if caps is in migration
  ceph: Fix infinite loop in __wake_requests
  ceph: Don't update i_max_size when handling non-auth cap
  bdi_register: add __printf verification, fix arg mismatch
  ...
Diffstat (limited to 'net')
-rw-r--r--	net/ceph/ceph_common.c	  3
-rw-r--r--	net/ceph/messenger.c	107
-rw-r--r--	net/ceph/osd_client.c	 59
-rw-r--r--	net/ceph/osdmap.c	 47
4 files changed, 105 insertions(+), 111 deletions(-)
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index a8020293f342..ee71ea26777a 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -305,7 +305,6 @@ ceph_parse_options(char *options, const char *dev_name,
 
 	/* start with defaults */
 	opt->flags = CEPH_OPT_DEFAULT;
-	opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
 	opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
 	opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
 	opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;   /* seconds */
@@ -391,7 +390,7 @@ ceph_parse_options(char *options, const char *dev_name,
 
 	/* misc */
 	case Opt_osdtimeout:
-		opt->osd_timeout = intval;
+		pr_warning("ignoring deprecated osdtimeout option\n");
 		break;
 	case Opt_osdkeepalivetimeout:
 		opt->osd_keepalive_timeout = intval;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3ef1759403b4..4d111fd2b492 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2244,22 +2244,62 @@ bad_tag:
 
 
 /*
- * Atomically queue work on a connection.  Bump @con reference to
- * avoid races with connection teardown.
+ * Atomically queue work on a connection after the specified delay.
+ * Bump @con reference to avoid races with connection teardown.
+ * Returns 0 if work was queued, or an error code otherwise.
  */
-static void queue_con(struct ceph_connection *con)
+static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
 {
 	if (!con->ops->get(con)) {
-		dout("queue_con %p ref count 0\n", con);
-		return;
+		dout("%s %p ref count 0\n", __func__, con);
+
+		return -ENOENT;
 	}
 
-	if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
-		dout("queue_con %p - already queued\n", con);
+	if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
+		dout("%s %p - already queued\n", __func__, con);
 		con->ops->put(con);
-	} else {
-		dout("queue_con %p\n", con);
+
+		return -EBUSY;
 	}
+
+	dout("%s %p %lu\n", __func__, con, delay);
+
+	return 0;
+}
+
+static void queue_con(struct ceph_connection *con)
+{
+	(void) queue_con_delay(con, 0);
+}
+
+static bool con_sock_closed(struct ceph_connection *con)
+{
+	if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
+		return false;
+
+#define CASE(x)								\
+	case CON_STATE_ ## x:						\
+		con->error_msg = "socket closed (con state " #x ")";	\
+		break;
+
+	switch (con->state) {
+	CASE(CLOSED);
+	CASE(PREOPEN);
+	CASE(CONNECTING);
+	CASE(NEGOTIATING);
+	CASE(OPEN);
+	CASE(STANDBY);
+	default:
+		pr_warning("%s con %p unrecognized state %lu\n",
+			__func__, con, con->state);
+		con->error_msg = "unrecognized con state";
+		BUG();
+		break;
+	}
+#undef CASE
+
+	return true;
 }
 
 /*
@@ -2273,35 +2313,16 @@ static void con_work(struct work_struct *work)
 
 	mutex_lock(&con->mutex);
 restart:
-	if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
-		switch (con->state) {
-		case CON_STATE_CONNECTING:
-			con->error_msg = "connection failed";
-			break;
-		case CON_STATE_NEGOTIATING:
-			con->error_msg = "negotiation failed";
-			break;
-		case CON_STATE_OPEN:
-			con->error_msg = "socket closed";
-			break;
-		default:
-			dout("unrecognized con state %d\n", (int)con->state);
-			con->error_msg = "unrecognized con state";
-			BUG();
-		}
+	if (con_sock_closed(con))
 		goto fault;
-	}
 
 	if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
 		dout("con_work %p backing off\n", con);
-		if (queue_delayed_work(ceph_msgr_wq, &con->work,
-				       round_jiffies_relative(con->delay))) {
-			dout("con_work %p backoff %lu\n", con, con->delay);
-			mutex_unlock(&con->mutex);
-			return;
-		} else {
+		ret = queue_con_delay(con, round_jiffies_relative(con->delay));
+		if (ret) {
 			dout("con_work %p FAILED to back off %lu\n", con,
 			     con->delay);
+			BUG_ON(ret == -ENOENT);
 			set_bit(CON_FLAG_BACKOFF, &con->flags);
 		}
 		goto done;
@@ -2356,7 +2377,7 @@ fault:
 static void ceph_fault(struct ceph_connection *con)
 	__releases(con->mutex)
 {
-	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
+	pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
 	       ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
 	dout("fault %p state %lu to peer %s\n",
 	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
@@ -2398,24 +2419,8 @@ static void ceph_fault(struct ceph_connection *con)
 		con->delay = BASE_DELAY_INTERVAL;
 	else if (con->delay < MAX_DELAY_INTERVAL)
 		con->delay *= 2;
-	con->ops->get(con);
-	if (queue_delayed_work(ceph_msgr_wq, &con->work,
-			       round_jiffies_relative(con->delay))) {
-		dout("fault queued %p delay %lu\n", con, con->delay);
-	} else {
-		con->ops->put(con);
-		dout("fault failed to queue %p delay %lu, backoff\n",
-		     con, con->delay);
-		/*
-		 * In many cases we see a socket state change
-		 * while con_work is running and end up
-		 * queuing (non-delayed) work, such that we
-		 * can't backoff with a delay.  Set a flag so
-		 * that when con_work restarts we schedule the
-		 * delay then.
-		 */
-		set_bit(CON_FLAG_BACKOFF, &con->flags);
-	}
+	set_bit(CON_FLAG_BACKOFF, &con->flags);
+	queue_con(con);
 	}
 
 out_unlock:
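
The con_sock_closed() helper added above builds each error string with the
preprocessor's stringizing operator, so a state name is never typed twice and
a new state costs one CASE() line. A minimal userspace sketch of that
technique follows; the enum and function names here are illustrative, not
taken from libceph:

	#include <stdio.h>

	enum con_state { CLOSED, PREOPEN, CONNECTING, NEGOTIATING, OPEN, STANDBY };

	/* One switch case per state: #x turns the macro argument into a
	 * string literal, concatenated into the message at compile time. */
	static const char *closed_msg(enum con_state state)
	{
	#define CASE(x) case x: return "socket closed (con state " #x ")"
		switch (state) {
		CASE(CLOSED);
		CASE(PREOPEN);
		CASE(CONNECTING);
		CASE(NEGOTIATING);
		CASE(OPEN);
		CASE(STANDBY);
		}
	#undef CASE
		return "unrecognized con state";
	}

	int main(void)
	{
		/* prints: socket closed (con state NEGOTIATING) */
		printf("%s\n", closed_msg(NEGOTIATING));
		return 0;
	}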
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index c1d756cc7448..780caf6b0491 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -221,6 +221,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 	kref_init(&req->r_kref);
 	init_completion(&req->r_completion);
 	init_completion(&req->r_safe_completion);
+	RB_CLEAR_NODE(&req->r_node);
 	INIT_LIST_HEAD(&req->r_unsafe_item);
 	INIT_LIST_HEAD(&req->r_linger_item);
 	INIT_LIST_HEAD(&req->r_linger_osd);
@@ -580,7 +581,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
 
 	dout("__kick_osd_requests osd%d\n", osd->o_osd);
 	err = __reset_osd(osdc, osd);
-	if (err == -EAGAIN)
+	if (err)
 		return;
 
 	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
@@ -607,14 +608,6 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
 	}
 }
 
-static void kick_osd_requests(struct ceph_osd_client *osdc,
-			      struct ceph_osd *kickosd)
-{
-	mutex_lock(&osdc->request_mutex);
-	__kick_osd_requests(osdc, kickosd);
-	mutex_unlock(&osdc->request_mutex);
-}
-
 /*
  * If the osd connection drops, we need to resubmit all requests.
  */
@@ -628,7 +621,9 @@ static void osd_reset(struct ceph_connection *con)
 	dout("osd_reset osd%d\n", osd->o_osd);
 	osdc = osd->o_osdc;
 	down_read(&osdc->map_sem);
-	kick_osd_requests(osdc, osd);
+	mutex_lock(&osdc->request_mutex);
+	__kick_osd_requests(osdc, osd);
+	mutex_unlock(&osdc->request_mutex);
 	send_queued(osdc);
 	up_read(&osdc->map_sem);
 }
@@ -647,6 +642,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
 	atomic_set(&osd->o_ref, 1);
 	osd->o_osdc = osdc;
 	osd->o_osd = onum;
+	RB_CLEAR_NODE(&osd->o_node);
 	INIT_LIST_HEAD(&osd->o_requests);
 	INIT_LIST_HEAD(&osd->o_linger_requests);
 	INIT_LIST_HEAD(&osd->o_osd_lru);
@@ -750,6 +746,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 	if (list_empty(&osd->o_requests) &&
 	    list_empty(&osd->o_linger_requests)) {
 		__remove_osd(osdc, osd);
+		ret = -ENODEV;
 	} else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
 			  &osd->o_con.peer_addr,
 			  sizeof(osd->o_con.peer_addr)) == 0 &&
@@ -876,9 +873,9 @@ static void __unregister_request(struct ceph_osd_client *osdc,
 		req->r_osd = NULL;
 	}
 
+	list_del_init(&req->r_req_lru_item);
 	ceph_osdc_put_request(req);
 
-	list_del_init(&req->r_req_lru_item);
 	if (osdc->num_requests == 0) {
 		dout(" no requests, canceling timeout\n");
 		__cancel_osd_timeout(osdc);
@@ -910,8 +907,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
 				 struct ceph_osd_request *req)
 {
 	dout("__unregister_linger_request %p\n", req);
+	list_del_init(&req->r_linger_item);
 	if (req->r_osd) {
-		list_del_init(&req->r_linger_item);
 		list_del_init(&req->r_linger_osd);
 
 		if (list_empty(&req->r_osd->o_requests) &&
@@ -1090,12 +1087,10 @@ static void handle_timeout(struct work_struct *work)
 {
 	struct ceph_osd_client *osdc =
 		container_of(work, struct ceph_osd_client, timeout_work.work);
-	struct ceph_osd_request *req, *last_req = NULL;
+	struct ceph_osd_request *req;
 	struct ceph_osd *osd;
-	unsigned long timeout = osdc->client->options->osd_timeout * HZ;
 	unsigned long keepalive =
 		osdc->client->options->osd_keepalive_timeout * HZ;
-	unsigned long last_stamp = 0;
 	struct list_head slow_osds;
 	dout("timeout\n");
 	down_read(&osdc->map_sem);
@@ -1105,37 +1100,6 @@ static void handle_timeout(struct work_struct *work)
 	mutex_lock(&osdc->request_mutex);
 
 	/*
-	 * reset osds that appear to be _really_ unresponsive.  this
-	 * is a failsafe measure.. we really shouldn't be getting to
-	 * this point if the system is working properly.  the monitors
-	 * should mark the osd as failed and we should find out about
-	 * it from an updated osd map.
-	 */
-	while (timeout && !list_empty(&osdc->req_lru)) {
-		req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
-				 r_req_lru_item);
-
-		/* hasn't been long enough since we sent it? */
-		if (time_before(jiffies, req->r_stamp + timeout))
-			break;
-
-		/* hasn't been long enough since it was acked? */
-		if (req->r_request->ack_stamp == 0 ||
-		    time_before(jiffies, req->r_request->ack_stamp + timeout))
-			break;
-
-		BUG_ON(req == last_req && req->r_stamp == last_stamp);
-		last_req = req;
-		last_stamp = req->r_stamp;
-
-		osd = req->r_osd;
-		BUG_ON(!osd);
-		pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
-			   req->r_tid, osd->o_osd);
-		__kick_osd_requests(osdc, osd);
-	}
-
-	/*
 	 * ping osds that are a bit slow.  this ensures that if there
 	 * is a break in the TCP connection we will notice, and reopen
 	 * a connection with that osd (from the fault callback).
@@ -1364,8 +1328,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
 
 		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
 		     req->r_osd ? req->r_osd->o_osd : -1);
-		__unregister_linger_request(osdc, req);
 		__register_request(osdc, req);
+		__unregister_linger_request(osdc, req);
 	}
 	mutex_unlock(&osdc->request_mutex);
 
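The swap of __register_request() and __unregister_linger_request() just above
is a reference-count ordering fix: take the new reference before dropping the
old one, otherwise the count can transiently reach zero and free an object
that is still in use. In miniature, with hypothetical names:

	#include <assert.h>
	#include <stdio.h>

	struct obj {
		int refs;
		int freed;
	};

	static void get(struct obj *o) { assert(!o->freed); o->refs++; }
	static void put(struct obj *o) { if (--o->refs == 0) o->freed = 1; }

	int main(void)
	{
		struct obj o = { .refs = 1 };	/* held by the linger registration */

		/* Patched order: acquire the regular-request reference first,
		 * then release the linger reference; refs never touches zero. */
		get(&o);	/* stands in for __register_request() */
		put(&o);	/* stands in for __unregister_linger_request() */
		printf("refs=%d freed=%d\n", o.refs, o.freed);	/* refs=1 freed=0 */

		/* In the reversed order (put, then get), refs would hit zero
		 * in between, freeing the object while it is still needed. */
		return 0;
	}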
@@ -1599,6 +1563,7 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc,
 	event->data = data;
 	event->osdc = osdc;
 	INIT_LIST_HEAD(&event->osd_node);
+	RB_CLEAR_NODE(&event->node);
 	kref_init(&event->kref);   /* one ref for us */
 	kref_get(&event->kref);    /* one ref for the caller */
 	init_completion(&event->completion);
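
All three RB_CLEAR_NODE() additions in this file fix the same class of bug:
an rb_node that was never inserted must be explicitly marked empty, or a
later "is this linked?" test reads an uninitialized pointer. Below is a
self-contained imitation of the idiom; the real macros in <linux/rbtree.h>
pack colour bits into the parent pointer, which this sketch omits:

	#include <stdio.h>

	struct rb_node {
		struct rb_node *parent;
		struct rb_node *left, *right;
	};

	/* "Not in any tree" is encoded as parent == self, a value no linked
	 * node can have, so emptiness is testable without an extra flag. */
	#define RB_CLEAR_NODE(n)	((n)->parent = (n))
	#define RB_EMPTY_NODE(n)	((n)->parent == (n))

	struct request {
		int tid;
		struct rb_node r_node;
	};

	int main(void)
	{
		struct request req = { .tid = 42 };	/* parent is NULL: looks linked */

		RB_CLEAR_NODE(&req.r_node);	/* what the patch adds at alloc time */
		printf("tid %d linked: %s\n", req.tid,
		       RB_EMPTY_NODE(&req.r_node) ? "no" : "yes");
		return 0;
	}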
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 5433fb0eb3c6..de73214b5d26 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -469,6 +469,22 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
 	return NULL;
 }
 
+const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
+{
+	struct ceph_pg_pool_info *pi;
+
+	if (id == CEPH_NOPOOL)
+		return NULL;
+
+	if (WARN_ON_ONCE(id > (u64) INT_MAX))
+		return NULL;
+
+	pi = __lookup_pg_pool(&map->pg_pools, (int) id);
+
+	return pi ? pi->name : NULL;
+}
+EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
+
 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
 {
 	struct rb_node *rbp;
@@ -645,10 +661,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 	ceph_decode_32_safe(p, end, max, bad);
 	while (max--) {
 		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
+		err = -ENOMEM;
 		pi = kzalloc(sizeof(*pi), GFP_NOFS);
 		if (!pi)
 			goto bad;
 		pi->id = ceph_decode_32(p);
+		err = -EINVAL;
 		ev = ceph_decode_8(p);  /* encoding version */
 		if (ev > CEPH_PG_POOL_VERSION) {
 			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
@@ -664,8 +682,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 		__insert_pg_pool(&map->pg_pools, pi);
 	}
 
-	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
-		goto bad;
+	if (version >= 5) {
+		err = __decode_pool_names(p, end, map);
+		if (err < 0) {
+			dout("fail to decode pool names");
+			goto bad;
+		}
+	}
 
 	ceph_decode_32_safe(p, end, map->pool_max, bad);
 
@@ -745,7 +768,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 	return map;
 
 bad:
-	dout("osdmap_decode fail\n");
+	dout("osdmap_decode fail err %d\n", err);
 	ceph_osdmap_destroy(map);
 	return ERR_PTR(err);
 }
@@ -839,6 +862,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		if (ev > CEPH_PG_POOL_VERSION) {
 			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
 				   ev, CEPH_PG_POOL_VERSION);
+			err = -EINVAL;
 			goto bad;
 		}
 		pi = __lookup_pg_pool(&map->pg_pools, pool);
@@ -855,8 +879,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		if (err < 0)
 			goto bad;
 	}
-	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
-		goto bad;
+	if (version >= 5) {
+		err = __decode_pool_names(p, end, map);
+		if (err < 0)
+			goto bad;
+	}
 
 	/* old_pool */
 	ceph_decode_32_safe(p, end, len, bad);
@@ -932,15 +959,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		(void) __remove_pg_mapping(&map->pg_temp, pgid);
 
 		/* insert */
-		if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
-			err = -EINVAL;
+		err = -EINVAL;
+		if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
 			goto bad;
-		}
+		err = -ENOMEM;
 		pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
-		if (!pg) {
-			err = -ENOMEM;
+		if (!pg)
 			goto bad;
-		}
 		pg->pgid = pgid;
 		pg->len = pglen;
 		for (j = 0; j < pglen; j++)
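
Several osdmap hunks above converge on one error-handling shape: set err to
the appropriate code immediately before each operation that can fail, so
every failure site can branch to a single bad: label with no per-site cleanup
block. A userspace sketch of that shape, with hypothetical names (decode_blob
is not a libceph function):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Each failure path inherits whichever err was set last, and the
	 * bad: label reports and bails out in one place. */
	static void *decode_blob(size_t len)
	{
		void *buf;
		int err;

		err = -EINVAL;		/* covers the validation below */
		if (len == 0 || len > 4096)
			goto bad;

		err = -ENOMEM;		/* covers the allocation below */
		buf = malloc(len);
		if (!buf)
			goto bad;

		return buf;
	bad:
		fprintf(stderr, "decode_blob fail err %d\n", err);
		return NULL;
	}

	int main(void)
	{
		void *ok = decode_blob(16);	/* succeeds */
		void *rej = decode_blob(0);	/* fails with -EINVAL */

		free(ok);
		return rej != NULL;
	}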