Diffstat (limited to 'fs/ceph/osdmap.c')
 fs/ceph/osdmap.c | 70 +++++++++++++++++++++++++++----------------------------
 1 file changed, 35 insertions(+), 35 deletions(-)
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index 342e5f80996b..6f0aeff4185a 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -67,7 +67,7 @@ static int crush_decode_uniform_bucket(void **p, void *end,
 {
 	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
 	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
-	ceph_decode_32(p, b->item_weight);
+	b->item_weight = ceph_decode_32(p);
 	return 0;
 bad:
 	return -EINVAL;
@@ -86,8 +86,8 @@ static int crush_decode_list_bucket(void **p, void *end,
 		return -ENOMEM;
 	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
 	for (j = 0; j < b->h.size; j++) {
-		ceph_decode_32(p, b->item_weights[j]);
-		ceph_decode_32(p, b->sum_weights[j]);
+		b->item_weights[j] = ceph_decode_32(p);
+		b->sum_weights[j] = ceph_decode_32(p);
 	}
 	return 0;
 bad:
@@ -105,7 +105,7 @@ static int crush_decode_tree_bucket(void **p, void *end,
 		return -ENOMEM;
 	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
 	for (j = 0; j < b->num_nodes; j++)
-		ceph_decode_32(p, b->node_weights[j]);
+		b->node_weights[j] = ceph_decode_32(p);
 	return 0;
 bad:
 	return -EINVAL;
@@ -124,8 +124,8 @@ static int crush_decode_straw_bucket(void **p, void *end,
 		return -ENOMEM;
 	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
 	for (j = 0; j < b->h.size; j++) {
-		ceph_decode_32(p, b->item_weights[j]);
-		ceph_decode_32(p, b->straws[j]);
+		b->item_weights[j] = ceph_decode_32(p);
+		b->straws[j] = ceph_decode_32(p);
 	}
 	return 0;
 bad:
@@ -148,15 +148,15 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 		return ERR_PTR(-ENOMEM);
 
 	ceph_decode_need(p, end, 4*sizeof(u32), bad);
-	ceph_decode_32(p, magic);
+	magic = ceph_decode_32(p);
 	if (magic != CRUSH_MAGIC) {
 		pr_err("crush_decode magic %x != current %x\n",
 		       (unsigned)magic, (unsigned)CRUSH_MAGIC);
 		goto bad;
 	}
-	ceph_decode_32(p, c->max_buckets);
-	ceph_decode_32(p, c->max_rules);
-	ceph_decode_32(p, c->max_devices);
+	c->max_buckets = ceph_decode_32(p);
+	c->max_rules = ceph_decode_32(p);
+	c->max_devices = ceph_decode_32(p);
 
 	c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
 	if (c->device_parents == NULL)
@@ -208,11 +208,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 			goto badmem;
 
 		ceph_decode_need(p, end, 4*sizeof(u32), bad);
-		ceph_decode_32(p, b->id);
-		ceph_decode_16(p, b->type);
-		ceph_decode_16(p, b->alg);
-		ceph_decode_32(p, b->weight);
-		ceph_decode_32(p, b->size);
+		b->id = ceph_decode_32(p);
+		b->type = ceph_decode_16(p);
+		b->alg = ceph_decode_16(p);
+		b->weight = ceph_decode_32(p);
+		b->size = ceph_decode_32(p);
 
 		dout("crush_decode bucket size %d off %x %p to %p\n",
 		     b->size, (int)(*p-start), *p, end);
@@ -227,7 +227,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 
 		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
 		for (j = 0; j < b->size; j++)
-			ceph_decode_32(p, b->items[j]);
+			b->items[j] = ceph_decode_32(p);
 
 		switch (b->alg) {
 		case CRUSH_BUCKET_UNIFORM:
@@ -290,9 +290,9 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
 		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
 		for (j = 0; j < r->len; j++) {
-			ceph_decode_32(p, r->steps[j].op);
-			ceph_decode_32(p, r->steps[j].arg1);
-			ceph_decode_32(p, r->steps[j].arg2);
+			r->steps[j].op = ceph_decode_32(p);
+			r->steps[j].arg1 = ceph_decode_32(p);
+			r->steps[j].arg2 = ceph_decode_32(p);
 		}
 	}
 
@@ -411,11 +411,11 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 
 	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
 	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
-	ceph_decode_32(p, map->epoch);
+	map->epoch = ceph_decode_32(p);
 	ceph_decode_copy(p, &map->created, sizeof(map->created));
 	ceph_decode_copy(p, &map->modified, sizeof(map->modified));
 
-	ceph_decode_32(p, map->num_pools);
+	map->num_pools = ceph_decode_32(p);
 	map->pg_pool = kcalloc(map->num_pools, sizeof(*map->pg_pool),
 			       GFP_NOFS);
 	if (!map->pg_pool) {
@@ -425,7 +425,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 	ceph_decode_32_safe(p, end, max, bad);
 	while (max--) {
 		ceph_decode_need(p, end, 4+sizeof(map->pg_pool->v), bad);
-		ceph_decode_32(p, i);
+		i = ceph_decode_32(p);
 		if (i >= map->num_pools)
 			goto bad;
 		ceph_decode_copy(p, &map->pg_pool[i].v,
@@ -438,7 +438,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 
 	ceph_decode_32_safe(p, end, map->flags, bad);
 
-	ceph_decode_32(p, max);
+	max = ceph_decode_32(p);
 
 	/* (re)alloc osd arrays */
 	err = osdmap_set_max_osd(map, max);
@@ -456,7 +456,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 
 	*p += 4; /* skip length field (should match max) */
 	for (i = 0; i < map->max_osd; i++)
-		ceph_decode_32(p, map->osd_weight[i]);
+		map->osd_weight[i] = ceph_decode_32(p);
 
 	*p += 4; /* skip length field (should match max) */
 	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
@@ -469,8 +469,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 		struct ceph_pg_mapping *pg;
 
 		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
-		ceph_decode_64(p, pgid);
-		ceph_decode_32(p, n);
+		pgid = ceph_decode_64(p);
+		n = ceph_decode_32(p);
 		ceph_decode_need(p, end, n * sizeof(u32), bad);
 		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
 		if (!pg) {
@@ -480,7 +480,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 		pg->pgid = pgid;
 		pg->len = n;
 		for (j = 0; j < n; j++)
-			ceph_decode_32(p, pg->osds[j]);
+			pg->osds[j] = ceph_decode_32(p);
 
 		err = __insert_pg_mapping(pg, &map->pg_temp);
 		if (err)
@@ -537,10 +537,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
 			 bad);
 	ceph_decode_copy(p, &fsid, sizeof(fsid));
-	ceph_decode_32(p, epoch);
+	epoch = ceph_decode_32(p);
 	BUG_ON(epoch != map->epoch+1);
 	ceph_decode_copy(p, &modified, sizeof(modified));
-	ceph_decode_32(p, new_flags);
+	new_flags = ceph_decode_32(p);
 
 	/* full map? */
 	ceph_decode_32_safe(p, end, len, bad);
@@ -568,7 +568,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	ceph_decode_need(p, end, 5*sizeof(u32), bad);
 
 	/* new max? */
-	ceph_decode_32(p, max);
+	max = ceph_decode_32(p);
 	if (max >= 0) {
 		err = osdmap_set_max_osd(map, max);
 		if (err < 0)
@@ -641,8 +641,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	while (len--) {
 		u32 osd, off;
 		ceph_decode_need(p, end, sizeof(u32)*2, bad);
-		ceph_decode_32(p, osd);
-		ceph_decode_32(p, off);
+		osd = ceph_decode_32(p);
+		off = ceph_decode_32(p);
 		pr_info("osd%d weight 0x%x %s\n", osd, off,
 			off == CEPH_OSD_IN ? "(in)" :
 			(off == CEPH_OSD_OUT ? "(out)" : ""));
@@ -659,8 +659,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		u64 pgid;
 		u32 pglen;
 		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
-		ceph_decode_64(p, pgid);
-		ceph_decode_32(p, pglen);
+		pgid = ceph_decode_64(p);
+		pglen = ceph_decode_32(p);
 
 		/* remove any? */
 		while (rbp && rb_entry(rbp, struct ceph_pg_mapping,
@@ -683,7 +683,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		pg->pgid = pgid;
 		pg->len = pglen;
 		for (j = 0; j < len; j++)
-			ceph_decode_32(p, pg->osds[j]);
+			pg->osds[j] = ceph_decode_32(p);
 		err = __insert_pg_mapping(pg, &map->pg_temp);
 		if (err)
 			goto bad;
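
Every hunk above makes the same calling-convention change: the old out-parameter form ceph_decode_32(p, v) becomes the value-returning form v = ceph_decode_32(p). The helper bodies are not part of this diff, so the following is only a hedged userspace sketch of what a value-returning little-endian decoder that advances the cursor could look like; the byte assembly stands in for the kernel's unaligned little-endian accessors, and the names are reused purely for illustration.

/*
 * Hedged sketch only -- not taken from the kernel tree.  It illustrates
 * the convention the diff converts to: the decoder returns the value
 * and advances the cursor, instead of writing through an out-parameter.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t ceph_decode_32(void **p)
{
	const uint8_t *b = *p;
	uint32_t v = (uint32_t)b[0] |
		     (uint32_t)b[1] << 8 |
		     (uint32_t)b[2] << 16 |
		     (uint32_t)b[3] << 24;	/* little-endian wire order */

	*p = (uint8_t *)*p + sizeof(uint32_t);	/* advance the cursor */
	return v;
}

static inline uint64_t ceph_decode_64(void **p)
{
	uint64_t lo = ceph_decode_32(p);	/* low word comes first */
	uint64_t hi = ceph_decode_32(p);

	return lo | hi << 32;
}

int main(void)
{
	/* 0x01020304 followed by 0x1122334455667788, both little-endian */
	uint8_t buf[] = { 0x04, 0x03, 0x02, 0x01,
			  0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
	void *p = buf;

	/* new style, as in the diff: assign the returned value */
	uint32_t magic = ceph_decode_32(&p);
	uint64_t pgid  = ceph_decode_64(&p);

	printf("magic=0x%x pgid=0x%llx\n", magic, (unsigned long long)pgid);
	return 0;
}

Note that the bounds checking stays a separate step: the ceph_decode_need(p, end, n, bad) calls in the hunks above are untouched, so callers still reserve space for a run of fields before decoding them with the value-returning helpers.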