author		Gavin Shan <gwshan@linux.vnet.ibm.com>	2016-05-03 09:22:48 -0400
committer	Rob Herring <robh@kernel.org>	2016-05-16 08:22:34 -0400
commit		50800082f17645620bfdd357ba9141c86b76363d (patch)
tree		d5e13e0dd6bf52d2908d090c9b4654aa7b8ecf8d /drivers/of
parent		dfbd4c6eff35f1b1065cca046003cc9d7ff27222 (diff)
drivers/of: Avoid recursively calling unflatten_dt_node()
In the current implementation, unflatten_dt_node() is called recursively to unflatten the device nodes in the FDT blob. That puts pressure on the limited stack space, especially when the function is adapted to unflatten a device sub-tree that may have multiple root nodes; in that case we run out of stack and the system cannot boot successfully.

In order to reuse the function for unflattening a device sub-tree, this avoids calling it recursively, meaning all device nodes are unflattened in a single call to unflatten_dt_node(): two arrays are introduced to track the parent path size and the device node at the current depth, which are then used when unflattening the device nodes at the next depth. All device nodes deeper than 64 levels are dropped and, hopefully, the system can still boot with the partial device tree.

Also, the parameters "poffset" and "fpsize" are no longer needed and are dropped, and "dryrun" is now derived from "mem == NULL". Besides, the return value of the function is changed to indicate the size of memory consumed by the unflattened device tree, or an error code.

Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Acked-by: Rob Herring <robh@kernel.org>
Signed-off-by: Rob Herring <robh@kernel.org>
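To make the control flow of the new loop easier to see, here is a minimal, userspace-style sketch of the same traversal pattern built on libfdt's fdt_next_node() and fdt_get_name(); walk_fdt(), MAX_DEPTH and the names[] array are hypothetical stand-ins for the per-depth state the patch keeps, not kernel code:

#include <stdio.h>
#include <libfdt.h>

#define MAX_DEPTH 64	/* mirrors FDT_MAX_DEPTH in the patch */

/* Iterative walk of an FDT blob: fdt_next_node() visits every node while
 * updating "depth", so one array slot per depth level replaces the call
 * stack that a recursive walk would consume. */
static void walk_fdt(const void *blob)
{
	const char *names[MAX_DEPTH];
	int offset, depth;

	names[0] = "";		/* depth 0: the (non-existent) parent of the root */
	depth = 1;

	for (offset = 0;
	     offset >= 0;
	     offset = fdt_next_node(blob, offset, &depth)) {
		if (depth >= MAX_DEPTH)
			continue;	/* drop nodes nested too deeply */

		/* state recorded at this depth is what the next, deeper
		 * level looks up as its parent */
		names[depth] = fdt_get_name(blob, offset, NULL);
		printf("%*s%s (parent \"%s\")\n", depth * 2, "",
		       names[depth], names[depth - 1]);
	}
}

The patch itself records, per depth, the full-path length (fpsizes[]) and the unflattened node (nps[]) rather than a name, since those are what populate_node() needs from the parent level.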
Diffstat (limited to 'drivers/of')
-rw-r--r--	drivers/of/fdt.c	122
1 file changed, 74 insertions, 48 deletions
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 1b8c4ab0574d..c2c4afcbb971 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -356,63 +356,90 @@ static unsigned long populate_node(const void *blob,
 	return fpsize;
 }
 
+static void reverse_nodes(struct device_node *parent)
+{
+	struct device_node *child, *next;
+
+	/* In-depth first */
+	child = parent->child;
+	while (child) {
+		reverse_nodes(child);
+
+		child = child->sibling;
+	}
+
+	/* Reverse the nodes in the child list */
+	child = parent->child;
+	parent->child = NULL;
+	while (child) {
+		next = child->sibling;
+
+		child->sibling = parent->child;
+		parent->child = child;
+		child = next;
+	}
+}
+
 /**
  * unflatten_dt_node - Alloc and populate a device_node from the flat tree
  * @blob: The parent device tree blob
  * @mem: Memory chunk to use for allocating device nodes and properties
- * @poffset: pointer to node in flat tree
  * @dad: Parent struct device_node
  * @nodepp: The device_node tree created by the call
- * @fpsize: Size of the node path up at the current depth.
- * @dryrun: If true, do not allocate device nodes but still calculate needed
- * memory size
+ *
+ * It returns the size of unflattened device tree or error code
  */
-static void *unflatten_dt_node(const void *blob,
-			       void *mem,
-			       int *poffset,
-			       struct device_node *dad,
-			       struct device_node **nodepp,
-			       unsigned long fpsize,
-			       bool dryrun)
+static int unflatten_dt_node(const void *blob,
+			     void *mem,
+			     struct device_node *dad,
+			     struct device_node **nodepp)
 {
-	struct device_node *np;
-	static int depth;
-	int old_depth;
+	struct device_node *root;
+	int offset = 0, depth = 0;
+#define FDT_MAX_DEPTH	64
+	unsigned long fpsizes[FDT_MAX_DEPTH];
+	struct device_node *nps[FDT_MAX_DEPTH];
+	void *base = mem;
+	bool dryrun = !base;
 
-	fpsize = populate_node(blob, *poffset, &mem, dad, fpsize, &np, dryrun);
-	if (!fpsize)
-		return mem;
+	if (nodepp)
+		*nodepp = NULL;
+
+	root = dad;
+	fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
+	nps[depth++] = dad;
+	for (offset = 0;
+	     offset >= 0;
+	     offset = fdt_next_node(blob, offset, &depth)) {
+		if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
+			continue;
 
-	old_depth = depth;
-	*poffset = fdt_next_node(blob, *poffset, &depth);
-	if (depth < 0)
-		depth = 0;
-	while (*poffset > 0 && depth > old_depth)
-		mem = unflatten_dt_node(blob, mem, poffset, np, NULL,
-					fpsize, dryrun);
+		fpsizes[depth] = populate_node(blob, offset, &mem,
+					       nps[depth - 1],
+					       fpsizes[depth - 1],
+					       &nps[depth], dryrun);
+		if (!fpsizes[depth])
+			return mem - base;
+
+		if (!dryrun && nodepp && !*nodepp)
+			*nodepp = nps[depth];
+		if (!dryrun && !root)
+			root = nps[depth];
+	}
 
-	if (*poffset < 0 && *poffset != -FDT_ERR_NOTFOUND)
-		pr_err("unflatten: error %d processing FDT\n", *poffset);
+	if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
+		pr_err("%s: Error %d processing FDT\n", __func__, offset);
+		return -EINVAL;
+	}
 
 	/*
 	 * Reverse the child list. Some drivers assumes node order matches .dts
 	 * node order
 	 */
-	if (!dryrun && np->child) {
-		struct device_node *child = np->child;
-		np->child = NULL;
-		while (child) {
-			struct device_node *next = child->sibling;
-			child->sibling = np->child;
-			np->child = child;
-			child = next;
-		}
-	}
-
-	if (nodepp)
-		*nodepp = np;
+	if (!dryrun)
+		reverse_nodes(root);
 
-	return mem;
+	return mem - base;
 }
 
 /**
@@ -431,8 +458,7 @@ static void __unflatten_device_tree(const void *blob,
 					  struct device_node **mynodes,
 					  void * (*dt_alloc)(u64 size, u64 align))
 {
-	unsigned long size;
-	int start;
+	int size;
 	void *mem;
 
 	pr_debug(" -> unflatten_device_tree()\n");
@@ -453,11 +479,12 @@ static void __unflatten_device_tree(const void *blob,
 	}
 
 	/* First pass, scan for size */
-	start = 0;
-	size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0, true);
-	size = ALIGN(size, 4);
+	size = unflatten_dt_node(blob, NULL, NULL, NULL);
+	if (size < 0)
+		return;
 
-	pr_debug("  size is %lx, allocating...\n", size);
+	size = ALIGN(size, 4);
+	pr_debug("  size is %d, allocating...\n", size);
 
 	/* Allocate memory for the expanded device tree */
 	mem = dt_alloc(size + 4, __alignof__(struct device_node));
@@ -468,8 +495,7 @@ static void __unflatten_device_tree(const void *blob,
 	pr_debug("  unflattening %p...\n", mem);
 
 	/* Second pass, do actual unflattening */
-	start = 0;
-	unflatten_dt_node(blob, mem, &start, NULL, mynodes, 0, false);
+	unflatten_dt_node(blob, mem, NULL, mynodes);
 	if (be32_to_cpup(mem + size) != 0xdeadbeef)
 		pr_warning("End of tree marker overwritten: %08x\n",
 			   be32_to_cpup(mem + size));
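For how a caller consumes the new return-value convention (size from a dry run with mem == NULL, then the real pass), here is a sketch modelled on __unflatten_device_tree() above; unflatten_blob() and my_alloc() are hypothetical names used only for illustration, not part of the kernel API:

/* Two-pass use of the reworked unflatten_dt_node(): pass 1 with mem == NULL
 * only measures, pass 2 builds the device_node tree into the allocation.
 * my_alloc() stands in for the dt_alloc() callback. */
static struct device_node *unflatten_blob(const void *blob,
					  void *(*my_alloc)(u64 size, u64 align))
{
	struct device_node *root = NULL;
	void *mem;
	int size;

	size = unflatten_dt_node(blob, NULL, NULL, NULL);	/* dry run: size only */
	if (size < 0)
		return NULL;

	size = ALIGN(size, 4);
	mem = my_alloc(size + 4, __alignof__(struct device_node));
	if (!mem)
		return NULL;

	memset(mem, 0, size);
	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);	/* guard word */

	unflatten_dt_node(blob, mem, NULL, &root);		/* real pass */
	if (be32_to_cpup(mem + size) != 0xdeadbeef)
		pr_warn("end of tree marker overwritten\n");

	return root;
}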