author    Thomas Gleixner <tglx@linutronix.de>   2013-02-06 15:30:56 -0500
committer Rob Herring <rob.herring@calxeda.com>  2013-02-08 18:02:40 -0500
commit    d6d3c4e656513dcea61ce900f0ecb9ca820ee7cd (patch)
tree      9d5602c46a048aa23be0f5f3fffcf126ace3f651 /drivers/of/base.c
parent    eb7ccb8184ab36b61fb596bd8d573e22e04d6266 (diff)
OF: convert devtree lock from rw_lock to raw spinlock
With the locking cleanup in place (from "OF: Fixup resursive locking code
paths"), we can now do the conversion from the rw_lock to a raw spinlock
as required for preempt-rt.

The previous cleanup and this conversion were originally separate since
they predated when mainline got raw spinlock (in commit c2f21ce2e31286a
"locking: Implement new raw_spinlock"). So, at that point in time, the
cleanup was considered plausible for mainline, but not this conversion.
In any case, we've kept them separate as it makes for easier review and
better bisection.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PG: taken from preempt-rt, update subject & add a commit log]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Rob Herring <rob.herring@calxeda.com>
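The conversion itself is mechanical: the lock definition changes from DEFINE_RWLOCK() to DEFINE_RAW_SPINLOCK(), most reader paths that used read_lock()/read_unlock() now take the raw lock with the _irqsave/_irqrestore variants (a few keep plain raw_spin_lock()/raw_spin_unlock()), and writer paths swap write_lock_irqsave()/write_unlock_irqrestore() for the raw equivalents. A minimal sketch of the reader-side pattern follows; example_of_reader() is a hypothetical caller standing in for the of_* helpers changed in the diff below.

/*
 * Illustrative sketch only; the real call sites are the of_* helpers
 * in drivers/of/base.c shown in the diff below.
 */
#include <linux/spinlock.h>

DEFINE_RAW_SPINLOCK(devtree_lock);      /* was: DEFINE_RWLOCK(devtree_lock); */

static void example_of_reader(void)    /* hypothetical caller, for illustration */
{
        unsigned long flags;

        raw_spin_lock_irqsave(&devtree_lock, flags);   /* was: read_lock(&devtree_lock); */
        /* ... walk the allnext/child/sibling links under the lock ... */
        raw_spin_unlock_irqrestore(&devtree_lock, flags);      /* was: read_unlock(&devtree_lock); */
}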
Diffstat (limited to 'drivers/of/base.c')
-rw-r--r--  drivers/of/base.c  |  100
1 file changed, 56 insertions(+), 44 deletions(-)
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 16ee7a08e044..f86be5594a15 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -55,7 +55,7 @@ static DEFINE_MUTEX(of_aliases_mutex);
 /* use when traversing tree through the allnext, child, sibling,
  * or parent members of struct device_node.
  */
-DEFINE_RWLOCK(devtree_lock);
+DEFINE_RAW_SPINLOCK(devtree_lock);
 
 int of_n_addr_cells(struct device_node *np)
 {
@@ -188,10 +188,11 @@ struct property *of_find_property(const struct device_node *np,
                                       int *lenp)
 {
         struct property *pp;
+        unsigned long flags;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         pp = __of_find_property(np, name, lenp);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
         return pp;
 }
@@ -209,13 +210,13 @@ struct device_node *of_find_all_nodes(struct device_node *prev)
 {
         struct device_node *np;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock(&devtree_lock);
         np = prev ? prev->allnext : of_allnodes;
         for (; np != NULL; np = np->allnext)
                 if (of_node_get(np))
                         break;
         of_node_put(prev);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock(&devtree_lock);
         return np;
 }
 EXPORT_SYMBOL(of_find_all_nodes);
@@ -274,11 +275,12 @@ static int __of_device_is_compatible(const struct device_node *device,
 int of_device_is_compatible(const struct device_node *device,
                 const char *compat)
 {
+        unsigned long flags;
         int res;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         res = __of_device_is_compatible(device, compat);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return res;
 }
 EXPORT_SYMBOL(of_device_is_compatible);
@@ -340,13 +342,14 @@ EXPORT_SYMBOL(of_device_is_available);
 struct device_node *of_get_parent(const struct device_node *node)
 {
         struct device_node *np;
+        unsigned long flags;
 
         if (!node)
                 return NULL;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         np = of_node_get(node->parent);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return np;
 }
 EXPORT_SYMBOL(of_get_parent);
@@ -365,14 +368,15 @@ EXPORT_SYMBOL(of_get_parent);
 struct device_node *of_get_next_parent(struct device_node *node)
 {
         struct device_node *parent;
+        unsigned long flags;
 
         if (!node)
                 return NULL;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         parent = of_node_get(node->parent);
         of_node_put(node);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return parent;
 }
 
@@ -388,14 +392,15 @@ struct device_node *of_get_next_child(const struct device_node *node,
                                         struct device_node *prev)
 {
         struct device_node *next;
+        unsigned long flags;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         next = prev ? prev->sibling : node->child;
         for (; next; next = next->sibling)
                 if (of_node_get(next))
                         break;
         of_node_put(prev);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return next;
 }
 EXPORT_SYMBOL(of_get_next_child);
@@ -413,7 +418,7 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
 {
         struct device_node *next;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock(&devtree_lock);
         next = prev ? prev->sibling : node->child;
         for (; next; next = next->sibling) {
                 if (!of_device_is_available(next))
@@ -422,7 +427,7 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
                         break;
         }
         of_node_put(prev);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock(&devtree_lock);
         return next;
 }
 EXPORT_SYMBOL(of_get_next_available_child);
@@ -460,14 +465,15 @@ EXPORT_SYMBOL(of_get_child_by_name);
 struct device_node *of_find_node_by_path(const char *path)
 {
         struct device_node *np = of_allnodes;
+        unsigned long flags;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         for (; np; np = np->allnext) {
                 if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
                     && of_node_get(np))
                         break;
         }
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return np;
 }
 EXPORT_SYMBOL(of_find_node_by_path);
@@ -487,15 +493,16 @@ struct device_node *of_find_node_by_name(struct device_node *from,
                                         const char *name)
 {
         struct device_node *np;
+        unsigned long flags;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         np = from ? from->allnext : of_allnodes;
         for (; np; np = np->allnext)
                 if (np->name && (of_node_cmp(np->name, name) == 0)
                     && of_node_get(np))
                         break;
         of_node_put(from);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return np;
 }
 EXPORT_SYMBOL(of_find_node_by_name);
@@ -516,15 +523,16 @@ struct device_node *of_find_node_by_type(struct device_node *from,
                                         const char *type)
 {
         struct device_node *np;
+        unsigned long flags;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         np = from ? from->allnext : of_allnodes;
         for (; np; np = np->allnext)
                 if (np->type && (of_node_cmp(np->type, type) == 0)
                     && of_node_get(np))
                         break;
         of_node_put(from);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return np;
 }
 EXPORT_SYMBOL(of_find_node_by_type);
@@ -547,8 +555,9 @@ struct device_node *of_find_compatible_node(struct device_node *from,
         const char *type, const char *compatible)
 {
         struct device_node *np;
+        unsigned long flags;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         np = from ? from->allnext : of_allnodes;
         for (; np; np = np->allnext) {
                 if (type
@@ -559,7 +568,7 @@ struct device_node *of_find_compatible_node(struct device_node *from,
                         break;
         }
         of_node_put(from);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return np;
 }
 EXPORT_SYMBOL(of_find_compatible_node);
@@ -581,8 +590,9 @@ struct device_node *of_find_node_with_property(struct device_node *from,
 {
         struct device_node *np;
         struct property *pp;
+        unsigned long flags;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         np = from ? from->allnext : of_allnodes;
         for (; np; np = np->allnext) {
                 for (pp = np->properties; pp; pp = pp->next) {
@@ -594,7 +604,7 @@ struct device_node *of_find_node_with_property(struct device_node *from,
         }
 out:
         of_node_put(from);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return np;
 }
 EXPORT_SYMBOL(of_find_node_with_property);
@@ -635,10 +645,11 @@ const struct of_device_id *of_match_node(const struct of_device_id *matches,
                                         const struct device_node *node)
 {
         const struct of_device_id *match;
+        unsigned long flags;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         match = __of_match_node(matches, node);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return match;
 }
 EXPORT_SYMBOL(of_match_node);
@@ -662,11 +673,12 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from,
 {
         struct device_node *np;
         const struct of_device_id *m;
+        unsigned long flags;
 
         if (match)
                 *match = NULL;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         np = from ? from->allnext : of_allnodes;
         for (; np; np = np->allnext) {
                 m = __of_match_node(matches, np);
@@ -677,7 +689,7 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from,
                 }
         }
         of_node_put(from);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
         return np;
 }
 EXPORT_SYMBOL(of_find_matching_node_and_match);
@@ -720,12 +732,12 @@ struct device_node *of_find_node_by_phandle(phandle handle)
 {
         struct device_node *np;
 
-        read_lock(&devtree_lock);
+        raw_spin_lock(&devtree_lock);
         for (np = of_allnodes; np; np = np->allnext)
                 if (np->phandle == handle)
                         break;
         of_node_get(np);
-        read_unlock(&devtree_lock);
+        raw_spin_unlock(&devtree_lock);
         return np;
 }
 EXPORT_SYMBOL(of_find_node_by_phandle);
@@ -1197,18 +1209,18 @@ int of_add_property(struct device_node *np, struct property *prop)
                 return rc;
 
         prop->next = NULL;
-        write_lock_irqsave(&devtree_lock, flags);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         next = &np->properties;
         while (*next) {
                 if (strcmp(prop->name, (*next)->name) == 0) {
                         /* duplicate ! don't insert it */
-                        write_unlock_irqrestore(&devtree_lock, flags);
+                        raw_spin_unlock_irqrestore(&devtree_lock, flags);
                         return -1;
                 }
                 next = &(*next)->next;
         }
         *next = prop;
-        write_unlock_irqrestore(&devtree_lock, flags);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
 #ifdef CONFIG_PROC_DEVICETREE
         /* try to add to proc as well if it was initialized */
@@ -1238,7 +1250,7 @@ int of_remove_property(struct device_node *np, struct property *prop)
         if (rc)
                 return rc;
 
-        write_lock_irqsave(&devtree_lock, flags);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         next = &np->properties;
         while (*next) {
                 if (*next == prop) {
@@ -1251,7 +1263,7 @@ int of_remove_property(struct device_node *np, struct property *prop)
                 }
                 next = &(*next)->next;
         }
-        write_unlock_irqrestore(&devtree_lock, flags);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
         if (!found)
                 return -ENODEV;
@@ -1291,7 +1303,7 @@ int of_update_property(struct device_node *np, struct property *newprop)
         if (!oldprop)
                 return of_add_property(np, newprop);
 
-        write_lock_irqsave(&devtree_lock, flags);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         next = &np->properties;
         while (*next) {
                 if (*next == oldprop) {
@@ -1305,7 +1317,7 @@ int of_update_property(struct device_node *np, struct property *newprop)
                 }
                 next = &(*next)->next;
         }
-        write_unlock_irqrestore(&devtree_lock, flags);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
         if (!found)
                 return -ENODEV;
@@ -1378,12 +1390,12 @@ int of_attach_node(struct device_node *np)
         if (rc)
                 return rc;
 
-        write_lock_irqsave(&devtree_lock, flags);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
         np->sibling = np->parent->child;
         np->allnext = of_allnodes;
         np->parent->child = np;
         of_allnodes = np;
-        write_unlock_irqrestore(&devtree_lock, flags);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
         of_add_proc_dt_entry(np);
         return 0;
@@ -1426,17 +1438,17 @@ int of_detach_node(struct device_node *np)
         if (rc)
                 return rc;
 
-        write_lock_irqsave(&devtree_lock, flags);
+        raw_spin_lock_irqsave(&devtree_lock, flags);
 
         if (of_node_check_flag(np, OF_DETACHED)) {
                 /* someone already detached it */
-                write_unlock_irqrestore(&devtree_lock, flags);
+                raw_spin_unlock_irqrestore(&devtree_lock, flags);
                 return rc;
         }
 
         parent = np->parent;
         if (!parent) {
-                write_unlock_irqrestore(&devtree_lock, flags);
+                raw_spin_unlock_irqrestore(&devtree_lock, flags);
                 return rc;
         }
 
@@ -1463,7 +1475,7 @@ int of_detach_node(struct device_node *np)
         }
 
         of_node_set_flag(np, OF_DETACHED);
-        write_unlock_irqrestore(&devtree_lock, flags);
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
         of_remove_proc_dt_entry(np);
         return rc;