Diffstat (limited to 'arch/powerpc/kernel/prom.c')
-rw-r--r--  arch/powerpc/kernel/prom.c | 2141
1 files changed, 2141 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
new file mode 100644
index 000000000000..dc3d24ea3bff
--- /dev/null
+++ b/arch/powerpc/kernel/prom.c
@@ -0,0 +1,2141 @@
1/*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <linux/module.h>
32
33#include <asm/prom.h>
34#include <asm/rtas.h>
35#include <asm/lmb.h>
36#include <asm/page.h>
37#include <asm/processor.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/smp.h>
41#include <asm/system.h>
42#include <asm/mmu.h>
43#include <asm/pgtable.h>
44#include <asm/pci.h>
45#include <asm/iommu.h>
46#include <asm/btext.h>
47#include <asm/sections.h>
48#include <asm/machdep.h>
49#include <asm/pSeries_reconfig.h>
50
51#ifdef DEBUG
52#define DBG(fmt...) printk(KERN_ERR fmt)
53#else
54#define DBG(fmt...)
55#endif
56
57struct pci_reg_property {
58 struct pci_address addr;
59 u32 size_hi;
60 u32 size_lo;
61};
62
63struct isa_reg_property {
64 u32 space;
65 u32 address;
66 u32 size;
67};
68
69
70typedef int interpret_func(struct device_node *, unsigned long *,
71 int, int, int);
72
73extern struct rtas_t rtas;
74extern struct lmb lmb;
75extern unsigned long klimit;
76
77static unsigned long memory_limit;
78
79static int __initdata dt_root_addr_cells;
80static int __initdata dt_root_size_cells;
81
82#ifdef CONFIG_PPC64
83static int __initdata iommu_is_off;
84int __initdata iommu_force_on;
85extern unsigned long tce_alloc_start, tce_alloc_end;
86#endif
87
88typedef u32 cell_t;
89
90#if 0
91static struct boot_param_header *initial_boot_params __initdata;
92#else
93struct boot_param_header *initial_boot_params;
94#endif
95
96static struct device_node *allnodes = NULL;
97
98/* use when traversing tree through the allnext, child, sibling,
99 * or parent members of struct device_node.
100 */
101static DEFINE_RWLOCK(devtree_lock);
102
103/* export that to outside world */
104struct device_node *of_chosen;
105
106struct device_node *dflt_interrupt_controller;
107int num_interrupt_controllers;
108
109u32 rtas_data;
110u32 rtas_entry;
111
112/*
113 * Wrapper for allocating memory for various data that needs to be
114 * attached to device nodes as they are processed at boot or when
115 * added to the device tree later (e.g. DLPAR). At boot there is
116 * already a region reserved so we just increment *mem_start by size;
117 * otherwise we call kmalloc.
118 */
119static void * prom_alloc(unsigned long size, unsigned long *mem_start)
120{
121 unsigned long tmp;
122
123 if (!mem_start)
124 return kmalloc(size, GFP_KERNEL);
125
126 tmp = *mem_start;
127 *mem_start += size;
128 return (void *)tmp;
129}
130
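/*
 * Illustrative sketch (kept out of the build): how a caller might use
 * prom_alloc() in the two modes described above.  The allocation counts
 * are made up for the example.
 */
#if 0
static void prom_alloc_example(struct device_node *np, unsigned long *mem_start)
{
	/* At boot, mem_start points into the reserved region: bump-allocate. */
	np->intrs = prom_alloc(4 * sizeof(*np->intrs), mem_start);

	/* Later (e.g. DLPAR node add), pass NULL so kmalloc() is used instead. */
	np->addrs = prom_alloc(2 * sizeof(*np->addrs), NULL);
}
#endif
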
131/*
132 * Find the device_node with a given phandle.
133 */
134static struct device_node * find_phandle(phandle ph)
135{
136 struct device_node *np;
137
138 for (np = allnodes; np != 0; np = np->allnext)
139 if (np->linux_phandle == ph)
140 return np;
141 return NULL;
142}
143
144/*
145 * Find the interrupt parent of a node.
146 */
147static struct device_node * __devinit intr_parent(struct device_node *p)
148{
149 phandle *parp;
150
151 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
152 if (parp == NULL)
153 return p->parent;
154 p = find_phandle(*parp);
155 if (p != NULL)
156 return p;
157 /*
158 * On a powermac booted with BootX, we don't get to know the
159 * phandles for any nodes, so find_phandle will return NULL.
160 * Fortunately these machines only have one interrupt controller
161 * so there isn't in fact any ambiguity. -- paulus
162 */
163 if (num_interrupt_controllers == 1)
164 p = dflt_interrupt_controller;
165 return p;
166}
167
168/*
169 * Find out the size of each entry of the interrupts property
170 * for a node.
171 */
172int __devinit prom_n_intr_cells(struct device_node *np)
173{
174 struct device_node *p;
175 unsigned int *icp;
176
177 for (p = np; (p = intr_parent(p)) != NULL; ) {
178 icp = (unsigned int *)
179 get_property(p, "#interrupt-cells", NULL);
180 if (icp != NULL)
181 return *icp;
182 if (get_property(p, "interrupt-controller", NULL) != NULL
183 || get_property(p, "interrupt-map", NULL) != NULL) {
184 printk("oops, node %s doesn't have #interrupt-cells\n",
185 p->full_name);
186 return 1;
187 }
188 }
189#ifdef DEBUG_IRQ
190 printk("prom_n_intr_cells failed for %s\n", np->full_name);
191#endif
192 return 1;
193}
194
195/*
196 * Map an interrupt from a device up to the platform interrupt
197 * descriptor.
198 */
199static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
200 struct device_node *np, unsigned int *ints,
201 int nintrc)
202{
203 struct device_node *p, *ipar;
204 unsigned int *imap, *imask, *ip;
205 int i, imaplen, match;
206 int newintrc = 0, newaddrc = 0;
207 unsigned int *reg;
208 int naddrc;
209
210 reg = (unsigned int *) get_property(np, "reg", NULL);
211 naddrc = prom_n_addr_cells(np);
212 p = intr_parent(np);
213 while (p != NULL) {
214 if (get_property(p, "interrupt-controller", NULL) != NULL)
215 /* this node is an interrupt controller, stop here */
216 break;
217 imap = (unsigned int *)
218 get_property(p, "interrupt-map", &imaplen);
219 if (imap == NULL) {
220 p = intr_parent(p);
221 continue;
222 }
223 imask = (unsigned int *)
224 get_property(p, "interrupt-map-mask", NULL);
225 if (imask == NULL) {
226 printk("oops, %s has interrupt-map but no mask\n",
227 p->full_name);
228 return 0;
229 }
230 imaplen /= sizeof(unsigned int);
231 match = 0;
232 ipar = NULL;
233 while (imaplen > 0 && !match) {
234 /* check the child-interrupt field */
235 match = 1;
236 for (i = 0; i < naddrc && match; ++i)
237 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
238 for (; i < naddrc + nintrc && match; ++i)
239 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
240 imap += naddrc + nintrc;
241 imaplen -= naddrc + nintrc;
242 /* grab the interrupt parent */
243 ipar = find_phandle((phandle) *imap++);
244 --imaplen;
245 if (ipar == NULL && num_interrupt_controllers == 1)
246 /* cope with BootX not giving us phandles */
247 ipar = dflt_interrupt_controller;
248 if (ipar == NULL) {
249 printk("oops, no int parent %x in map of %s\n",
250 imap[-1], p->full_name);
251 return 0;
252 }
253 /* find the parent's # addr and intr cells */
254 ip = (unsigned int *)
255 get_property(ipar, "#interrupt-cells", NULL);
256 if (ip == NULL) {
257 printk("oops, no #interrupt-cells on %s\n",
258 ipar->full_name);
259 return 0;
260 }
261 newintrc = *ip;
262 ip = (unsigned int *)
263 get_property(ipar, "#address-cells", NULL);
264 newaddrc = (ip == NULL)? 0: *ip;
265 imap += newaddrc + newintrc;
266 imaplen -= newaddrc + newintrc;
267 }
268 if (imaplen < 0) {
269 printk("oops, error decoding int-map on %s, len=%d\n",
270 p->full_name, imaplen);
271 return 0;
272 }
273 if (!match) {
274#ifdef DEBUG_IRQ
275 printk("oops, no match in %s int-map for %s\n",
276 p->full_name, np->full_name);
277#endif
278 return 0;
279 }
280 p = ipar;
281 naddrc = newaddrc;
282 nintrc = newintrc;
283 ints = imap - nintrc;
284 reg = ints - naddrc;
285 }
286 if (p == NULL) {
287#ifdef DEBUG_IRQ
288 printk("hmmm, int tree for %s doesn't have ctrler\n",
289 np->full_name);
290#endif
291 return 0;
292 }
293 *irq = ints;
294 *ictrler = p;
295 return nintrc;
296}
297
298static int __devinit finish_node_interrupts(struct device_node *np,
299 unsigned long *mem_start,
300 int measure_only)
301{
302 unsigned int *ints;
303 int intlen, intrcells, intrcount;
304 int i, j, n;
305 unsigned int *irq, virq;
306 struct device_node *ic;
307
308 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
309 if (ints == NULL)
310 return 0;
311 intrcells = prom_n_intr_cells(np);
312 intlen /= intrcells * sizeof(unsigned int);
313
314 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
315 if (!np->intrs)
316 return -ENOMEM;
317
318 if (measure_only)
319 return 0;
320
321 intrcount = 0;
322 for (i = 0; i < intlen; ++i, ints += intrcells) {
323 n = map_interrupt(&irq, &ic, np, ints, intrcells);
324 if (n <= 0)
325 continue;
326
327 /* don't map IRQ numbers under a cascaded 8259 controller */
328 if (ic && device_is_compatible(ic, "chrp,iic")) {
329 np->intrs[intrcount].line = irq[0];
330 } else {
331#ifdef CONFIG_PPC64
332 virq = virt_irq_create_mapping(irq[0]);
333 if (virq == NO_IRQ) {
334 printk(KERN_CRIT "Could not allocate interrupt"
335 " number for %s\n", np->full_name);
336 continue;
337 }
338 virq = irq_offset_up(virq);
339#else
340 virq = irq[0];
341#endif
342 np->intrs[intrcount].line = virq;
343 }
344
345#ifdef CONFIG_PPC64
346 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
347 if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
348 char *name = get_property(ic->parent, "name", NULL);
349 if (name && !strcmp(name, "u3"))
350 np->intrs[intrcount].line += 128;
351 else if (!(name && !strcmp(name, "mac-io")))
352 /* ignore other cascaded controllers, such as
353 the k2-sata-root */
354 break;
355 }
356#endif
357 np->intrs[intrcount].sense = 1;
358 if (n > 1)
359 np->intrs[intrcount].sense = irq[1];
360 if (n > 2) {
361 printk("hmmm, got %d intr cells for %s:", n,
362 np->full_name);
363 for (j = 0; j < n; ++j)
364 printk(" %d", irq[j]);
365 printk("\n");
366 }
367 ++intrcount;
368 }
369 np->n_intrs = intrcount;
370
371 return 0;
372}
373
374static int __devinit interpret_pci_props(struct device_node *np,
375 unsigned long *mem_start,
376 int naddrc, int nsizec,
377 int measure_only)
378{
379 struct address_range *adr;
380 struct pci_reg_property *pci_addrs;
381 int i, l, n_addrs;
382
383 pci_addrs = (struct pci_reg_property *)
384 get_property(np, "assigned-addresses", &l);
385 if (!pci_addrs)
386 return 0;
387
388 n_addrs = l / sizeof(*pci_addrs);
389
390 adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
391 if (!adr)
392 return -ENOMEM;
393
394 if (measure_only)
395 return 0;
396
397 np->addrs = adr;
398 np->n_addrs = n_addrs;
399
400 for (i = 0; i < n_addrs; i++) {
401 adr[i].space = pci_addrs[i].addr.a_hi;
402 adr[i].address = pci_addrs[i].addr.a_lo |
403 ((u64)pci_addrs[i].addr.a_mid << 32);
404 adr[i].size = pci_addrs[i].size_lo;
405 }
406
407 return 0;
408}
409
410static int __init interpret_dbdma_props(struct device_node *np,
411 unsigned long *mem_start,
412 int naddrc, int nsizec,
413 int measure_only)
414{
415 struct reg_property32 *rp;
416 struct address_range *adr;
417 unsigned long base_address;
418 int i, l;
419 struct device_node *db;
420
421 base_address = 0;
422 if (!measure_only) {
423 for (db = np->parent; db != NULL; db = db->parent) {
424 if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
425 base_address = db->addrs[0].address;
426 break;
427 }
428 }
429 }
430
431 rp = (struct reg_property32 *) get_property(np, "reg", &l);
432 if (rp != 0 && l >= sizeof(struct reg_property32)) {
433 i = 0;
434 adr = (struct address_range *) (*mem_start);
435 while ((l -= sizeof(struct reg_property32)) >= 0) {
436 if (!measure_only) {
437 adr[i].space = 2;
438 adr[i].address = rp[i].address + base_address;
439 adr[i].size = rp[i].size;
440 }
441 ++i;
442 }
443 np->addrs = adr;
444 np->n_addrs = i;
445 (*mem_start) += i * sizeof(struct address_range);
446 }
447
448 return 0;
449}
450
451static int __init interpret_macio_props(struct device_node *np,
452 unsigned long *mem_start,
453 int naddrc, int nsizec,
454 int measure_only)
455{
456 struct reg_property32 *rp;
457 struct address_range *adr;
458 unsigned long base_address;
459 int i, l;
460 struct device_node *db;
461
462 base_address = 0;
463 if (!measure_only) {
464 for (db = np->parent; db != NULL; db = db->parent) {
465 if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
466 base_address = db->addrs[0].address;
467 break;
468 }
469 }
470 }
471
472 rp = (struct reg_property32 *) get_property(np, "reg", &l);
473 if (rp != 0 && l >= sizeof(struct reg_property32)) {
474 i = 0;
475 adr = (struct address_range *) (*mem_start);
476 while ((l -= sizeof(struct reg_property32)) >= 0) {
477 if (!measure_only) {
478 adr[i].space = 2;
479 adr[i].address = rp[i].address + base_address;
480 adr[i].size = rp[i].size;
481 }
482 ++i;
483 }
484 np->addrs = adr;
485 np->n_addrs = i;
486 (*mem_start) += i * sizeof(struct address_range);
487 }
488
489 return 0;
490}
491
492static int __init interpret_isa_props(struct device_node *np,
493 unsigned long *mem_start,
494 int naddrc, int nsizec,
495 int measure_only)
496{
497 struct isa_reg_property *rp;
498 struct address_range *adr;
499 int i, l;
500
501 rp = (struct isa_reg_property *) get_property(np, "reg", &l);
502 if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
503 i = 0;
504 adr = (struct address_range *) (*mem_start);
505 while ((l -= sizeof(struct isa_reg_property)) >= 0) {
506 if (!measure_only) {
507 adr[i].space = rp[i].space;
508 adr[i].address = rp[i].address;
509 adr[i].size = rp[i].size;
510 }
511 ++i;
512 }
513 np->addrs = adr;
514 np->n_addrs = i;
515 (*mem_start) += i * sizeof(struct address_range);
516 }
517
518 return 0;
519}
520
521static int __init interpret_root_props(struct device_node *np,
522 unsigned long *mem_start,
523 int naddrc, int nsizec,
524 int measure_only)
525{
526 struct address_range *adr;
527 int i, l;
528 unsigned int *rp;
529 int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
530
531 rp = (unsigned int *) get_property(np, "reg", &l);
532 if (rp != 0 && l >= rpsize) {
533 i = 0;
534 adr = (struct address_range *) (*mem_start);
535 while ((l -= rpsize) >= 0) {
536 if (!measure_only) {
537 adr[i].space = 0;
538 adr[i].address = rp[naddrc - 1];
539 adr[i].size = rp[naddrc + nsizec - 1];
540 }
541 ++i;
542 rp += naddrc + nsizec;
543 }
544 np->addrs = adr;
545 np->n_addrs = i;
546 (*mem_start) += i * sizeof(struct address_range);
547 }
548
549 return 0;
550}
551
552static int __devinit finish_node(struct device_node *np,
553 unsigned long *mem_start,
554 interpret_func *ifunc,
555 int naddrc, int nsizec,
556 int measure_only)
557{
558 struct device_node *child;
559 int *ip, rc = 0;
560
561 /* get the device addresses and interrupts */
562 if (ifunc != NULL)
563 rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
564 if (rc)
565 goto out;
566
567 rc = finish_node_interrupts(np, mem_start, measure_only);
568 if (rc)
569 goto out;
570
571 /* Look for #address-cells and #size-cells properties. */
572 ip = (int *) get_property(np, "#address-cells", NULL);
573 if (ip != NULL)
574 naddrc = *ip;
575 ip = (int *) get_property(np, "#size-cells", NULL);
576 if (ip != NULL)
577 nsizec = *ip;
578
579 if (!strcmp(np->name, "device-tree") || np->parent == NULL)
580 ifunc = interpret_root_props;
581 else if (np->type == 0)
582 ifunc = NULL;
583 else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
584 ifunc = interpret_pci_props;
585 else if (!strcmp(np->type, "dbdma"))
586 ifunc = interpret_dbdma_props;
587 else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
588 ifunc = interpret_macio_props;
589 else if (!strcmp(np->type, "isa"))
590 ifunc = interpret_isa_props;
591 else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
592 ifunc = interpret_root_props;
593 else if (!((ifunc == interpret_dbdma_props
594 || ifunc == interpret_macio_props)
595 && (!strcmp(np->type, "escc")
596 || !strcmp(np->type, "media-bay"))))
597 ifunc = NULL;
598
599 for (child = np->child; child != NULL; child = child->sibling) {
600 rc = finish_node(child, mem_start, ifunc,
601 naddrc, nsizec, measure_only);
602 if (rc)
603 goto out;
604 }
605out:
606 return rc;
607}
608
609static void __init scan_interrupt_controllers(void)
610{
611 struct device_node *np;
612 int n = 0;
613 char *name, *ic;
614 int iclen;
615
616 for (np = allnodes; np != NULL; np = np->allnext) {
617 ic = get_property(np, "interrupt-controller", &iclen);
618 name = get_property(np, "name", NULL);
619 /* checking iclen makes sure we don't get a false
620 match on /chosen.interrupt_controller */
621 if ((name != NULL
622 && strcmp(name, "interrupt-controller") == 0)
623 || (ic != NULL && iclen == 0
624 && strcmp(name, "AppleKiwi"))) {
625 if (n == 0)
626 dflt_interrupt_controller = np;
627 ++n;
628 }
629 }
630 num_interrupt_controllers = n;
631}
632
633/**
634 * finish_device_tree is called once things are running normally
635 * (i.e. with text and data mapped to the address they were linked at).
636 * It traverses the device tree and fills in some of the additional
637 * fields in each node, like {n_}addrs and {n_}intrs; the virt interrupt
638 * mapping is also initialized at this point.
639 */
640void __init finish_device_tree(void)
641{
642 unsigned long start, end, size = 0;
643
644 DBG(" -> finish_device_tree\n");
645
646#ifdef CONFIG_PPC64
647 /* Initialize virtual IRQ map */
648 virt_irq_init();
649#endif
650 scan_interrupt_controllers();
651
652 /*
653 * Finish device-tree (pre-parsing some properties etc...)
654 * We do this in 2 passes. One with "measure_only" set, which
655 * will only measure the amount of memory needed, then we can
656 * allocate that memory, and call finish_node again. However,
657 * we must be careful as most routines will fail nowadays when
658 * prom_alloc() returns 0, so we must make sure our first pass
659 * doesn't start at 0. We pre-initialize size to 16 for that
660 * reason and then remove those additional 16 bytes
661 */
662 size = 16;
663 finish_node(allnodes, &size, NULL, 0, 0, 1);
664 size -= 16;
665 end = start = (unsigned long) __va(lmb_alloc(size, 128));
666 finish_node(allnodes, &end, NULL, 0, 0, 0);
667 BUG_ON(end != start + size);
668
669 DBG(" <- finish_device_tree\n");
670}
671
672static inline char *find_flat_dt_string(u32 offset)
673{
674 return ((char *)initial_boot_params) +
675 initial_boot_params->off_dt_strings + offset;
676}
677
678/**
679 * This function is used to scan the flattened device-tree; it is
680 * used to extract the memory information at boot, before we can
681 * unflatten the tree.
682 */
683static int __init scan_flat_dt(int (*it)(unsigned long node,
684 const char *uname, int depth,
685 void *data),
686 void *data)
687{
688 unsigned long p = ((unsigned long)initial_boot_params) +
689 initial_boot_params->off_dt_struct;
690 int rc = 0;
691 int depth = -1;
692
693 do {
694 u32 tag = *((u32 *)p);
695 char *pathp;
696
697 p += 4;
698 if (tag == OF_DT_END_NODE) {
699 depth --;
700 continue;
701 }
702 if (tag == OF_DT_NOP)
703 continue;
704 if (tag == OF_DT_END)
705 break;
706 if (tag == OF_DT_PROP) {
707 u32 sz = *((u32 *)p);
708 p += 8;
709 if (initial_boot_params->version < 0x10)
710 p = _ALIGN(p, sz >= 8 ? 8 : 4);
711 p += sz;
712 p = _ALIGN(p, 4);
713 continue;
714 }
715 if (tag != OF_DT_BEGIN_NODE) {
716 printk(KERN_WARNING "Invalid tag %x scanning flattened"
717 " device tree !\n", tag);
718 return -EINVAL;
719 }
720 depth++;
721 pathp = (char *)p;
722 p = _ALIGN(p + strlen(pathp) + 1, 4);
723 if ((*pathp) == '/') {
724 char *lp, *np;
725 for (lp = NULL, np = pathp; *np; np++)
726 if ((*np) == '/')
727 lp = np+1;
728 if (lp != NULL)
729 pathp = lp;
730 }
731 rc = it(p, pathp, depth, data);
732 if (rc != 0)
733 break;
734 } while(1);
735
736 return rc;
737}
738
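/*
 * Illustrative sketch (kept out of the build): the shape of a scan_flat_dt()
 * callback.  Returning non-zero stops the walk early, as the real callbacks
 * further down (early_init_dt_scan_chosen() and friends) do; the node name
 * checked here is only an example.
 */
#if 0
static int __init example_scan_cb(unsigned long node, const char *uname,
				  int depth, void *data)
{
	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;	/* not interested, keep scanning */

	/* ... inspect properties with get_flat_dt_prop(node, ...) ... */
	return 1;		/* found what we wanted, stop the scan */
}
#endif
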
739/**
740 * This function can be used within a scan_flat_dt() callback to get
741 * access to properties.
742 */
743static void* __init get_flat_dt_prop(unsigned long node, const char *name,
744 unsigned long *size)
745{
746 unsigned long p = node;
747
748 do {
749 u32 tag = *((u32 *)p);
750 u32 sz, noff;
751 const char *nstr;
752
753 p += 4;
754 if (tag == OF_DT_NOP)
755 continue;
756 if (tag != OF_DT_PROP)
757 return NULL;
758
759 sz = *((u32 *)p);
760 noff = *((u32 *)(p + 4));
761 p += 8;
762 if (initial_boot_params->version < 0x10)
763 p = _ALIGN(p, sz >= 8 ? 8 : 4);
764
765 nstr = find_flat_dt_string(noff);
766 if (nstr == NULL) {
767 printk(KERN_WARNING "Can't find property index"
768 " name !\n");
769 return NULL;
770 }
771 if (strcmp(name, nstr) == 0) {
772 if (size)
773 *size = sz;
774 return (void *)p;
775 }
776 p += sz;
777 p = _ALIGN(p, 4);
778 } while(1);
779}
780
781static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
782 unsigned long align)
783{
784 void *res;
785
786 *mem = _ALIGN(*mem, align);
787 res = (void *)*mem;
788 *mem += size;
789
790 return res;
791}
792
793static unsigned long __init unflatten_dt_node(unsigned long mem,
794 unsigned long *p,
795 struct device_node *dad,
796 struct device_node ***allnextpp,
797 unsigned long fpsize)
798{
799 struct device_node *np;
800 struct property *pp, **prev_pp = NULL;
801 char *pathp;
802 u32 tag;
803 unsigned int l, allocl;
804 int has_name = 0;
805 int new_format = 0;
806
807 tag = *((u32 *)(*p));
808 if (tag != OF_DT_BEGIN_NODE) {
809 printk("Weird tag at start of node: %x\n", tag);
810 return mem;
811 }
812 *p += 4;
813 pathp = (char *)*p;
814 l = allocl = strlen(pathp) + 1;
815 *p = _ALIGN(*p + l, 4);
816
817 /* version 0x10 has a more compact unit name here instead of the full
818 * path. We accumulate the full path size using "fpsize"; we'll rebuild
819 * it later. We detect this because the first character of the name is
820 * not '/'.
821 */
822 if ((*pathp) != '/') {
823 new_format = 1;
824 if (fpsize == 0) {
825 /* root node: special case. fpsize accounts for path
826 * plus terminating zero. root node only has '/', so
827 * fpsize should be 2, but we want to avoid first-level
828 * nodes having two '/', so we use fpsize 1 here
829 */
830 fpsize = 1;
831 allocl = 2;
832 } else {
833 /* account for '/' and path size minus terminal 0
834 * already in 'l'
835 */
836 fpsize += l;
837 allocl = fpsize;
838 }
839 }
840
841
842 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
843 __alignof__(struct device_node));
844 if (allnextpp) {
845 memset(np, 0, sizeof(*np));
846 np->full_name = ((char*)np) + sizeof(struct device_node);
847 if (new_format) {
848 char *p = np->full_name;
849 /* rebuild full path for new format */
850 if (dad && dad->parent) {
851 strcpy(p, dad->full_name);
852#ifdef DEBUG
853 if ((strlen(p) + l + 1) != allocl) {
854 DBG("%s: p: %d, l: %d, a: %d\n",
855 pathp, strlen(p), l, allocl);
856 }
857#endif
858 p += strlen(p);
859 }
860 *(p++) = '/';
861 memcpy(p, pathp, l);
862 } else
863 memcpy(np->full_name, pathp, l);
864 prev_pp = &np->properties;
865 **allnextpp = np;
866 *allnextpp = &np->allnext;
867 if (dad != NULL) {
868 np->parent = dad;
869 /* we temporarily use the next field as `last_child'*/
870 if (dad->next == 0)
871 dad->child = np;
872 else
873 dad->next->sibling = np;
874 dad->next = np;
875 }
876 kref_init(&np->kref);
877 }
878 while(1) {
879 u32 sz, noff;
880 char *pname;
881
882 tag = *((u32 *)(*p));
883 if (tag == OF_DT_NOP) {
884 *p += 4;
885 continue;
886 }
887 if (tag != OF_DT_PROP)
888 break;
889 *p += 4;
890 sz = *((u32 *)(*p));
891 noff = *((u32 *)((*p) + 4));
892 *p += 8;
893 if (initial_boot_params->version < 0x10)
894 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
895
896 pname = find_flat_dt_string(noff);
897 if (pname == NULL) {
898 printk("Can't find property name in list !\n");
899 break;
900 }
901 if (strcmp(pname, "name") == 0)
902 has_name = 1;
903 l = strlen(pname) + 1;
904 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
905 __alignof__(struct property));
906 if (allnextpp) {
907 if (strcmp(pname, "linux,phandle") == 0) {
908 np->node = *((u32 *)*p);
909 if (np->linux_phandle == 0)
910 np->linux_phandle = np->node;
911 }
912 if (strcmp(pname, "ibm,phandle") == 0)
913 np->linux_phandle = *((u32 *)*p);
914 pp->name = pname;
915 pp->length = sz;
916 pp->value = (void *)*p;
917 *prev_pp = pp;
918 prev_pp = &pp->next;
919 }
920 *p = _ALIGN((*p) + sz, 4);
921 }
922 /* with version 0x10 we may not have the name property; recreate
923 * it here from the unit name if absent
924 */
925 if (!has_name) {
926 char *p = pathp, *ps = pathp, *pa = NULL;
927 int sz;
928
929 while (*p) {
930 if ((*p) == '@')
931 pa = p;
932 if ((*p) == '/')
933 ps = p + 1;
934 p++;
935 }
936 if (pa < ps)
937 pa = p;
938 sz = (pa - ps) + 1;
939 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
940 __alignof__(struct property));
941 if (allnextpp) {
942 pp->name = "name";
943 pp->length = sz;
944 pp->value = (unsigned char *)(pp + 1);
945 *prev_pp = pp;
946 prev_pp = &pp->next;
947 memcpy(pp->value, ps, sz - 1);
948 ((char *)pp->value)[sz - 1] = 0;
949 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
950 }
951 }
952 if (allnextpp) {
953 *prev_pp = NULL;
954 np->name = get_property(np, "name", NULL);
955 np->type = get_property(np, "device_type", NULL);
956
957 if (!np->name)
958 np->name = "<NULL>";
959 if (!np->type)
960 np->type = "<NULL>";
961 }
962 while (tag == OF_DT_BEGIN_NODE) {
963 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
964 tag = *((u32 *)(*p));
965 }
966 if (tag != OF_DT_END_NODE) {
967 printk("Weird tag at end of node: %x\n", tag);
968 return mem;
969 }
970 *p += 4;
971 return mem;
972}
973
974
975/**
976 * unflattens the device-tree passed by the firmware, creating the
977 * tree of struct device_node. It also fills the "name" and "type"
978 * pointers of the nodes so the normal device-tree walking functions
979 * can be used (this used to be done by finish_device_tree)
980 */
981void __init unflatten_device_tree(void)
982{
983 unsigned long start, mem, size;
984 struct device_node **allnextp = &allnodes;
985 char *p = NULL;
986 int l = 0;
987
988 DBG(" -> unflatten_device_tree()\n");
989
990 /* First pass, scan for size */
991 start = ((unsigned long)initial_boot_params) +
992 initial_boot_params->off_dt_struct;
993 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
994 size = (size | 3) + 1;
995
996 DBG(" size is %lx, allocating...\n", size);
997
998 /* Allocate memory for the expanded device tree */
999 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1000 if (!mem) {
1001 DBG("Couldn't allocate memory with lmb_alloc()!\n");
1002 panic("Couldn't allocate memory with lmb_alloc()!\n");
1003 }
1004 mem = (unsigned long) __va(mem);
1005
1006 ((u32 *)mem)[size / 4] = 0xdeadbeef;
1007
1008 DBG(" unflattening %lx...\n", mem);
1009
1010 /* Second pass, do actual unflattening */
1011 start = ((unsigned long)initial_boot_params) +
1012 initial_boot_params->off_dt_struct;
1013 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1014 if (*((u32 *)start) != OF_DT_END)
1015 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1016 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1017 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1018 ((u32 *)mem)[size / 4] );
1019 *allnextp = NULL;
1020
1021 /* Get pointer to OF "/chosen" node for use everywhere */
1022 of_chosen = of_find_node_by_path("/chosen");
1023
1024 /* Retrieve the command line */
1025 if (of_chosen != NULL) {
1026 p = (char *)get_property(of_chosen, "bootargs", &l);
1027 if (p != NULL && l > 0)
1028 strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1029 }
1030#ifdef CONFIG_CMDLINE
1031 if (l == 0 || (l == 1 && (*p) == 0))
1032 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1033#endif /* CONFIG_CMDLINE */
1034
1035 DBG("Command line is: %s\n", cmd_line);
1036
1037 DBG(" <- unflatten_device_tree()\n");
1038}
1039
1040
1041static int __init early_init_dt_scan_cpus(unsigned long node,
1042 const char *uname, int depth, void *data)
1043{
1044 char *type = get_flat_dt_prop(node, "device_type", NULL);
1045 u32 *prop;
1046 unsigned long size = 0;
1047
1048 /* We are scanning "cpu" nodes only */
1049 if (type == NULL || strcmp(type, "cpu") != 0)
1050 return 0;
1051
1052#ifdef CONFIG_PPC_PSERIES
1053 /* On LPAR, look for the first ibm,pft-size property for the hash table size
1054 */
1055 if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
1056 u32 *pft_size;
1057 pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
1058 if (pft_size != NULL) {
1059 /* pft_size[0] is the NUMA CEC cookie */
1060 ppc64_pft_size = pft_size[1];
1061 }
1062 }
1063#endif
1064
1065#ifdef CONFIG_PPC64
1066 if (initial_boot_params && initial_boot_params->version >= 2) {
1067 /* version 2 of the kexec param format adds the phys cpuid
1068 * of booted proc.
1069 */
1070 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1071 boot_cpuid = 0;
1072 } else {
1073 /* Check if it's the boot-cpu; if so, set its hw index in the paca now */
1074 if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
1075 u32 *prop = get_flat_dt_prop(node, "reg", NULL);
1076 set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
1077 boot_cpuid_phys = get_hard_smp_processor_id(0);
1078 }
1079 }
1080#endif
1081
1082#ifdef CONFIG_ALTIVEC
1083 /* Check if we have a VMX and eventually update CPU features */
1084 prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
1085 if (prop && (*prop) > 0) {
1086 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1087 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1088 }
1089
1090 /* Same goes for Apple's "altivec" property */
1091 prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
1092 if (prop) {
1093 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1094 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1095 }
1096#endif /* CONFIG_ALTIVEC */
1097
1098#ifdef CONFIG_PPC_PSERIES
1099 /*
1100 * Check for an SMT capable CPU and set the CPU feature. We do
1101 * this by looking at the size of the ibm,ppc-interrupt-server#s
1102 * property
1103 */
1104 prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1105 &size);
1106 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1107 if (prop && ((size / sizeof(u32)) > 1))
1108 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1109#endif
1110
1111 return 0;
1112}
1113
1114static int __init early_init_dt_scan_chosen(unsigned long node,
1115 const char *uname, int depth, void *data)
1116{
1117 u32 *prop;
1118 unsigned long *lprop;
1119
1120 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1121
1122 if (depth != 1 || strcmp(uname, "chosen") != 0)
1123 return 0;
1124
1125 /* get platform type */
1126 prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
1127 if (prop == NULL)
1128 return 0;
1129#ifdef CONFIG_PPC64
1130 systemcfg->platform = *prop;
1131#else
1132 _machine = *prop;
1133#endif
1134
1135#ifdef CONFIG_PPC64
1136 /* check if iommu is forced on or off */
1137 if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1138 iommu_is_off = 1;
1139 if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1140 iommu_force_on = 1;
1141#endif
1142
1143 lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
1144 if (lprop)
1145 memory_limit = *lprop;
1146
1147#ifdef CONFIG_PPC64
1148 lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1149 if (lprop)
1150 tce_alloc_start = *lprop;
1151 lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1152 if (lprop)
1153 tce_alloc_end = *lprop;
1154#endif
1155
1156#ifdef CONFIG_PPC_RTAS
1157 /* To help early debugging via the front panel, we retrieve a minimal
1158 * set of RTAS info now if available
1159 */
1160 {
1161 u64 *basep, *entryp;
1162
1163 basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
1164 entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1165 prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
1166 if (basep && entryp && prop) {
1167 rtas.base = *basep;
1168 rtas.entry = *entryp;
1169 rtas.size = *prop;
1170 }
1171 }
1172#endif /* CONFIG_PPC_RTAS */
1173
1174 /* break now */
1175 return 1;
1176}
1177
1178static int __init early_init_dt_scan_root(unsigned long node,
1179 const char *uname, int depth, void *data)
1180{
1181 u32 *prop;
1182
1183 if (depth != 0)
1184 return 0;
1185
1186 prop = get_flat_dt_prop(node, "#size-cells", NULL);
1187 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1188 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1189
1190 prop = get_flat_dt_prop(node, "#address-cells", NULL);
1191 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1192 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1193
1194 /* break now */
1195 return 1;
1196}
1197
1198static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1199{
1200 cell_t *p = *cellp;
1201 unsigned long r;
1202
1203 /* Ignore more than 2 cells */
1204 while (s > sizeof(unsigned long) / 4) {
1205 p++;
1206 s--;
1207 }
1208 r = *p++;
1209#ifdef CONFIG_PPC64
1210 if (s > 1) {
1211 r <<= 32;
1212 r |= *(p++);
1213 s--;
1214 }
1215#endif
1216
1217 *cellp = p;
1218 return r;
1219}
1220
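/*
 * Worked example (kept out of the build): with dt_root_addr_cells == 2 on a
 * 64-bit kernel, dt_mem_next_cell() combines two 32-bit cells high-to-low,
 * so { 0x00000001, 0x80000000 } is read back as 0x180000000.  The values
 * are made up for the example.
 */
#if 0
static void __init dt_mem_next_cell_example(void)
{
	cell_t cells[2] = { 0x00000001, 0x80000000 };
	cell_t *p = cells;
	unsigned long base = dt_mem_next_cell(2, &p);	/* 0x180000000 on ppc64 */
}
#endif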
1221
1222static int __init early_init_dt_scan_memory(unsigned long node,
1223 const char *uname, int depth, void *data)
1224{
1225 char *type = get_flat_dt_prop(node, "device_type", NULL);
1226 cell_t *reg, *endp;
1227 unsigned long l;
1228
1229 /* We are scanning "memory" nodes only */
1230 if (type == NULL || strcmp(type, "memory") != 0)
1231 return 0;
1232
1233 reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
1234 if (reg == NULL)
1235 return 0;
1236
1237 endp = reg + (l / sizeof(cell_t));
1238
1239 DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1240 uname, l, reg[0], reg[1], reg[2], reg[3]);
1241
1242 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1243 unsigned long base, size;
1244
1245 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1246 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1247
1248 if (size == 0)
1249 continue;
1250 DBG(" - %lx , %lx\n", base, size);
1251#ifdef CONFIG_PPC64
1252 if (iommu_is_off) {
1253 if (base >= 0x80000000ul)
1254 continue;
1255 if ((base + size) > 0x80000000ul)
1256 size = 0x80000000ul - base;
1257 }
1258#endif
1259 lmb_add(base, size);
1260 }
1261 return 0;
1262}
1263
1264static void __init early_reserve_mem(void)
1265{
1266 unsigned long base, size;
1267 unsigned long *reserve_map;
1268
1269 reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1270 initial_boot_params->off_mem_rsvmap);
1271 while (1) {
1272 base = *(reserve_map++);
1273 size = *(reserve_map++);
1274 if (size == 0)
1275 break;
1276 DBG("reserving: %lx -> %lx\n", base, size);
1277 lmb_reserve(base, size);
1278 }
1279
1280#if 0
1281 DBG("memory reserved, lmbs :\n");
1282 lmb_dump_all();
1283#endif
1284}
1285
1286void __init early_init_devtree(void *params)
1287{
1288 DBG(" -> early_init_devtree()\n");
1289
1290 /* Setup flat device-tree pointer */
1291 initial_boot_params = params;
1292
1293 /* Retrieve various information from the /chosen node of the
1294 * device-tree, including the platform type, initrd location and
1295 * size, TCE reserve, and more ...
1296 */
1297 scan_flat_dt(early_init_dt_scan_chosen, NULL);
1298
1299 /* Scan memory nodes and rebuild LMBs */
1300 lmb_init();
1301 scan_flat_dt(early_init_dt_scan_root, NULL);
1302 scan_flat_dt(early_init_dt_scan_memory, NULL);
1303 lmb_enforce_memory_limit(memory_limit);
1304 lmb_analyze();
1305#ifdef CONFIG_PPC64
1306 systemcfg->physicalMemorySize = lmb_phys_mem_size();
1307#endif
1308 lmb_reserve(0, __pa(klimit));
1309
1310 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1311
1312 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1313 early_reserve_mem();
1314
1315 DBG("Scanning CPUs ...\n");
1316
1317 /* Retrieve the hash table size from the flattened tree, plus other
1318 * CPU-related information (altivec support, boot CPU ID, ...)
1319 */
1320 scan_flat_dt(early_init_dt_scan_cpus, NULL);
1321
1322#ifdef CONFIG_PPC_PSERIES
1323 /* If hash size wasn't obtained above, we calculate it now based on
1324 * the total RAM size
1325 */
1326 if (ppc64_pft_size == 0) {
1327 unsigned long rnd_mem_size, pteg_count;
1328
1329 /* round mem_size up to next power of 2 */
1330 rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
1331 if (rnd_mem_size < systemcfg->physicalMemorySize)
1332 rnd_mem_size <<= 1;
1333
1334 /* # pages / 2 */
1335 pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
1336
1337 ppc64_pft_size = __ilog2(pteg_count << 7);
1338 }
1339
1340 DBG("Hash pftSize: %x\n", (int)ppc64_pft_size);
1341#endif
1342 DBG(" <- early_init_devtree()\n");
1343}
1344
1345#undef printk
1346
1347int
1348prom_n_addr_cells(struct device_node* np)
1349{
1350 int* ip;
1351 do {
1352 if (np->parent)
1353 np = np->parent;
1354 ip = (int *) get_property(np, "#address-cells", NULL);
1355 if (ip != NULL)
1356 return *ip;
1357 } while (np->parent);
1358 /* No #address-cells property for the root node, default to 1 */
1359 return 1;
1360}
1361
1362int
1363prom_n_size_cells(struct device_node* np)
1364{
1365 int* ip;
1366 do {
1367 if (np->parent)
1368 np = np->parent;
1369 ip = (int *) get_property(np, "#size-cells", NULL);
1370 if (ip != NULL)
1371 return *ip;
1372 } while (np->parent);
1373 /* No #size-cells property for the root node, default to 1 */
1374 return 1;
1375}
1376
1377/**
1378 * Work out the sense (active-low level / active-high edge)
1379 * of each interrupt from the device tree.
1380 */
1381void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1382{
1383 struct device_node *np;
1384 int i, j;
1385
1386 /* default to level-triggered */
1387 memset(senses, 1, max - off);
1388
1389 for (np = allnodes; np != 0; np = np->allnext) {
1390 for (j = 0; j < np->n_intrs; j++) {
1391 i = np->intrs[j].line;
1392 if (i >= off && i < max)
1393 senses[i-off] = np->intrs[j].sense ?
1394 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE :
1395 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE;
1396 }
1397 }
1398}
1399
1400/**
1401 * Construct and return a list of the device_nodes with a given name.
1402 */
1403struct device_node *find_devices(const char *name)
1404{
1405 struct device_node *head, **prevp, *np;
1406
1407 prevp = &head;
1408 for (np = allnodes; np != 0; np = np->allnext) {
1409 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1410 *prevp = np;
1411 prevp = &np->next;
1412 }
1413 }
1414 *prevp = NULL;
1415 return head;
1416}
1417EXPORT_SYMBOL(find_devices);
1418
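/*
 * Illustrative sketch (kept out of the build): the list returned by
 * find_devices() is chained through the ->next field, so callers usually
 * walk it like this.  The node name is only an example.
 */
#if 0
static void find_devices_example(void)
{
	struct device_node *np;

	for (np = find_devices("serial"); np != NULL; np = np->next)
		printk("found %s\n", np->full_name);
}
#endif
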
1419/**
1420 * Construct and return a list of the device_nodes with a given type.
1421 */
1422struct device_node *find_type_devices(const char *type)
1423{
1424 struct device_node *head, **prevp, *np;
1425
1426 prevp = &head;
1427 for (np = allnodes; np != 0; np = np->allnext) {
1428 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1429 *prevp = np;
1430 prevp = &np->next;
1431 }
1432 }
1433 *prevp = NULL;
1434 return head;
1435}
1436EXPORT_SYMBOL(find_type_devices);
1437
1438/**
1439 * Returns all nodes linked together
1440 */
1441struct device_node *find_all_nodes(void)
1442{
1443 struct device_node *head, **prevp, *np;
1444
1445 prevp = &head;
1446 for (np = allnodes; np != 0; np = np->allnext) {
1447 *prevp = np;
1448 prevp = &np->next;
1449 }
1450 *prevp = NULL;
1451 return head;
1452}
1453EXPORT_SYMBOL(find_all_nodes);
1454
1455/** Checks if the given "compat" string matches one of the strings in
1456 * the device's "compatible" property
1457 */
1458int device_is_compatible(struct device_node *device, const char *compat)
1459{
1460 const char* cp;
1461 int cplen, l;
1462
1463 cp = (char *) get_property(device, "compatible", &cplen);
1464 if (cp == NULL)
1465 return 0;
1466 while (cplen > 0) {
1467 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1468 return 1;
1469 l = strlen(cp) + 1;
1470 cp += l;
1471 cplen -= l;
1472 }
1473
1474 return 0;
1475}
1476EXPORT_SYMBOL(device_is_compatible);
1477
1478
1479/**
1480 * Indicates whether the root node has a given value in its
1481 * compatible property.
1482 */
1483int machine_is_compatible(const char *compat)
1484{
1485 struct device_node *root;
1486 int rc = 0;
1487
1488 root = of_find_node_by_path("/");
1489 if (root) {
1490 rc = device_is_compatible(root, compat);
1491 of_node_put(root);
1492 }
1493 return rc;
1494}
1495EXPORT_SYMBOL(machine_is_compatible);
1496
1497/**
1498 * Construct and return a list of the device_nodes with a given type
1499 * and compatible property.
1500 */
1501struct device_node *find_compatible_devices(const char *type,
1502 const char *compat)
1503{
1504 struct device_node *head, **prevp, *np;
1505
1506 prevp = &head;
1507 for (np = allnodes; np != 0; np = np->allnext) {
1508 if (type != NULL
1509 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1510 continue;
1511 if (device_is_compatible(np, compat)) {
1512 *prevp = np;
1513 prevp = &np->next;
1514 }
1515 }
1516 *prevp = NULL;
1517 return head;
1518}
1519EXPORT_SYMBOL(find_compatible_devices);
1520
1521/**
1522 * Find the device_node with a given full_name.
1523 */
1524struct device_node *find_path_device(const char *path)
1525{
1526 struct device_node *np;
1527
1528 for (np = allnodes; np != 0; np = np->allnext)
1529 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1530 return np;
1531 return NULL;
1532}
1533EXPORT_SYMBOL(find_path_device);
1534
1535/*******
1536 *
1537 * New implementation of the OF "find" APIs, return a refcounted
1538 * object, call of_node_put() when done. The device tree and list
1539 * are protected by a rw_lock.
1540 *
1541 * Note that property management will need some locking as well,
1542 * this isn't dealt with yet.
1543 *
1544 *******/
1545
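/*
 * Illustrative sketch (kept out of the build): the usual iteration idiom for
 * these refcounted finders.  Passing the previous result back in as "from"
 * makes the helper drop that reference for you; the final of_node_put()
 * handles both the break case (node still held) and the NULL case (no-op).
 * The device_type and compatible strings are only examples.
 */
#if 0
static void of_find_example(void)
{
	struct device_node *np = NULL;

	while ((np = of_find_node_by_type(np, "pci")) != NULL) {
		if (device_is_compatible(np, "u3-agp"))
			break;		/* np still holds a reference here */
	}
	of_node_put(np);		/* no-op if the loop ran to completion */
}
#endif
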
1546/**
1547 * of_find_node_by_name - Find a node by its "name" property
1548 * @from: The node to start searching from, or NULL; the node
1549 * you pass will not be searched, only the next one
1550 * will; typically, you pass what the previous call
1551 * returned. of_node_put() will be called on it
1552 * @name: The name string to match against
1553 *
1554 * Returns a node pointer with refcount incremented, use
1555 * of_node_put() on it when done.
1556 */
1557struct device_node *of_find_node_by_name(struct device_node *from,
1558 const char *name)
1559{
1560 struct device_node *np;
1561
1562 read_lock(&devtree_lock);
1563 np = from ? from->allnext : allnodes;
1564 for (; np != 0; np = np->allnext)
1565 if (np->name != 0 && strcasecmp(np->name, name) == 0
1566 && of_node_get(np))
1567 break;
1568 if (from)
1569 of_node_put(from);
1570 read_unlock(&devtree_lock);
1571 return np;
1572}
1573EXPORT_SYMBOL(of_find_node_by_name);
1574
1575/**
1576 * of_find_node_by_type - Find a node by its "device_type" property
1577 * @from: The node to start searching from, or NULL; the node
1578 * you pass will not be searched, only the next one
1579 * will; typically, you pass what the previous call
1580 * returned. of_node_put() will be called on it
1581 * @type: The type string to match against
1582 *
1583 * Returns a node pointer with refcount incremented, use
1584 * of_node_put() on it when done.
1585 */
1586struct device_node *of_find_node_by_type(struct device_node *from,
1587 const char *type)
1588{
1589 struct device_node *np;
1590
1591 read_lock(&devtree_lock);
1592 np = from ? from->allnext : allnodes;
1593 for (; np != 0; np = np->allnext)
1594 if (np->type != 0 && strcasecmp(np->type, type) == 0
1595 && of_node_get(np))
1596 break;
1597 if (from)
1598 of_node_put(from);
1599 read_unlock(&devtree_lock);
1600 return np;
1601}
1602EXPORT_SYMBOL(of_find_node_by_type);
1603
1604/**
1605 * of_find_compatible_node - Find a node based on type and one of the
1606 * tokens in its "compatible" property
1607 * @from: The node to start searching from, or NULL; the node
1608 * you pass will not be searched, only the next one
1609 * will; typically, you pass what the previous call
1610 * returned. of_node_put() will be called on it
1611 * @type: The type string to match "device_type" or NULL to ignore
1612 * @compatible: The string to match to one of the tokens in the device
1613 * "compatible" list.
1614 *
1615 * Returns a node pointer with refcount incremented, use
1616 * of_node_put() on it when done.
1617 */
1618struct device_node *of_find_compatible_node(struct device_node *from,
1619 const char *type, const char *compatible)
1620{
1621 struct device_node *np;
1622
1623 read_lock(&devtree_lock);
1624 np = from ? from->allnext : allnodes;
1625 for (; np != 0; np = np->allnext) {
1626 if (type != NULL
1627 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1628 continue;
1629 if (device_is_compatible(np, compatible) && of_node_get(np))
1630 break;
1631 }
1632 if (from)
1633 of_node_put(from);
1634 read_unlock(&devtree_lock);
1635 return np;
1636}
1637EXPORT_SYMBOL(of_find_compatible_node);
1638
1639/**
1640 * of_find_node_by_path - Find a node matching a full OF path
1641 * @path: The full path to match
1642 *
1643 * Returns a node pointer with refcount incremented, use
1644 * of_node_put() on it when done.
1645 */
1646struct device_node *of_find_node_by_path(const char *path)
1647{
1648 struct device_node *np = allnodes;
1649
1650 read_lock(&devtree_lock);
1651 for (; np != 0; np = np->allnext) {
1652 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1653 && of_node_get(np))
1654 break;
1655 }
1656 read_unlock(&devtree_lock);
1657 return np;
1658}
1659EXPORT_SYMBOL(of_find_node_by_path);
1660
1661/**
1662 * of_find_node_by_phandle - Find a node given a phandle
1663 * @handle: phandle of the node to find
1664 *
1665 * Returns a node pointer with refcount incremented, use
1666 * of_node_put() on it when done.
1667 */
1668struct device_node *of_find_node_by_phandle(phandle handle)
1669{
1670 struct device_node *np;
1671
1672 read_lock(&devtree_lock);
1673 for (np = allnodes; np != 0; np = np->allnext)
1674 if (np->linux_phandle == handle)
1675 break;
1676 if (np)
1677 of_node_get(np);
1678 read_unlock(&devtree_lock);
1679 return np;
1680}
1681EXPORT_SYMBOL(of_find_node_by_phandle);
1682
1683/**
1684 * of_find_all_nodes - Get next node in global list
1685 * @prev: Previous node or NULL to start iteration
1686 * of_node_put() will be called on it
1687 *
1688 * Returns a node pointer with refcount incremented, use
1689 * of_node_put() on it when done.
1690 */
1691struct device_node *of_find_all_nodes(struct device_node *prev)
1692{
1693 struct device_node *np;
1694
1695 read_lock(&devtree_lock);
1696 np = prev ? prev->allnext : allnodes;
1697 for (; np != 0; np = np->allnext)
1698 if (of_node_get(np))
1699 break;
1700 if (prev)
1701 of_node_put(prev);
1702 read_unlock(&devtree_lock);
1703 return np;
1704}
1705EXPORT_SYMBOL(of_find_all_nodes);
1706
1707/**
1708 * of_get_parent - Get a node's parent if any
1709 * @node: Node to get parent
1710 *
1711 * Returns a node pointer with refcount incremented, use
1712 * of_node_put() on it when done.
1713 */
1714struct device_node *of_get_parent(const struct device_node *node)
1715{
1716 struct device_node *np;
1717
1718 if (!node)
1719 return NULL;
1720
1721 read_lock(&devtree_lock);
1722 np = of_node_get(node->parent);
1723 read_unlock(&devtree_lock);
1724 return np;
1725}
1726EXPORT_SYMBOL(of_get_parent);
1727
1728/**
1729 * of_get_next_child - Iterate over a node's children
1730 * @node: parent node
1731 * @prev: previous child of the parent node, or NULL to get first
1732 *
1733 * Returns a node pointer with refcount incremented, use
1734 * of_node_put() on it when done.
1735 */
1736struct device_node *of_get_next_child(const struct device_node *node,
1737 struct device_node *prev)
1738{
1739 struct device_node *next;
1740
1741 read_lock(&devtree_lock);
1742 next = prev ? prev->sibling : node->child;
1743 for (; next != 0; next = next->sibling)
1744 if (of_node_get(next))
1745 break;
1746 if (prev)
1747 of_node_put(prev);
1748 read_unlock(&devtree_lock);
1749 return next;
1750}
1751EXPORT_SYMBOL(of_get_next_child);
1752
1753/**
1754 * of_node_get - Increment refcount of a node
1755 * @node: Node to inc refcount, NULL is supported to
1756 * simplify writing of callers
1757 *
1758 * Returns node.
1759 */
1760struct device_node *of_node_get(struct device_node *node)
1761{
1762 if (node)
1763 kref_get(&node->kref);
1764 return node;
1765}
1766EXPORT_SYMBOL(of_node_get);
1767
1768static inline struct device_node * kref_to_device_node(struct kref *kref)
1769{
1770 return container_of(kref, struct device_node, kref);
1771}
1772
1773/**
1774 * of_node_release - release a dynamically allocated node
1775 * @kref: kref element of the node to be released
1776 *
1777 * In of_node_put() this function is passed to kref_put()
1778 * as the destructor.
1779 */
1780static void of_node_release(struct kref *kref)
1781{
1782 struct device_node *node = kref_to_device_node(kref);
1783 struct property *prop = node->properties;
1784
1785 if (!OF_IS_DYNAMIC(node))
1786 return;
1787 while (prop) {
1788 struct property *next = prop->next;
1789 kfree(prop->name);
1790 kfree(prop->value);
1791 kfree(prop);
1792 prop = next;
1793 }
1794 kfree(node->intrs);
1795 kfree(node->addrs);
1796 kfree(node->full_name);
1797 kfree(node->data);
1798 kfree(node);
1799}
1800
1801/**
1802 * of_node_put - Decrement refcount of a node
1803 * @node: Node to dec refcount, NULL is supported to
1804 * simplify writing of callers
1805 *
1806 */
1807void of_node_put(struct device_node *node)
1808{
1809 if (node)
1810 kref_put(&node->kref, of_node_release);
1811}
1812EXPORT_SYMBOL(of_node_put);
1813
1814/*
1815 * Plug a device node into the tree and global list.
1816 */
1817void of_attach_node(struct device_node *np)
1818{
1819 write_lock(&devtree_lock);
1820 np->sibling = np->parent->child;
1821 np->allnext = allnodes;
1822 np->parent->child = np;
1823 allnodes = np;
1824 write_unlock(&devtree_lock);
1825}
1826
1827/*
1828 * "Unplug" a node from the device tree. The caller must hold
1829 * a reference to the node. The memory associated with the node
1830 * is not freed until its refcount goes to zero.
1831 */
1832void of_detach_node(const struct device_node *np)
1833{
1834 struct device_node *parent;
1835
1836 write_lock(&devtree_lock);
1837
1838 parent = np->parent;
1839
1840 if (allnodes == np)
1841 allnodes = np->allnext;
1842 else {
1843 struct device_node *prev;
1844 for (prev = allnodes;
1845 prev->allnext != np;
1846 prev = prev->allnext)
1847 ;
1848 prev->allnext = np->allnext;
1849 }
1850
1851 if (parent->child == np)
1852 parent->child = np->sibling;
1853 else {
1854 struct device_node *prevsib;
1855 for (prevsib = np->parent->child;
1856 prevsib->sibling != np;
1857 prevsib = prevsib->sibling)
1858 ;
1859 prevsib->sibling = np->sibling;
1860 }
1861
1862 write_unlock(&devtree_lock);
1863}
1864
1865#ifdef CONFIG_PPC_PSERIES
1866/*
1867 * Fix up the uninitialized fields in a new device node:
1868 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1869 *
1870 * A lot of boot-time code is duplicated here, because functions such
1871 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1872 * slab allocator.
1873 *
1874 * This should probably be split up into smaller chunks.
1875 */
1876
1877static int of_finish_dynamic_node(struct device_node *node,
1878 unsigned long *unused1, int unused2,
1879 int unused3, int unused4)
1880{
1881 struct device_node *parent = of_get_parent(node);
1882 int err = 0;
1883 phandle *ibm_phandle;
1884
1885 node->name = get_property(node, "name", NULL);
1886 node->type = get_property(node, "device_type", NULL);
1887
1888 if (!parent) {
1889 err = -ENODEV;
1890 goto out;
1891 }
1892
1893 /* We don't support that function on PowerMac, at least
1894 * not yet
1895 */
1896 if (systemcfg->platform == PLATFORM_POWERMAC)
1897 return -ENODEV;
1898
1899 /* fix up new node's linux_phandle field */
1900 if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1901 node->linux_phandle = *ibm_phandle;
1902
1903out:
1904 of_node_put(parent);
1905 return err;
1906}
1907
1908static int prom_reconfig_notifier(struct notifier_block *nb,
1909 unsigned long action, void *node)
1910{
1911 int err;
1912
1913 switch (action) {
1914 case PSERIES_RECONFIG_ADD:
1915 err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1916 if (err < 0) {
1917 printk(KERN_ERR "finish_node returned %d\n", err);
1918 err = NOTIFY_BAD;
1919 }
1920 break;
1921 default:
1922 err = NOTIFY_DONE;
1923 break;
1924 }
1925 return err;
1926}
1927
1928static struct notifier_block prom_reconfig_nb = {
1929 .notifier_call = prom_reconfig_notifier,
1930 .priority = 10, /* This one needs to run first */
1931};
1932
1933static int __init prom_reconfig_setup(void)
1934{
1935 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1936}
1937__initcall(prom_reconfig_setup);
1938#endif
1939
1940/*
1941 * Find a property with a given name for a given node
1942 * and return the value.
1943 */
1944unsigned char *get_property(struct device_node *np, const char *name,
1945 int *lenp)
1946{
1947 struct property *pp;
1948
1949 for (pp = np->properties; pp != 0; pp = pp->next)
1950 if (strcmp(pp->name, name) == 0) {
1951 if (lenp != 0)
1952 *lenp = pp->length;
1953 return pp->value;
1954 }
1955 return NULL;
1956}
1957EXPORT_SYMBOL(get_property);
1958
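/*
 * Illustrative sketch (kept out of the build): typical get_property() usage.
 * The returned pointer aliases the property value held by the device tree
 * (don't kfree() it), and lenp may be NULL if the length isn't needed.  The
 * property names are only examples.
 */
#if 0
static void get_property_example(struct device_node *np)
{
	int len;
	char *model = (char *)get_property(np, "model", NULL);
	u32 *reg = (u32 *)get_property(np, "reg", &len);

	if (model)
		printk("model: %s\n", model);
	if (reg)
		printk("reg is %d bytes\n", len);
}
#endif
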
1959/*
1960 * Add a property to a node
1961 */
1962void prom_add_property(struct device_node* np, struct property* prop)
1963{
1964 struct property **next = &np->properties;
1965
1966 prop->next = NULL;
1967 while (*next)
1968 next = &(*next)->next;
1969 *next = prop;
1970}
1971
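/*
 * Illustrative sketch (kept out of the build): adding a property with
 * prom_add_property().  The property, its name and its value must stay
 * allocated for the lifetime of the node; the property name here is made up.
 */
#if 0
static void add_property_example(struct device_node *np)
{
	static u32 value = 1;
	static struct property prop = {
		.name	= "linux,example-flag",
		.length	= sizeof(value),
		.value	= (unsigned char *)&value,
	};

	prom_add_property(np, &prop);
}
#endif
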
1972/* I quickly hacked that one, check against spec ! */
1973static inline unsigned long
1974bus_space_to_resource_flags(unsigned int bus_space)
1975{
1976 u8 space = (bus_space >> 24) & 0xf;
1977 if (space == 0)
1978 space = 0x02;
1979 if (space == 0x02)
1980 return IORESOURCE_MEM;
1981 else if (space == 0x01)
1982 return IORESOURCE_IO;
1983 else {
1984 printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
1985 bus_space);
1986 return 0;
1987 }
1988}
1989
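/*
 * Worked example: for an assigned-addresses entry whose phys.hi word is
 * 0x82000010, (0x82000010 >> 24) & 0xf is 0x2, so the function above returns
 * IORESOURCE_MEM; a phys.hi of 0x81000014 yields 0x1 and thus IORESOURCE_IO.
 * The low bits are made up, only the space bits matter here.
 */
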
1990static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
1991 struct address_range *range)
1992{
1993 unsigned long mask;
1994 int i;
1995
1996 /* Check this one */
1997 mask = bus_space_to_resource_flags(range->space);
1998 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
1999 if ((pdev->resource[i].flags & mask) == mask &&
2000 pdev->resource[i].start <= range->address &&
2001 pdev->resource[i].end > range->address) {
2002 if ((range->address + range->size - 1) > pdev->resource[i].end) {
2003 /* Add better message */
2004 printk(KERN_WARNING "PCI/OF resource overlap !\n");
2005 return NULL;
2006 }
2007 break;
2008 }
2009 }
2010 if (i == DEVICE_COUNT_RESOURCE)
2011 return NULL;
2012 return &pdev->resource[i];
2013}
2014
2015/*
2016 * Request an OF device resource. Currently handles children of PCI devices,
2017 * or other nodes attached to the root node. Ultimately, we should put some
2018 * link to the resources in the OF node.
2019 */
2020struct resource *request_OF_resource(struct device_node* node, int index,
2021 const char* name_postfix)
2022{
2023 struct pci_dev* pcidev;
2024 u8 pci_bus, pci_devfn;
2025 unsigned long iomask;
2026 struct device_node* nd;
2027 struct resource* parent;
2028 struct resource *res = NULL;
2029 int nlen, plen;
2030
2031 if (index >= node->n_addrs)
2032 goto fail;
2033
2034 /* Sanity check on bus space */
2035 iomask = bus_space_to_resource_flags(node->addrs[index].space);
2036 if (iomask & IORESOURCE_MEM)
2037 parent = &iomem_resource;
2038 else if (iomask & IORESOURCE_IO)
2039 parent = &ioport_resource;
2040 else
2041 goto fail;
2042
2043 /* Find a PCI parent if any */
2044 nd = node;
2045 pcidev = NULL;
2046 while (nd) {
2047 if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2048 pcidev = pci_find_slot(pci_bus, pci_devfn);
2049 if (pcidev) break;
2050 nd = nd->parent;
2051 }
2052 if (pcidev)
2053 parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2054 if (!parent) {
2055 printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2056 node->name);
2057 goto fail;
2058 }
2059
2060 res = __request_region(parent, node->addrs[index].address,
2061 node->addrs[index].size, NULL);
2062 if (!res)
2063 goto fail;
2064 nlen = strlen(node->name);
2065 plen = name_postfix ? strlen(name_postfix) : 0;
2066 res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2067 if (res->name) {
2068 strcpy((char *)res->name, node->name);
2069 if (plen)
2070 strcpy((char *)res->name+nlen, name_postfix);
2071 }
2072 return res;
2073fail:
2074 return NULL;
2075}
2076EXPORT_SYMBOL(request_OF_resource);
2077
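/*
 * Illustrative sketch (kept out of the build): request_OF_resource() and
 * release_OF_resource() below are meant to be used as a pair on the same
 * node and index.  The index and name postfix are only examples.
 */
#if 0
static int of_resource_example(struct device_node *np)
{
	struct resource *res;

	res = request_OF_resource(np, 0, " (example)");
	if (res == NULL)
		return -EBUSY;

	/* ... use the region described by np->addrs[0] ... */

	return release_OF_resource(np, 0);
}
#endif
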
2078int release_OF_resource(struct device_node *node, int index)
2079{
2080 struct pci_dev* pcidev;
2081 u8 pci_bus, pci_devfn;
2082 unsigned long iomask, start, end;
2083 struct device_node* nd;
2084 struct resource* parent;
2085 struct resource *res = NULL;
2086
2087 if (index >= node->n_addrs)
2088 return -EINVAL;
2089
2090 /* Sanity check on bus space */
2091 iomask = bus_space_to_resource_flags(node->addrs[index].space);
2092 if (iomask & IORESOURCE_MEM)
2093 parent = &iomem_resource;
2094 else if (iomask & IORESOURCE_IO)
2095 parent = &ioport_resource;
2096 else
2097 return -EINVAL;
2098
2099 /* Find a PCI parent if any */
2100 nd = node;
2101 pcidev = NULL;
2102 while(nd) {
2103 if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2104 pcidev = pci_find_slot(pci_bus, pci_devfn);
2105 if (pcidev) break;
2106 nd = nd->parent;
2107 }
2108 if (pcidev)
2109 parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2110 if (!parent) {
2111 printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2112 node->name);
2113 return -ENODEV;
2114 }
2115
2116 /* Find us in the parent and its children */
2117 res = parent->child;
2118 start = node->addrs[index].address;
2119 end = start + node->addrs[index].size - 1;
2120 while (res) {
2121 if (res->start == start && res->end == end &&
2122 (res->flags & IORESOURCE_BUSY))
2123 break;
2124 if (res->start <= start && res->end >= end)
2125 res = res->child;
2126 else
2127 res = res->sibling;
2128 }
2129 if (!res)
2130 return -ENODEV;
2131
2132 if (res->name) {
2133 kfree(res->name);
2134 res->name = NULL;
2135 }
2136 release_resource(res);
2137 kfree(res);
2138
2139 return 0;
2140}
2141EXPORT_SYMBOL(release_OF_resource);