2 * Procedures for creating, accessing and interpreting the device tree.
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 #include <linux/kexec.h>
33 #include <linux/debugfs.h>
39 #include <asm/processor.h>
42 #include <asm/kdump.h>
44 #include <asm/system.h>
46 #include <asm/pgtable.h>
48 #include <asm/iommu.h>
49 #include <asm/btext.h>
50 #include <asm/sections.h>
51 #include <asm/machdep.h>
52 #include <asm/pSeries_reconfig.h>
53 #include <asm/pci-bridge.h>
54 #include <asm/kexec.h>
/* Debug output macro; routed through KERN_ERR so it is visible very early
 * in boot.  NOTE(review): the original numbering shows gaps throughout this
 * extract -- surrounding #ifdef context for these definitions is missing. */
57 #define DBG(fmt...) printk(KERN_ERR fmt)
/* Cached #address-cells / #size-cells of the flat-tree root node; filled in
 * by early_init_dt_scan_root() and consumed by early_init_dt_scan_memory(). */
63 static int __initdata dt_root_addr_cells;
64 static int __initdata dt_root_size_cells;
/* IOMMU on/off policy flags, set from /chosen properties in
 * early_init_dt_scan_chosen(). */
67 int __initdata iommu_is_off;
68 int __initdata iommu_force_on;
/* TCE allocation window from linux,tce-alloc-{start,end} in /chosen. */
69 unsigned long tce_alloc_start, tce_alloc_end;
/* Pointer to the flattened device-tree blob handed over by firmware/boot.
 * NOTE(review): declared twice (static __initdata and non-static) --
 * presumably these live in different #ifdef (32/64-bit) branches that were
 * dropped from this extract; confirm against the full file. */
75 static struct boot_param_header *initial_boot_params __initdata;
77 struct boot_param_header *initial_boot_params;
/* Head of the global list of all device nodes, chained via ->allnext. */
80 static struct device_node *allnodes = NULL;
82 /* use when traversing tree through the allnext, child, sibling,
83 * or parent members of struct device_node.
85 static DEFINE_RWLOCK(devtree_lock);
87 /* export that to outside world */
88 struct device_node *of_chosen;
/* Fallback interrupt controller for firmwares that give us no phandles
 * (e.g. BootX) -- see intr_parent() below. */
90 struct device_node *dflt_interrupt_controller;
91 int num_interrupt_controllers;
94 * Wrapper for allocating memory for various data that needs to be
95 * attached to device nodes as they are processed at boot or when
96 * added to the device tree later (e.g. DLPAR). At boot there is
97 * already a region reserved so we just increment *mem_start by size;
98 * otherwise we call kmalloc.
100 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
/* NOTE(review): the boot-time branch that bumps *mem_start is missing from
 * this extract (original lines 101-104); only the kmalloc fallback for the
 * post-boot (mem_start == NULL) case is visible here. */
105 return kmalloc(size, GFP_KERNEL);
/* Find the device_node with a given phandle by a linear walk of the
 * global allnodes list, matching on ->linux_phandle. */
113 * Find the device_node with a given phandle.
115 static struct device_node * find_phandle(phandle ph)
117 struct device_node *np;
119 for (np = allnodes; np != 0; np = np->allnext)
120 if (np->linux_phandle == ph)
/* NOTE(review): the "return np;" / "return NULL;" tail of this function is
 * not visible in the extract. */
126 * Find the interrupt parent of a node.
/* Follows the "interrupt-parent" phandle of node p to its parent interrupt
 * controller; falls back to the single known controller when phandles are
 * unavailable (BootX). */
128 static struct device_node * __devinit intr_parent(struct device_node *p)
132 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
135 p = find_phandle(*parp);
139 * On a powermac booted with BootX, we don't get to know the
140 * phandles for any nodes, so find_phandle will return NULL.
141 * Fortunately these machines only have one interrupt controller
142 * so there isn't in fact any ambiguity. -- paulus
144 if (num_interrupt_controllers == 1)
145 p = dflt_interrupt_controller;
150 * Find out the size of each entry of the interrupts property
/* Walks up the interrupt tree from np, returning the value of the first
 * "#interrupt-cells" property found on an ancestor interrupt parent.
 * Complains if a node that is clearly a controller (has
 * "interrupt-controller" or "interrupt-map") lacks #interrupt-cells. */
153 int __devinit prom_n_intr_cells(struct device_node *np)
155 struct device_node *p;
158 for (p = np; (p = intr_parent(p)) != NULL; ) {
159 icp = (unsigned int *)
160 get_property(p, "#interrupt-cells", NULL);
163 if (get_property(p, "interrupt-controller", NULL) != NULL
164 || get_property(p, "interrupt-map", NULL) != NULL) {
165 printk("oops, node %s doesn't have #interrupt-cells\n",
/* Reached the root without finding #interrupt-cells: report failure.
 * NOTE(review): the returns and default value are in lines missing from
 * this extract. */
171 printk("prom_n_intr_cells failed for %s\n", np->full_name);
177 * Map an interrupt from a device up to the platform interrupt
/* Walks the OF interrupt tree from device np toward the root controller,
 * translating the interrupt specifier 'ints' through each node's
 * "interrupt-map"/"interrupt-map-mask" tables (IEEE 1275 interrupt mapping).
 * On success *irq points at the final specifier and *ictrler at the
 * controller node.  NOTE(review): many lines (loop heads, returns, error
 * branches) are missing from this extract; the visible code is only a
 * skeleton of the full algorithm. */
180 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
181 struct device_node *np, unsigned int *ints,
184 struct device_node *p, *ipar;
185 unsigned int *imap, *imask, *ip;
186 int i, imaplen, match;
187 int newintrc = 0, newaddrc = 0;
/* The child's "reg" and #address-cells are part of the lookup key into
 * the parent's interrupt-map. */
191 reg = (unsigned int *) get_property(np, "reg", NULL);
192 naddrc = prom_n_addr_cells(np);
195 if (get_property(p, "interrupt-controller", NULL) != NULL)
196 /* this node is an interrupt controller, stop here */
198 imap = (unsigned int *)
199 get_property(p, "interrupt-map", &imaplen);
204 imask = (unsigned int *)
205 get_property(p, "interrupt-map-mask", NULL);
207 printk("oops, %s has interrupt-map but no mask\n",
211 imaplen /= sizeof(unsigned int);
/* Scan interrupt-map entries: each entry is
 * <child unit addr> <child intr spec> <parent phandle> <parent spec>. */
214 while (imaplen > 0 && !match) {
215 /* check the child-interrupt field */
217 for (i = 0; i < naddrc && match; ++i)
218 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
219 for (; i < naddrc + nintrc && match; ++i)
220 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
221 imap += naddrc + nintrc;
222 imaplen -= naddrc + nintrc;
223 /* grab the interrupt parent */
224 ipar = find_phandle((phandle) *imap++);
226 if (ipar == NULL && num_interrupt_controllers == 1)
227 /* cope with BootX not giving us phandles */
228 ipar = dflt_interrupt_controller;
230 printk("oops, no int parent %x in map of %s\n",
231 imap[-1], p->full_name);
234 /* find the parent's # addr and intr cells */
235 ip = (unsigned int *)
236 get_property(ipar, "#interrupt-cells", NULL);
238 printk("oops, no #interrupt-cells on %s\n",
243 ip = (unsigned int *)
244 get_property(ipar, "#address-cells", NULL);
245 newaddrc = (ip == NULL)? 0: *ip;
/* Skip over this entry's parent specifier to the next map entry. */
246 imap += newaddrc + newintrc;
247 imaplen -= newaddrc + newintrc;
250 printk("oops, error decoding int-map on %s, len=%d\n",
251 p->full_name, imaplen);
256 printk("oops, no match in %s int-map for %s\n",
257 p->full_name, np->full_name);
/* Matched: the translated specifier sits just before the parent phandle. */
264 ints = imap - nintrc;
269 printk("hmmm, int tree for %s doesn't have ctrler\n",
/* ISA-style sense encoding: index is the 2-bit OF sense value. */
279 static unsigned char map_isa_senses[4] = {
280 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
281 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
282 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
283 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
/* MPIC sense encoding -- note the ordering differs from the ISA table. */
286 static unsigned char map_mpic_senses[4] = {
287 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
288 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
289 /* 2 seems to be used for the 8259 cascade... */
290 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
291 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
/* Fill in np->intrs[] / np->n_intrs for one device node, translating each
 * entry of its "interrupts" property through map_interrupt() and the virtual
 * IRQ layer.  Handles the legacy case of machines with no interrupt
 * controller nodes (AAPL,interrupts).  NOTE(review): numerous lines are
 * missing from this extract (returns, brace closures, the measure_only
 * plumbing), so read the visible code as a skeleton only. */
294 static int __devinit finish_node_interrupts(struct device_node *np,
295 unsigned long *mem_start,
299 int intlen, intrcells, intrcount;
301 unsigned int *irq, virq;
302 struct device_node *ic;
/* Leftover debugging hook keyed off the SMU doorbell node. */
305 //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0)
306 #define TRACE(fmt...)
308 if (!strcmp(np->name, "smu-doorbell"))
311 TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n",
312 num_interrupt_controllers);
/* Legacy path: no interrupt-controller nodes at all (very old machines). */
314 if (num_interrupt_controllers == 0) {
316 * Old machines just have a list of interrupt numbers
317 * and no interrupt-controller nodes.
319 ints = (unsigned int *) get_property(np, "AAPL,interrupts",
321 /* XXX old interpret_pci_props looked in parent too */
322 /* XXX old interpret_macio_props looked for interrupts
323 before AAPL,interrupts */
325 ints = (unsigned int *) get_property(np, "interrupts",
/* Each legacy entry is a bare line number; sense defaults to
 * level/active-low. */
330 np->n_intrs = intlen / sizeof(unsigned int);
331 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
338 for (i = 0; i < np->n_intrs; ++i) {
339 np->intrs[i].line = *ints++;
340 np->intrs[i].sense = IRQ_SENSE_LEVEL
341 | IRQ_POLARITY_NEGATIVE;
/* Modern path: parse "interrupts" in units of #interrupt-cells. */
346 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
347 TRACE("ints=%p, intlen=%d\n", ints, intlen);
350 intrcells = prom_n_intr_cells(np);
351 intlen /= intrcells * sizeof(unsigned int);
352 TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen);
353 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
361 for (i = 0; i < intlen; ++i, ints += intrcells) {
362 n = map_interrupt(&irq, &ic, np, ints, intrcells);
363 TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n);
367 /* don't map IRQ numbers under a cascaded 8259 controller */
368 if (ic && device_is_compatible(ic, "chrp,iic")) {
369 np->intrs[intrcount].line = irq[0];
370 sense = (n > 1)? (irq[1] & 3): 3;
371 np->intrs[intrcount].sense = map_isa_senses[sense];
/* Non-8259: route through the virtual IRQ mapping. */
373 virq = virt_irq_create_mapping(irq[0]);
374 TRACE("virq=%d\n", virq);
376 if (virq == NO_IRQ) {
377 printk(KERN_CRIT "Could not allocate interrupt"
378 " number for %s\n", np->full_name);
382 np->intrs[intrcount].line = irq_offset_up(virq);
383 sense = (n > 1)? (irq[1] & 3): 1;
385 /* Apple uses bits in there in a different way, let's
386 * only keep the real sense bit on macs
388 if (machine_is(powermac))
390 np->intrs[intrcount].sense = map_mpic_senses[sense];
394 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
395 if (machine_is(powermac) && ic && ic->parent) {
396 char *name = get_property(ic->parent, "name", NULL);
397 if (name && !strcmp(name, "u3"))
398 np->intrs[intrcount].line += 128;
399 else if (!(name && (!strcmp(name, "mac-io") ||
400 !strcmp(name, "u4"))))
401 /* ignore other cascaded controllers, such as
405 #endif /* CONFIG_PPC64 */
/* Unexpected specifier size: dump the raw cells for debugging. */
407 printk("hmmm, got %d intr cells for %s:", n,
409 for (j = 0; j < n; ++j)
410 printk(" %d", irq[j]);
415 np->n_intrs = intrcount;
/* Recursively finish one node and all its children: fills in interrupt
 * info via finish_node_interrupts(), then descends the sibling list.
 * NOTE(review): error-propagation lines are missing from this extract. */
420 static int __devinit finish_node(struct device_node *np,
421 unsigned long *mem_start,
424 struct device_node *child;
427 rc = finish_node_interrupts(np, mem_start, measure_only);
431 for (child = np->child; child != NULL; child = child->sibling) {
432 rc = finish_node(child, mem_start, measure_only);
/* Count interrupt-controller nodes and remember one as the default
 * controller (used when phandles are missing).  The checks below guard
 * against false matches on /chosen and on AppleKiwi nodes. */
440 static void __init scan_interrupt_controllers(void)
442 struct device_node *np;
447 for (np = allnodes; np != NULL; np = np->allnext) {
448 ic = get_property(np, "interrupt-controller", &iclen);
449 name = get_property(np, "name", NULL);
450 /* checking iclen makes sure we don't get a false
451 match on /chosen.interrupt_controller */
453 && strcmp(name, "interrupt-controller") == 0)
454 || (ic != NULL && iclen == 0
455 && strcmp(name, "AppleKiwi"))) {
457 dflt_interrupt_controller = np;
461 num_interrupt_controllers = n;
465 * finish_device_tree is called once things are running normally
466 * (i.e. with text and data mapped to the address they were linked at).
467 * It traverses the device tree and fills in some of the additional,
468 * fields in each node like {n_}addrs and {n_}intrs, the virt interrupt
469 * mapping is also initialized at this point.
471 void __init finish_device_tree(void)
473 unsigned long start, end, size = 0;
475 DBG(" -> finish_device_tree\n");
478 /* Initialize virtual IRQ map */
481 scan_interrupt_controllers();
484 * Finish device-tree (pre-parsing some properties etc...)
485 * We do this in 2 passes. One with "measure_only" set, which
486 * will only measure the amount of memory needed, then we can
487 * allocate that memory, and call finish_node again. However,
488 * we must be careful as most routines will fail nowadays when
489 * prom_alloc() returns 0, so we must make sure our first pass
490 * doesn't start at 0. We pre-initialize size to 16 for that
491 * reason and then remove those additional 16 bytes
/* Pass 1: measure.  Pass 2 (below): allocate from LMB and fill in. */
494 finish_node(allnodes, &size, 1);
500 end = start = (unsigned long)__va(lmb_alloc(size, 128));
502 finish_node(allnodes, &end, 0);
/* Both passes must agree on the amount of memory consumed. */
503 BUG_ON(end != start + size);
505 DBG(" <- finish_device_tree\n");
/* Translate a string-block offset in the flat tree into a pointer,
 * using the off_dt_strings base in the blob header. */
508 static inline char *find_flat_dt_string(u32 offset)
510 return ((char *)initial_boot_params) +
511 initial_boot_params->off_dt_strings + offset;
515 * This function is used to scan the flattened device-tree, it is
516 * used to extract the memory informations at boot before we can
/* Generic walker over the flat blob's structure block: invokes the 'it'
 * callback for every BEGIN_NODE, tracking depth.  NOTE(review): tag
 * advancement / depth bookkeeping lines are missing from this extract. */
519 int __init of_scan_flat_dt(int (*it)(unsigned long node,
520 const char *uname, int depth,
524 unsigned long p = ((unsigned long)initial_boot_params) +
525 initial_boot_params->off_dt_struct;
530 u32 tag = *((u32 *)p);
534 if (tag == OF_DT_END_NODE) {
538 if (tag == OF_DT_NOP)
540 if (tag == OF_DT_END)
542 if (tag == OF_DT_PROP) {
543 u32 sz = *((u32 *)p);
/* Pre-v16 blobs align 8-byte-or-larger property values to 8 bytes. */
545 if (initial_boot_params->version < 0x10)
546 p = _ALIGN(p, sz >= 8 ? 8 : 4);
551 if (tag != OF_DT_BEGIN_NODE) {
552 printk(KERN_WARNING "Invalid tag %x scanning flattened"
553 " device tree !\n", tag);
558 p = _ALIGN(p + strlen(pathp) + 1, 4);
/* Old (full-path) name format: strip to the last path component. */
559 if ((*pathp) == '/') {
561 for (lp = NULL, np = pathp; *np; np++)
567 rc = it(p, pathp, depth, data);
/* Return the offset of the root node's first property, skipping NOPs. */
575 unsigned long __init of_get_flat_dt_root(void)
577 unsigned long p = ((unsigned long)initial_boot_params) +
578 initial_boot_params->off_dt_struct;
580 while(*((u32 *)p) == OF_DT_NOP)
582 BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE);
584 return _ALIGN(p + strlen((char *)p) + 1, 4);
588 * This function can be used within scan_flattened_dt callback to get
589 * access to properties
/* Look up a named property within one flat-tree node; returns a pointer
 * into the blob (and its size, via a missing out-parameter line). */
591 void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
594 unsigned long p = node;
597 u32 tag = *((u32 *)p);
602 if (tag == OF_DT_NOP)
604 if (tag != OF_DT_PROP)
608 noff = *((u32 *)(p + 4));
610 if (initial_boot_params->version < 0x10)
611 p = _ALIGN(p, sz >= 8 ? 8 : 4);
613 nstr = find_flat_dt_string(noff);
615 printk(KERN_WARNING "Can't find property index"
619 if (strcmp(name, nstr) == 0) {
/* Check the flat node's "compatible" list for a case-insensitive match;
 * the list is NUL-separated strings.  NOTE(review): the loop advance over
 * entries is in missing lines. */
629 int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
632 unsigned long cplen, l;
634 cp = of_get_flat_dt_prop(node, "compatible", &cplen);
638 if (strncasecmp(cp, compat, strlen(compat)) == 0)
/* Bump allocator over the single memory chunk used while unflattening:
 * aligns *mem, returns the old value, advances by size (advance line is
 * missing from this extract). */
648 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
653 *mem = _ALIGN(*mem, align);
/* Recursively unflatten one node (and its children) from the blob at *p
 * into struct device_node form.  Called twice: with mem==0 to measure,
 * then with a real buffer.  fpsize accumulates the full-path length for
 * version>=0x10 blobs that only store unit names.  NOTE(review): heavily
 * fragmented here -- many allocation/linking lines are missing. */
660 static unsigned long __init unflatten_dt_node(unsigned long mem,
662 struct device_node *dad,
663 struct device_node ***allnextpp,
664 unsigned long fpsize)
666 struct device_node *np;
667 struct property *pp, **prev_pp = NULL;
670 unsigned int l, allocl;
674 tag = *((u32 *)(*p));
675 if (tag != OF_DT_BEGIN_NODE) {
676 printk("Weird tag at start of node: %x\n", tag);
681 l = allocl = strlen(pathp) + 1;
682 *p = _ALIGN(*p + l, 4);
684 /* version 0x10 has a more compact unit name here instead of the full
685 * path. we accumulate the full path size using "fpsize", we'll rebuild
686 * it later. We detect this because the first character of the name is
689 if ((*pathp) != '/') {
692 /* root node: special case. fpsize accounts for path
693 * plus terminating zero. root node only has '/', so
694 * fpsize should be 2, but we want to avoid the first
695 * level nodes to have two '/' so we use fpsize 1 here
700 /* account for '/' and path size minus terminal 0
/* Allocate the node together with space for its full_name. */
709 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
710 __alignof__(struct device_node));
712 memset(np, 0, sizeof(*np));
713 np->full_name = ((char*)np) + sizeof(struct device_node);
715 char *p = np->full_name;
716 /* rebuild full path for new format */
717 if (dad && dad->parent) {
718 strcpy(p, dad->full_name);
720 if ((strlen(p) + l + 1) != allocl) {
721 DBG("%s: p: %d, l: %d, a: %d\n",
722 pathp, (int)strlen(p), l, allocl);
730 memcpy(np->full_name, pathp, l);
731 prev_pp = &np->properties;
732 *allnextpp = &np->allnext;
736 /* we temporarily use the next field as `last_child'*/
740 dad->next->sibling = np;
743 kref_init(&np->kref);
/* Property loop: unflatten each OF_DT_PROP record into a struct property. */
749 tag = *((u32 *)(*p));
750 if (tag == OF_DT_NOP) {
754 if (tag != OF_DT_PROP)
758 noff = *((u32 *)((*p) + 4));
760 if (initial_boot_params->version < 0x10)
761 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
763 pname = find_flat_dt_string(noff);
765 printk("Can't find property name in list !\n");
/* "name" is synthesized later from the unit name; skip it here. */
768 if (strcmp(pname, "name") == 0)
770 l = strlen(pname) + 1;
771 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
772 __alignof__(struct property));
/* linux,phandle wins only if ibm,phandle hasn't already set it. */
774 if (strcmp(pname, "linux,phandle") == 0) {
775 np->node = *((u32 *)*p);
776 if (np->linux_phandle == 0)
777 np->linux_phandle = np->node;
779 if (strcmp(pname, "ibm,phandle") == 0)
780 np->linux_phandle = *((u32 *)*p);
783 pp->value = (void *)*p;
787 *p = _ALIGN((*p) + sz, 4);
789 /* with version 0x10 we may not have the name property, recreate
790 * it here from the unit name if absent
793 char *p = pathp, *ps = pathp, *pa = NULL;
806 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
807 __alignof__(struct property));
811 pp->value = (unsigned char *)(pp + 1);
814 memcpy(pp->value, ps, sz - 1);
815 ((char *)pp->value)[sz - 1] = 0;
816 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
/* Cache the common name/device_type properties on the node itself. */
821 np->name = get_property(np, "name", NULL);
822 np->type = get_property(np, "device_type", NULL);
/* Recurse into children until the matching END_NODE tag. */
829 while (tag == OF_DT_BEGIN_NODE) {
830 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
831 tag = *((u32 *)(*p));
833 if (tag != OF_DT_END_NODE) {
834 printk("Weird tag at end of node: %x\n", tag);
/* early_param handler for "mem=": page-aligns the parsed value into the
 * global memory_limit. */
841 static int __init early_parse_mem(char *p)
846 memory_limit = PAGE_ALIGN(memparse(p, &p));
847 DBG("memory limit = 0x%lx\n", memory_limit);
851 early_param("mem", early_parse_mem);
854 * The device tree may be allocated below our memory limit, or inside the
855 * crash kernel region for kdump. If so, move it out now.
857 static void move_device_tree(void)
859 unsigned long start, size;
862 DBG("-> move_device_tree\n");
864 start = __pa(initial_boot_params);
865 size = initial_boot_params->totalsize;
/* Relocate the blob into the RMO region if it conflicts with the memory
 * limit or the crashkernel reservation, then repoint initial_boot_params. */
867 if ((memory_limit && (start + size) > memory_limit) ||
868 overlaps_crashkernel(start, size)) {
869 p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
870 memcpy(p, initial_boot_params, size);
871 initial_boot_params = (struct boot_param_header *)p;
872 DBG("Moved device tree to 0x%p\n", p);
875 DBG("<- move_device_tree\n");
879 * unflattens the device-tree passed by the firmware, creating the
880 * tree of struct device_node. It also fills the "name" and "type"
881 * pointers of the nodes so the normal device-tree walking functions
882 * can be used (this used to be done by finish_device_tree)
884 void __init unflatten_device_tree(void)
886 unsigned long start, mem, size;
887 struct device_node **allnextp = &allnodes;
889 DBG(" -> unflatten_device_tree()\n");
891 /* First pass, scan for size */
892 start = ((unsigned long)initial_boot_params) +
893 initial_boot_params->off_dt_struct;
894 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
/* Round up to a multiple of 4 so the sentinel word below is aligned. */
895 size = (size | 3) + 1;
897 DBG(" size is %lx, allocating...\n", size);
899 /* Allocate memory for the expanded device tree */
900 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
901 mem = (unsigned long) __va(mem);
/* Overrun canary placed just past the measured size; rechecked below. */
903 ((u32 *)mem)[size / 4] = 0xdeadbeef;
905 DBG(" unflattening %lx...\n", mem);
907 /* Second pass, do actual unflattening */
908 start = ((unsigned long)initial_boot_params) +
909 initial_boot_params->off_dt_struct;
910 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
911 if (*((u32 *)start) != OF_DT_END)
912 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
913 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
914 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
915 ((u32 *)mem)[size / 4] );
918 /* Get pointer to OF "/chosen" node for use everywhere */
919 of_chosen = of_find_node_by_path("/chosen");
920 if (of_chosen == NULL)
921 of_chosen = of_find_node_by_path("/chosen@0");
923 DBG(" <- unflatten_device_tree()\n");
927 * ibm,pa-features is a per-cpu property that contains a string of
928 * attribute descriptors, each of which has a 2 byte header plus up
929 * to 254 bytes worth of processor attribute bits. First header
930 * byte specifies the number of bytes following the header.
931 * Second header byte is an "attribute-specifier" type, of which
932 * zero is the only currently-defined value.
933 * Implementation: Pass in the byte and bit offset for the feature
934 * that we are interested in. The function will return -1 if the
935 * pa-features property is missing, or a 1/0 to indicate if the feature
936 * is supported/not supported. Note that the bit numbers are
937 * big-endian to match the definition in PAPR.
939 static struct ibm_pa_feature {
940 unsigned long cpu_features; /* CPU_FTR_xxx bit */
941 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
942 unsigned char pabyte; /* byte number in ibm,pa-features */
943 unsigned char pabit; /* bit number (big-endian) */
944 unsigned char invert; /* if 1, pa bit set => clear feature */
945 } ibm_pa_features[] __initdata = {
946 {0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
947 {0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
948 {CPU_FTR_SLB, 0, 0, 2, 0},
949 {CPU_FTR_CTRL, 0, 0, 3, 0},
950 {CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
951 {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
953 /* put this back once we know how to test if firmware does 64k IO */
954 {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
/* Walk the type-0 descriptor of ibm,pa-features and set/clear the kernel
 * and user CPU feature bits listed in the table above.  NOTE(review):
 * descriptor-search loop lines are missing from this extract. */
958 static void __init check_cpu_pa_features(unsigned long node)
960 unsigned char *pa_ftrs;
961 unsigned long len, tablelen, i, bit;
963 pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
967 /* find descriptor with type == 0 */
971 len = 2 + pa_ftrs[0];
973 return; /* descriptor 0 not found */
980 /* loop over bits we know about */
981 for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) {
982 struct ibm_pa_feature *fp = &ibm_pa_features[i];
/* Skip features whose byte lies beyond this firmware's descriptor. */
984 if (fp->pabyte >= pa_ftrs[0])
986 bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
/* XOR with 'invert' so a set bit can either enable or disable. */
987 if (bit ^ fp->invert) {
988 cur_cpu_spec->cpu_features |= fp->cpu_features;
989 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
991 cur_cpu_spec->cpu_features &= ~fp->cpu_features;
992 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
/* Flat-tree scan callback for "cpu" nodes: identifies the boot CPU
 * (matching the boot_cpuid_phys header field on v2+ blobs, or the
 * linux,boot-cpu property on older ones), assigns logical CPU ids, and
 * updates Altivec/SMT/pa-features CPU feature bits.  NOTE(review): several
 * control-flow lines are missing from this extract. */
997 static int __init early_init_dt_scan_cpus(unsigned long node,
998 const char *uname, int depth,
1001 static int logical_cpuid = 0;
1002 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1003 #ifdef CONFIG_ALTIVEC
1011 /* We are scanning "cpu" nodes only */
1012 if (type == NULL || strcmp(type, "cpu") != 0)
1015 /* Get physical cpuid */
1016 intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
1018 nthreads = len / sizeof(int);
/* Fallback when there is no interrupt-server list: use "reg". */
1020 intserv = of_get_flat_dt_prop(node, "reg", NULL);
1025 * Now see if any of these threads match our boot cpu.
1026 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
1028 for (i = 0; i < nthreads; i++) {
1030 * version 2 of the kexec param format adds the phys cpuid of
1033 if (initial_boot_params && initial_boot_params->version >= 2) {
1035 initial_boot_params->boot_cpuid_phys) {
1041 * Check if it's the boot-cpu, set it's hw index now,
1042 * unfortunately this format did not support booting
1043 * off secondary threads.
1045 if (of_get_flat_dt_prop(node,
1046 "linux,boot-cpu", NULL) != NULL) {
1053 /* logical cpu id is always 0 on UP kernels */
1059 DBG("boot cpu: logical %d physical %d\n", logical_cpuid,
1061 boot_cpuid = logical_cpuid;
1062 set_hard_smp_processor_id(boot_cpuid, intserv[i]);
1065 #ifdef CONFIG_ALTIVEC
1066 /* Check if we have a VMX and eventually update CPU features */
1067 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
1068 if (prop && (*prop) > 0) {
1069 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1070 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1073 /* Same goes for Apple's "altivec" property */
1074 prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
1076 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1077 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1079 #endif /* CONFIG_ALTIVEC */
1081 check_cpu_pa_features(node);
/* SMT on/off -- the condition tested here is in missing lines. */
1083 #ifdef CONFIG_PPC_PSERIES
1085 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1087 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
/* Flat-tree scan callback for /chosen: extracts IOMMU policy, memory
 * limit, TCE window, RTAS location, crashkernel region and the kernel
 * command line.  NOTE(review): depth/guard lines are partially missing. */
1093 static int __init early_init_dt_scan_chosen(unsigned long node,
1094 const char *uname, int depth, void *data)
1096 unsigned long *lprop;
1100 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1103 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1107 /* check if iommu is forced on or off */
1108 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1110 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1114 /* mem=x on the command line is the preferred mechanism */
1115 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
1117 memory_limit = *lprop;
1120 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1122 tce_alloc_start = *lprop;
1123 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1125 tce_alloc_end = *lprop;
1128 #ifdef CONFIG_PPC_RTAS
1129 /* To help early debugging via the front panel, we retrieve a minimal
1130 * set of RTAS infos now if available
1133 u64 *basep, *entryp, *sizep;
1135 basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
1136 entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1137 sizep = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
1138 if (basep && entryp && sizep) {
1140 rtas.entry = *entryp;
1144 #endif /* CONFIG_PPC_RTAS */
/* kexec crash-kernel window, recorded as a resource for /proc and kdump. */
1147 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
1149 crashk_res.start = *lprop;
1151 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
1153 crashk_res.end = crashk_res.start + *lprop - 1;
1156 /* Retreive command line */
1157 p = of_get_flat_dt_prop(node, "bootargs", &l);
1158 if (p != NULL && l > 0)
1159 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
1161 #ifdef CONFIG_CMDLINE
/* Built-in CONFIG_CMDLINE is used only when firmware supplied nothing. */
1162 if (l == 0 || (l == 1 && (*p) == 0))
1163 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1164 #endif /* CONFIG_CMDLINE */
1166 DBG("Command line is: %s\n", cmd_line);
/* Flat-tree scan callback for the root node: caches #size-cells and
 * #address-cells into the dt_root_*_cells globals (defaults 1 and 2). */
1172 static int __init early_init_dt_scan_root(unsigned long node,
1173 const char *uname, int depth, void *data)
1180 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1181 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1182 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1184 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1185 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1186 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
/* Consume s cells from *cellp and fold them into one unsigned long,
 * ignoring high-order cells beyond what fits. */
1192 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1197 /* Ignore more than 2 cells */
1198 while (s > sizeof(unsigned long) / 4) {
/* Flat-tree scan callback for memory nodes: parses each (base, size) pair
 * from linux,usable-memory (preferred) or reg, and feeds it to lmb_add().
 * The 32-bit clamp below limits memory to the low 2GB.
 * NOTE(review): the '®' characters below look like a mangled '&reg'
 * (HTML-entity corruption in this extract) -- the calls should read
 * dt_mem_next_cell(..., &reg); confirm against the pristine file. */
1216 static int __init early_init_dt_scan_memory(unsigned long node,
1217 const char *uname, int depth, void *data)
1219 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1223 /* We are scanning "memory" nodes only */
1226 * The longtrail doesn't have a device_type on the
1227 * /memory node, so look for the node called /memory@0.
1229 if (depth != 1 || strcmp(uname, "memory@0") != 0)
1231 } else if (strcmp(type, "memory") != 0)
1234 reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
1236 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
1240 endp = reg + (l / sizeof(cell_t));
1242 DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
1243 uname, l, reg[0], reg[1], reg[2], reg[3]);
1245 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1246 unsigned long base, size;
1248 base = dt_mem_next_cell(dt_root_addr_cells, ®);
1249 size = dt_mem_next_cell(dt_root_size_cells, ®);
1253 DBG(" - %lx , %lx\n", base, size);
/* Clamp to the low 2GB -- presumably the 32-bit (non-PPC64) branch;
 * the enclosing #ifdef is not visible in this extract. */
1256 if (base >= 0x80000000ul)
1258 if ((base + size) > 0x80000000ul)
1259 size = 0x80000000ul - base;
1262 lmb_add(base, size);
/* Reserve in LMB: (1) the flat device-tree blob itself, then (2) every
 * entry of the blob's memory-reserve map.  Handles old kexec images whose
 * reserve map uses 32-bit pairs instead of 64-bit ones.  NOTE(review):
 * loop terminators (zero-size sentinel checks) are in missing lines. */
1267 static void __init early_reserve_mem(void)
1271 unsigned long self_base;
1272 unsigned long self_size;
1274 reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
1275 initial_boot_params->off_mem_rsvmap);
1277 /* before we do anything, lets reserve the dt blob */
1278 self_base = __pa((unsigned long)initial_boot_params);
1279 self_size = initial_boot_params->totalsize;
1280 lmb_reserve(self_base, self_size);
1284 * Handle the case where we might be booting from an old kexec
1285 * image that setup the mem_rsvmap as pairs of 32-bit values
1287 if (*reserve_map > 0xffffffffull) {
1288 u32 base_32, size_32;
1289 u32 *reserve_map_32 = (u32 *)reserve_map;
1292 base_32 = *(reserve_map_32++);
1293 size_32 = *(reserve_map_32++);
1296 /* skip if the reservation is for the blob */
1297 if (base_32 == self_base && size_32 == self_size)
1299 DBG("reserving: %x -> %x\n", base_32, size_32);
1300 lmb_reserve(base_32, size_32);
/* Normal 64-bit reserve-map entries. */
1306 base = *(reserve_map++);
1307 size = *(reserve_map++);
1310 /* skip if the reservation is for the blob */
1311 if (base == self_base && size == self_size)
1313 DBG("reserving: %llx -> %llx\n", base, size);
1314 lmb_reserve(base, size);
1318 DBG("memory reserved, lmbs :\n");
/* Main early-boot entry point for device-tree processing: records the flat
 * blob pointer, scans /chosen, root and memory nodes, parses early
 * parameters, sets up LMB reservations, optionally relocates the blob,
 * and finally scans CPU nodes. */
1323 void __init early_init_devtree(void *params)
1325 DBG(" -> early_init_devtree()\n");
1327 /* Setup flat device-tree pointer */
1328 initial_boot_params = params;
1330 /* Retrieve various informations from the /chosen node of the
1331 * device-tree, including the platform type, initrd location and
1332 * size, TCE reserve, and more ...
1334 of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
1336 /* Scan memory nodes and rebuild LMBs */
1338 of_scan_flat_dt(early_init_dt_scan_root, NULL);
1339 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1341 /* Save command line for /proc/cmdline and then parse parameters */
1342 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
1343 parse_early_param();
1345 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1346 lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
1347 reserve_kdump_trampoline();
1348 reserve_crashkernel();
1349 early_reserve_mem();
1351 lmb_enforce_memory_limit(memory_limit);
1354 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1356 /* We may need to relocate the flat tree, do it now.
1357 * FIXME .. and the initrd too? */
1360 DBG("Scanning CPUs ...\n");
1362 /* Retreive CPU related informations from the flat tree
1363 * (altivec support, boot CPU ID, ...)
1365 of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
1367 DBG(" <- early_init_devtree()\n");
/* Walk up from np looking for the nearest #address-cells property; the
 * return statements live in lines missing from this extract. */
1373 prom_n_addr_cells(struct device_node* np)
1379 ip = (int *) get_property(np, "#address-cells", NULL);
1382 } while (np->parent);
1383 /* No #address-cells property for the root node, default to 1 */
1386 EXPORT_SYMBOL(prom_n_addr_cells);
/* Same walk for #size-cells, with the same default of 1. */
1389 prom_n_size_cells(struct device_node* np)
1395 ip = (int *) get_property(np, "#size-cells", NULL);
1398 } while (np->parent);
1399 /* No #size-cells property for the root node, default to 1 */
1402 EXPORT_SYMBOL(prom_n_size_cells);
1405 * Work out the sense (active-low level / active-high edge)
1406 * of each interrupt from the device tree.
/* Fill senses[0..max-off) from the intrs arrays of all nodes; entries not
 * mentioned anywhere default to level/active-low. */
1408 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1410 struct device_node *np;
1413 /* default to level-triggered */
1414 memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1416 for (np = allnodes; np != 0; np = np->allnext) {
1417 for (j = 0; j < np->n_intrs; j++) {
1418 i = np->intrs[j].line;
1419 if (i >= off && i < max)
1420 senses[i-off] = np->intrs[j].sense;
1426 * Construct and return a list of the device_nodes with a given name.
1428 struct device_node *find_devices(const char *name)
1430 struct device_node *head, **prevp, *np;
1433 for (np = allnodes; np != 0; np = np->allnext) {
1434 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1442 EXPORT_SYMBOL(find_devices);
1445 * Construct and return a list of the device_nodes with a given type.
1447 struct device_node *find_type_devices(const char *type)
1449 struct device_node *head, **prevp, *np;
1452 for (np = allnodes; np != 0; np = np->allnext) {
1453 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1461 EXPORT_SYMBOL(find_type_devices);
1464 * Returns all nodes linked together
1466 struct device_node *find_all_nodes(void)
1468 struct device_node *head, **prevp, *np;
1471 for (np = allnodes; np != 0; np = np->allnext) {
1478 EXPORT_SYMBOL(find_all_nodes);
1480 /** Checks if the given "compat" string matches one of the strings in
1481 * the device's "compatible" property
1483 int device_is_compatible(struct device_node *device, const char *compat)
1488 cp = (char *) get_property(device, "compatible", &cplen);
1492 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1501 EXPORT_SYMBOL(device_is_compatible);
1505 * Indicates whether the root node has a given value in its
1506 * compatible property.
1508 int machine_is_compatible(const char *compat)
1510 struct device_node *root;
1513 root = of_find_node_by_path("/");
1515 rc = device_is_compatible(root, compat);
1520 EXPORT_SYMBOL(machine_is_compatible);
1523 * Construct and return a list of the device_nodes with a given type
1524 * and compatible property.
/*
 * Legacy unlocked walk: collect nodes whose "device_type" matches @type
 * (when @type is non-NULL) and whose "compatible" list matches @compat.
 */
1526 struct device_node *find_compatible_devices(const char *type,
1529 struct device_node *head, **prevp, *np;
1532 for (np = allnodes; np != 0; np = np->allnext) {
/* skip nodes whose device_type does not match the requested type */
1534 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1536 if (device_is_compatible(np, compat)) {
1544 EXPORT_SYMBOL(find_compatible_devices);
1547 * Find the device_node with a given full_name.
/*
 * Legacy unlocked lookup by full OF path; case-insensitive exact match
 * on np->full_name. Returns the node or NULL (non-refcounted).
 */
1549 struct device_node *find_path_device(const char *path)
1551 struct device_node *np;
1553 for (np = allnodes; np != 0; np = np->allnext)
1554 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1558 EXPORT_SYMBOL(find_path_device);
1562 * New implementation of the OF "find" APIs, return a refcounted
1563 * object, call of_node_put() when done. The device tree and list
1564 * are protected by a rw_lock.
1566 * Note that property management will need some locking as well,
1567 * this isn't dealt with yet.
1572 * of_find_node_by_name - Find a node by its "name" property
1573 * @from: The node to start searching from or NULL, the node
1574 * you pass will not be searched, only the next one
1575 * will; typically, you pass what the previous call
1576 * returned. of_node_put() will be called on it
1577 * @name: The name string to match against
1579 * Returns a node pointer with refcount incremented, use
1580 * of_node_put() on it when done.
1582 struct device_node *of_find_node_by_name(struct device_node *from,
1585 struct device_node *np;
/* hold the tree lock so the allnext chain cannot change under us */
1587 read_lock(&devtree_lock);
1588 np = from ? from->allnext : allnodes;
1589 for (; np != NULL; np = np->allnext)
/* case-insensitive match; the winning node's refcount is bumped */
1590 if (np->name != NULL && strcasecmp(np->name, name) == 0
1595 read_unlock(&devtree_lock);
1598 EXPORT_SYMBOL(of_find_node_by_name);
1601 * of_find_node_by_type - Find a node by its "device_type" property
1602 * @from: The node to start searching from or NULL, the node
1603 * you pass will not be searched, only the next one
1604 * will; typically, you pass what the previous call
1605 * returned. of_node_put() will be called on it
1606 * @name: The type string to match against
1608 * Returns a node pointer with refcount incremented, use
1609 * of_node_put() on it when done.
1611 struct device_node *of_find_node_by_type(struct device_node *from,
1614 struct device_node *np;
/* same locked/refcounted iteration as of_find_node_by_name() */
1616 read_lock(&devtree_lock);
1617 np = from ? from->allnext : allnodes;
1618 for (; np != 0; np = np->allnext)
1619 if (np->type != 0 && strcasecmp(np->type, type) == 0
1624 read_unlock(&devtree_lock);
1627 EXPORT_SYMBOL(of_find_node_by_type);
1630 * of_find_compatible_node - Find a node based on type and one of the
1631 * tokens in its "compatible" property
1632 * @from: The node to start searching from or NULL, the node
1633 * you pass will not be searched, only the next one
1634 * will; typically, you pass what the previous call
1635 * returned. of_node_put() will be called on it
1636 * @type: The type string to match "device_type" or NULL to ignore
1637 * @compatible: The string to match to one of the tokens in the device
1638 * "compatible" list.
1640 * Returns a node pointer with refcount incremented, use
1641 * of_node_put() on it when done.
1643 struct device_node *of_find_compatible_node(struct device_node *from,
1644 const char *type, const char *compatible)
1646 struct device_node *np;
1648 read_lock(&devtree_lock);
1649 np = from ? from->allnext : allnodes;
1650 for (; np != 0; np = np->allnext) {
/* when a type filter is given, reject nodes whose device_type differs */
1652 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
/* take a reference on the match before dropping the lock */
1654 if (device_is_compatible(np, compatible) && of_node_get(np))
1659 read_unlock(&devtree_lock);
1662 EXPORT_SYMBOL(of_find_compatible_node);
1665 * of_find_node_by_path - Find a node matching a full OF path
1666 * @path: The full path to match
1668 * Returns a node pointer with refcount incremented, use
1669 * of_node_put() on it when done.
1671 struct device_node *of_find_node_by_path(const char *path)
1673 struct device_node *np = allnodes;
1675 read_lock(&devtree_lock);
1676 for (; np != 0; np = np->allnext) {
/* exact (case-insensitive) full_name match; refcount taken on hit */
1677 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1681 read_unlock(&devtree_lock);
1684 EXPORT_SYMBOL(of_find_node_by_path);
1687 * of_find_node_by_phandle - Find a node given a phandle
1688 * @handle: phandle of the node to find
1690 * Returns a node pointer with refcount incremented, use
1691 * of_node_put() on it when done.
1693 struct device_node *of_find_node_by_phandle(phandle handle)
1695 struct device_node *np;
1697 read_lock(&devtree_lock);
/* phandles are unique, so the first match is the node */
1698 for (np = allnodes; np != 0; np = np->allnext)
1699 if (np->linux_phandle == handle)
1703 read_unlock(&devtree_lock);
1706 EXPORT_SYMBOL(of_find_node_by_phandle);
1709 * of_find_all_nodes - Get next node in global list
1710 * @prev: Previous node or NULL to start iteration
1711 * of_node_put() will be called on it
1713 * Returns a node pointer with refcount incremented, use
1714 * of_node_put() on it when done.
1716 struct device_node *of_find_all_nodes(struct device_node *prev)
1718 struct device_node *np;
1720 read_lock(&devtree_lock);
1721 np = prev ? prev->allnext : allnodes;
/* return the first node we can successfully take a reference on */
1722 for (; np != 0; np = np->allnext)
1723 if (of_node_get(np))
1727 read_unlock(&devtree_lock);
1730 EXPORT_SYMBOL(of_find_all_nodes);
1733 * of_get_parent - Get a node's parent if any
1734 * @node: Node to get parent
1736 * Returns a node pointer with refcount incremented, use
1737 * of_node_put() on it when done.
1739 struct device_node *of_get_parent(const struct device_node *node)
1741 struct device_node *np;
1746 read_lock(&devtree_lock);
/* of_node_get() tolerates NULL, so a parentless node yields NULL */
1747 np = of_node_get(node->parent);
1748 read_unlock(&devtree_lock);
1751 EXPORT_SYMBOL(of_get_parent);
1754 * of_get_next_child - Iterate a node childs
1755 * @node: parent node
1756 * @prev: previous child of the parent node, or NULL to get first
1758 * Returns a node pointer with refcount incremented, use
1759 * of_node_put() on it when done.
1761 struct device_node *of_get_next_child(const struct device_node *node,
1762 struct device_node *prev)
1764 struct device_node *next;
1766 read_lock(&devtree_lock);
/* resume after @prev, or start at the parent's first child */
1767 next = prev ? prev->sibling : node->child;
1768 for (; next != 0; next = next->sibling)
1769 if (of_node_get(next))
1773 read_unlock(&devtree_lock);
1776 EXPORT_SYMBOL(of_get_next_child);
1779 * of_node_get - Increment refcount of a node
1780 * @node: Node to inc refcount, NULL is supported to
1781 * simplify writing of callers
1785 struct device_node *of_node_get(struct device_node *node)
/* bump the embedded kref; callers pair this with of_node_put() */
1788 kref_get(&node->kref);
1791 EXPORT_SYMBOL(of_node_get);
/* Map an embedded kref back to its containing device_node. */
1793 static inline struct device_node * kref_to_device_node(struct kref *kref)
1795 return container_of(kref, struct device_node, kref);
1799 * of_node_release - release a dynamically allocated node
1800 * @kref: kref element of the node to be released
1802 * In of_node_put() this function is passed to kref_put()
1803 * as the destructor.
1805 static void of_node_release(struct kref *kref)
1807 struct device_node *node = kref_to_device_node(kref);
1808 struct property *prop = node->properties;
/* only dynamically-added nodes own their memory; static ones are kept */
1810 if (!OF_IS_DYNAMIC(node))
/* save the link before the current property is freed */
1813 struct property *next = prop->next;
/* then walk and free the "dead properties" list as well */
1820 prop = node->deadprops;
1821 node->deadprops = NULL;
1825 kfree(node->full_name);
1831 * of_node_put - Decrement refcount of a node
1832 * @node: Node to dec refcount, NULL is supported to
1833 * simplify writing of callers
1836 void of_node_put(struct device_node *node)
/* last put triggers of_node_release() via kref_put */
1839 kref_put(&node->kref, of_node_release);
1841 EXPORT_SYMBOL(of_node_put);
1844 * Plug a device node into the tree and global list.
/*
 * Inserts @np at the head of both its parent's child list and the
 * global allnodes list, under the devtree write lock. The caller must
 * have set np->parent beforehand.
 */
1846 void of_attach_node(struct device_node *np)
1848 write_lock(&devtree_lock);
1849 np->sibling = np->parent->child;
1850 np->allnext = allnodes;
1851 np->parent->child = np;
1853 write_unlock(&devtree_lock);
1857 * "Unplug" a node from the device tree. The caller must hold
1858 * a reference to the node. The memory associated with the node
1859 * is not freed until its refcount goes to zero.
1861 void of_detach_node(const struct device_node *np)
1863 struct device_node *parent;
1865 write_lock(&devtree_lock);
1867 parent = np->parent;
/* unlink from the global allnodes list: head case first ... */
1870 allnodes = np->allnext;
/* ... otherwise find the predecessor and splice @np out */
1872 struct device_node *prev;
1873 for (prev = allnodes;
1874 prev->allnext != np;
1875 prev = prev->allnext)
1877 prev->allnext = np->allnext;
/* unlink from the parent's sibling chain the same way */
1880 if (parent->child == np)
1881 parent->child = np->sibling;
1883 struct device_node *prevsib;
1884 for (prevsib = np->parent->child;
1885 prevsib->sibling != np;
1886 prevsib = prevsib->sibling)
1888 prevsib->sibling = np->sibling;
1891 write_unlock(&devtree_lock);
1894 #ifdef CONFIG_PPC_PSERIES
1896 * Fix up the uninitialized fields in a new device node:
1897 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1899 * A lot of boot-time code is duplicated here, because functions such
1900 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1903 * This should probably be split up into smaller chunks.
1906 static int of_finish_dynamic_node(struct device_node *node)
1908 struct device_node *parent = of_get_parent(node);
1910 phandle *ibm_phandle;
/* derive name/type from the node's own properties */
1912 node->name = get_property(node, "name", NULL);
1913 node->type = get_property(node, "device_type", NULL);
1920 /* We don't support that function on PowerMac, at least
1923 if (machine_is(powermac))
1926 /* fix up new node's linux_phandle field */
1927 if ((ibm_phandle = (unsigned int *)get_property(node,
1928 "ibm,phandle", NULL)))
1929 node->linux_phandle = *ibm_phandle;
/* drop the reference taken by of_get_parent() above */
1932 of_node_put(parent);
/*
 * pSeries dynamic-reconfiguration notifier: when a node is added,
 * finish initializing it (of_finish_dynamic_node + finish_node).
 */
1936 static int prom_reconfig_notifier(struct notifier_block *nb,
1937 unsigned long action, void *node)
1942 case PSERIES_RECONFIG_ADD:
1943 err = of_finish_dynamic_node(node);
1945 finish_node(node, NULL, 0);
/* NOTE(review): message names finish_node but err appears to come
 * from of_finish_dynamic_node above — confirm against full source */
1947 printk(KERN_ERR "finish_node returned %d\n", err);
/* Notifier block registered with the pSeries reconfig chain. */
1958 static struct notifier_block prom_reconfig_nb = {
1959 .notifier_call = prom_reconfig_notifier,
1960 .priority = 10, /* This one needs to run first */
/* Register the reconfig notifier at boot (initcall). */
1963 static int __init prom_reconfig_setup(void)
1965 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1967 __initcall(prom_reconfig_setup);
/*
 * Look up a property by name on @np, under the devtree read lock.
 * Returns the struct property or NULL (length reporting presumably via
 * the third parameter — declaration is truncated here).
 */
1970 struct property *of_find_property(struct device_node *np, const char *name,
1973 struct property *pp;
1975 read_lock(&devtree_lock);
1976 for (pp = np->properties; pp != 0; pp = pp->next)
1977 if (strcmp(pp->name, name) == 0) {
1982 read_unlock(&devtree_lock);
1988 * Find a property with a given name for a given node
1989 * and return the value.
/*
 * Thin wrapper over of_find_property(): returns the property's value
 * pointer, or NULL if the property does not exist.
 */
1991 unsigned char *get_property(struct device_node *np, const char *name,
1994 struct property *pp = of_find_property(np,name,lenp);
1995 return pp ? pp->value : NULL;
1997 EXPORT_SYMBOL(get_property);
2000 * Add a property to a node
/*
 * Appends @prop to @np's property list under the write lock, rejecting
 * duplicates by name. Also mirrors the addition into /proc/device-tree
 * when that support is compiled in and initialized.
 */
2002 int prom_add_property(struct device_node* np, struct property* prop)
2004 struct property **next;
2007 write_lock(&devtree_lock);
2008 next = &np->properties;
2010 if (strcmp(prop->name, (*next)->name) == 0) {
2011 /* duplicate ! don't insert it */
2012 write_unlock(&devtree_lock);
2015 next = &(*next)->next;
2018 write_unlock(&devtree_lock);
2020 #ifdef CONFIG_PROC_DEVICETREE
2021 /* try to add to proc as well if it was initialized */
2023 proc_device_tree_add_prop(np->pde, prop);
2024 #endif /* CONFIG_PROC_DEVICETREE */
2030 * Remove a property from a node. Note that we don't actually
2031 * remove it, since we have given out who-knows-how-many pointers
2032 * to the data using get-property. Instead we just move the property
2033 * to the "dead properties" list, so it won't be found any more.
2035 int prom_remove_property(struct device_node *np, struct property *prop)
2037 struct property **next;
2040 write_lock(&devtree_lock);
2041 next = &np->properties;
/* walk the list looking for the exact property pointer */
2043 if (*next == prop) {
2044 /* found the node */
/* park it on deadprops so existing value pointers stay valid */
2046 prop->next = np->deadprops;
2047 np->deadprops = prop;
2051 next = &(*next)->next;
2053 write_unlock(&devtree_lock);
2058 #ifdef CONFIG_PROC_DEVICETREE
2059 /* try to remove the proc node as well */
2061 proc_device_tree_remove_prop(np->pde, prop);
2062 #endif /* CONFIG_PROC_DEVICETREE */
2068 * Update a property in a node. Note that we don't actually
2069 * remove it, since we have given out who-knows-how-many pointers
2070 * to the data using get-property. Instead we just move the property
2071 * to the "dead properties" list, and add the new property to the
2074 int prom_update_property(struct device_node *np,
2075 struct property *newprop,
2076 struct property *oldprop)
2078 struct property **next;
2081 write_lock(&devtree_lock);
2082 next = &np->properties;
2084 if (*next == oldprop) {
2085 /* found the node */
/* splice newprop into oldprop's list position ... */
2086 newprop->next = oldprop->next;
/* ... and retire oldprop to deadprops (pointers stay valid) */
2088 oldprop->next = np->deadprops;
2089 np->deadprops = oldprop;
2093 next = &(*next)->next;
2095 write_unlock(&devtree_lock);
2100 #ifdef CONFIG_PROC_DEVICETREE
2101 /* try to add to proc as well if it was initialized */
2103 proc_device_tree_update_prop(np->pde, newprop, oldprop);
2104 #endif /* CONFIG_PROC_DEVICETREE */
2110 /* Find the device node for a given logical cpu number, also returns the cpu
2111 * local thread number (index in ibm,interrupt-server#s) if relevant and
2112 * asked for (non NULL)
2114 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
2117 struct device_node *np;
/* translate logical cpu to the firmware's hardware cpu id */
2119 hardid = get_hard_smp_processor_id(cpu);
2121 for_each_node_by_type(np, "cpu") {
2123 unsigned int plen, t;
2125 /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
2126 * fallback to "reg" property and assume no threads
2128 intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s",
2130 if (intserv == NULL) {
2131 u32 *reg = (u32 *)get_property(np, "reg", NULL);
2134 if (*reg == hardid) {
/* scan each thread's interrupt-server entry for the hard id */
2140 plen /= sizeof(u32);
2141 for (t = 0; t < plen; t++) {
2142 if (hardid == intserv[t]) {
2154 static struct debugfs_blob_wrapper flat_dt_blob;
/*
 * Expose the flattened device tree blob via debugfs at
 * powerpc/flat-device-tree (read-only, root), for debugging tools.
 */
2156 static int __init export_flat_device_tree(void)
2160 d = debugfs_create_dir("powerpc", NULL);
2164 flat_dt_blob.data = initial_boot_params;
2165 flat_dt_blob.size = initial_boot_params->totalsize;
2167 d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
2174 __initcall(export_flat_device_tree);