Diffstat (limited to 'arch/sparc64/kernel/mdesc.c')
-rw-r--r--  arch/sparc64/kernel/mdesc.c | 698
1 file changed, 398 insertions(+), 300 deletions(-)
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index f0e16045fb16..62a389793949 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -6,6 +6,9 @@
 #include <linux/types.h>
 #include <linux/bootmem.h>
 #include <linux/log2.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
 
 #include <asm/hypervisor.h>
 #include <asm/mdesc.h>
@@ -29,7 +32,7 @@ struct mdesc_hdr {
 	u32	node_sz; /* node block size */
 	u32	name_sz; /* name block size */
 	u32	data_sz; /* data block size */
-};
+} __attribute__((aligned(16)));
 
 struct mdesc_elem {
 	u8	tag;
@@ -53,306 +56,402 @@ struct mdesc_elem {
 	} d;
 };
 
-static struct mdesc_hdr *main_mdesc;
-static struct mdesc_node *allnodes;
-
-static struct mdesc_node *allnodes_tail;
-static unsigned int unique_id;
+struct mdesc_mem_ops {
+	struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
+	void (*free)(struct mdesc_handle *handle);
+};
 
-static struct mdesc_node **mdesc_hash;
-static unsigned int mdesc_hash_size;
+struct mdesc_handle {
+	struct list_head	list;
+	struct mdesc_mem_ops	*mops;
+	void			*self_base;
+	atomic_t		refcnt;
+	unsigned int		handle_size;
+	struct mdesc_hdr	mdesc;
+};
 
-static inline unsigned int node_hashfn(u64 node)
+static void mdesc_handle_init(struct mdesc_handle *hp,
+			      unsigned int handle_size,
+			      void *base)
 {
-	return ((unsigned int) (node ^ (node >> 8) ^ (node >> 16)))
-		& (mdesc_hash_size - 1);
+	BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));
+
+	memset(hp, 0, handle_size);
+	INIT_LIST_HEAD(&hp->list);
+	hp->self_base = base;
+	atomic_set(&hp->refcnt, 1);
+	hp->handle_size = handle_size;
 }
 
-static inline void hash_node(struct mdesc_node *mp)
+static struct mdesc_handle *mdesc_bootmem_alloc(unsigned int mdesc_size)
 {
-	struct mdesc_node **head = &mdesc_hash[node_hashfn(mp->node)];
+	struct mdesc_handle *hp;
+	unsigned int handle_size, alloc_size;
 
-	mp->hash_next = *head;
-	*head = mp;
+	handle_size = (sizeof(struct mdesc_handle) -
+		       sizeof(struct mdesc_hdr) +
+		       mdesc_size);
+	alloc_size = PAGE_ALIGN(handle_size);
 
-	if (allnodes_tail) {
-		allnodes_tail->allnodes_next = mp;
-		allnodes_tail = mp;
-	} else {
-		allnodes = allnodes_tail = mp;
-	}
+	hp = __alloc_bootmem(alloc_size, PAGE_SIZE, 0UL);
+	if (hp)
+		mdesc_handle_init(hp, handle_size, hp);
+
+	return hp;
 }
 
-static struct mdesc_node *find_node(u64 node)
+static void mdesc_bootmem_free(struct mdesc_handle *hp)
 {
-	struct mdesc_node *mp = mdesc_hash[node_hashfn(node)];
+	unsigned int alloc_size, handle_size = hp->handle_size;
+	unsigned long start, end;
+
+	BUG_ON(atomic_read(&hp->refcnt) != 0);
+	BUG_ON(!list_empty(&hp->list));
 
-	while (mp) {
-		if (mp->node == node)
-			return mp;
+	alloc_size = PAGE_ALIGN(handle_size);
 
-		mp = mp->hash_next;
+	start = (unsigned long) hp;
+	end = start + alloc_size;
+
+	while (start < end) {
+		struct page *p;
+
+		p = virt_to_page(start);
+		ClearPageReserved(p);
+		__free_page(p);
+		start += PAGE_SIZE;
 	}
-	return NULL;
 }
 
-struct property *md_find_property(const struct mdesc_node *mp,
-				  const char *name,
-				  int *lenp)
+static struct mdesc_mem_ops bootmem_mdesc_memops = {
+	.alloc = mdesc_bootmem_alloc,
+	.free = mdesc_bootmem_free,
+};
+
+static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
 {
-	struct property *pp;
+	unsigned int handle_size;
+	void *base;
 
-	for (pp = mp->properties; pp != 0; pp = pp->next) {
-		if (strcasecmp(pp->name, name) == 0) {
-			if (lenp)
-				*lenp = pp->length;
-			break;
-		}
+	handle_size = (sizeof(struct mdesc_handle) -
+		       sizeof(struct mdesc_hdr) +
+		       mdesc_size);
+
+	base = kmalloc(handle_size + 15, GFP_KERNEL);
+	if (base) {
+		struct mdesc_handle *hp;
+		unsigned long addr;
+
+		addr = (unsigned long)base;
+		addr = (addr + 15UL) & ~15UL;
+		hp = (struct mdesc_handle *) addr;
+
+		mdesc_handle_init(hp, handle_size, base);
+		return hp;
 	}
-	return pp;
+
+	return NULL;
 }
-EXPORT_SYMBOL(md_find_property);
 
-/*
- * Find a property with a given name for a given node
- * and return the value.
- */
-const void *md_get_property(const struct mdesc_node *mp, const char *name,
-			    int *lenp)
+static void mdesc_kfree(struct mdesc_handle *hp)
 {
-	struct property *pp = md_find_property(mp, name, lenp);
-	return pp ? pp->value : NULL;
+	BUG_ON(atomic_read(&hp->refcnt) != 0);
+	BUG_ON(!list_empty(&hp->list));
+
+	kfree(hp->self_base);
 }
-EXPORT_SYMBOL(md_get_property);
 
-struct mdesc_node *md_find_node_by_name(struct mdesc_node *from,
-					const char *name)
+static struct mdesc_mem_ops kmalloc_mdesc_memops = {
+	.alloc = mdesc_kmalloc,
+	.free = mdesc_kfree,
+};
+
+static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
+					struct mdesc_mem_ops *mops)
 {
-	struct mdesc_node *mp;
+	struct mdesc_handle *hp = mops->alloc(mdesc_size);
 
-	mp = from ? from->allnodes_next : allnodes;
-	for (; mp != NULL; mp = mp->allnodes_next) {
-		if (strcmp(mp->name, name) == 0)
-			break;
-	}
-	return mp;
-}
-EXPORT_SYMBOL(md_find_node_by_name);
+	if (hp)
+		hp->mops = mops;
 
-static unsigned int mdesc_early_allocated;
+	return hp;
+}
 
-static void * __init mdesc_early_alloc(unsigned long size)
+static void mdesc_free(struct mdesc_handle *hp)
 {
-	void *ret;
+	hp->mops->free(hp);
+}
 
-	ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
-	if (ret == NULL) {
-		prom_printf("MDESC: alloc of %lu bytes failed.\n", size);
-		prom_halt();
-	}
+static struct mdesc_handle *cur_mdesc;
+static LIST_HEAD(mdesc_zombie_list);
+static DEFINE_SPINLOCK(mdesc_lock);
 
-	memset(ret, 0, size);
+struct mdesc_handle *mdesc_grab(void)
+{
+	struct mdesc_handle *hp;
+	unsigned long flags;
 
-	mdesc_early_allocated += size;
+	spin_lock_irqsave(&mdesc_lock, flags);
+	hp = cur_mdesc;
+	if (hp)
+		atomic_inc(&hp->refcnt);
+	spin_unlock_irqrestore(&mdesc_lock, flags);
 
-	return ret;
+	return hp;
 }
+EXPORT_SYMBOL(mdesc_grab);
 
-static unsigned int __init count_arcs(struct mdesc_elem *ep)
+void mdesc_release(struct mdesc_handle *hp)
 {
-	unsigned int ret = 0;
+	unsigned long flags;
 
-	ep++;
-	while (ep->tag != MD_NODE_END) {
-		if (ep->tag == MD_PROP_ARC)
-			ret++;
-		ep++;
+	spin_lock_irqsave(&mdesc_lock, flags);
+	if (atomic_dec_and_test(&hp->refcnt)) {
+		list_del_init(&hp->list);
+		hp->mops->free(hp);
 	}
-	return ret;
+	spin_unlock_irqrestore(&mdesc_lock, flags);
 }
+EXPORT_SYMBOL(mdesc_release);
 
-static void __init mdesc_node_alloc(u64 node, struct mdesc_elem *ep, const char *names)
+static void do_mdesc_update(struct work_struct *work)
 {
-	unsigned int num_arcs = count_arcs(ep);
-	struct mdesc_node *mp;
+	unsigned long len, real_len, status;
+	struct mdesc_handle *hp, *orig_hp;
+	unsigned long flags;
+
+	(void) sun4v_mach_desc(0UL, 0UL, &len);
+
+	hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
+	if (!hp) {
+		printk(KERN_ERR "MD: mdesc alloc fails\n");
+		return;
+	}
+
+	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
+	if (status != HV_EOK || real_len > len) {
+		printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
+		       status);
+		atomic_dec(&hp->refcnt);
+		mdesc_free(hp);
+		return;
+	}
 
-	mp = mdesc_early_alloc(sizeof(*mp) +
-			       (num_arcs * sizeof(struct mdesc_arc)));
-	mp->name = names + ep->name_offset;
-	mp->node = node;
-	mp->unique_id = unique_id++;
-	mp->num_arcs = num_arcs;
+	spin_lock_irqsave(&mdesc_lock, flags);
+	orig_hp = cur_mdesc;
+	cur_mdesc = hp;
 
-	hash_node(mp);
+	if (atomic_dec_and_test(&orig_hp->refcnt))
+		mdesc_free(orig_hp);
+	else
+		list_add(&orig_hp->list, &mdesc_zombie_list);
+	spin_unlock_irqrestore(&mdesc_lock, flags);
 }
 
-static inline struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
+static DECLARE_WORK(mdesc_update_work, do_mdesc_update);
+
+void mdesc_update(void)
+{
+	schedule_work(&mdesc_update_work);
+}
+
+static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
 {
 	return (struct mdesc_elem *) (mdesc + 1);
 }
 
-static inline void *name_block(struct mdesc_hdr *mdesc)
+static void *name_block(struct mdesc_hdr *mdesc)
 {
 	return ((void *) node_block(mdesc)) + mdesc->node_sz;
 }
 
-static inline void *data_block(struct mdesc_hdr *mdesc)
+static void *data_block(struct mdesc_hdr *mdesc)
 {
 	return ((void *) name_block(mdesc)) + mdesc->name_sz;
 }
 
-/* In order to avoid recursion (the graph can be very deep) we use a
- * two pass algorithm. First we allocate all the nodes and hash them.
- * Then we iterate over each node, filling in the arcs and properties.
- */
-static void __init build_all_nodes(struct mdesc_hdr *mdesc)
+u64 mdesc_node_by_name(struct mdesc_handle *hp,
+		       u64 from_node, const char *name)
 {
-	struct mdesc_elem *start, *ep;
-	struct mdesc_node *mp;
-	const char *names;
-	void *data;
-	u64 last_node;
-
-	start = ep = node_block(mdesc);
-	last_node = mdesc->node_sz / 16;
+	struct mdesc_elem *ep = node_block(&hp->mdesc);
+	const char *names = name_block(&hp->mdesc);
+	u64 last_node = hp->mdesc.node_sz / 16;
+	u64 ret;
+
+	if (from_node == MDESC_NODE_NULL)
+		from_node = 0;
+
+	if (from_node >= last_node)
+		return MDESC_NODE_NULL;
+
+	ret = ep[from_node].d.val;
+	while (ret < last_node) {
+		if (ep[ret].tag != MD_NODE)
+			return MDESC_NODE_NULL;
+		if (!strcmp(names + ep[ret].name_offset, name))
+			break;
+		ret = ep[ret].d.val;
+	}
+	if (ret >= last_node)
+		ret = MDESC_NODE_NULL;
+	return ret;
+}
+EXPORT_SYMBOL(mdesc_node_by_name);
 
-	names = name_block(mdesc);
+const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
+			       const char *name, int *lenp)
+{
+	const char *names = name_block(&hp->mdesc);
+	u64 last_node = hp->mdesc.node_sz / 16;
+	void *data = data_block(&hp->mdesc);
+	struct mdesc_elem *ep;
 
-	while (1) {
-		u64 node = ep - start;
+	if (node == MDESC_NODE_NULL || node >= last_node)
+		return NULL;
 
-		if (ep->tag == MD_LIST_END)
+	ep = node_block(&hp->mdesc) + node;
+	ep++;
+	for (; ep->tag != MD_NODE_END; ep++) {
+		void *val = NULL;
+		int len = 0;
+
+		switch (ep->tag) {
+		case MD_PROP_VAL:
+			val = &ep->d.val;
+			len = 8;
 			break;
 
-		if (ep->tag != MD_NODE) {
-			prom_printf("MDESC: Inconsistent element list.\n");
-			prom_halt();
-		}
-
-		mdesc_node_alloc(node, ep, names);
+		case MD_PROP_STR:
+		case MD_PROP_DATA:
+			val = data + ep->d.data.data_offset;
+			len = ep->d.data.data_len;
+			break;
 
-		if (ep->d.val >= last_node) {
-			printk("MDESC: Warning, early break out of node scan.\n");
-			printk("MDESC: Next node [%lu] last_node [%lu].\n",
-			       node, last_node);
+		default:
 			break;
 		}
+		if (!val)
+			continue;
 
-		ep = start + ep->d.val;
+		if (!strcmp(names + ep->name_offset, name)) {
+			if (lenp)
+				*lenp = len;
+			return val;
+		}
 	}
 
-	data = data_block(mdesc);
-	for (mp = allnodes; mp; mp = mp->allnodes_next) {
-		struct mdesc_elem *ep = start + mp->node;
-		struct property **link = &mp->properties;
-		unsigned int this_arc = 0;
-
-		ep++;
-		while (ep->tag != MD_NODE_END) {
-			switch (ep->tag) {
-			case MD_PROP_ARC: {
-				struct mdesc_node *target;
-
-				if (this_arc >= mp->num_arcs) {
-					prom_printf("MDESC: ARC overrun [%u:%u]\n",
-						    this_arc, mp->num_arcs);
-					prom_halt();
-				}
-				target = find_node(ep->d.val);
-				if (!target) {
-					printk("MDESC: Warning, arc points to "
-					       "missing node, ignoring.\n");
-					break;
-				}
-				mp->arcs[this_arc].name =
-					(names + ep->name_offset);
-				mp->arcs[this_arc].arc = target;
-				this_arc++;
-				break;
-			}
+	return NULL;
+}
+EXPORT_SYMBOL(mdesc_get_property);
 
-			case MD_PROP_VAL:
-			case MD_PROP_STR:
-			case MD_PROP_DATA: {
-				struct property *p = mdesc_early_alloc(sizeof(*p));
-
-				p->unique_id = unique_id++;
-				p->name = (char *) names + ep->name_offset;
-				if (ep->tag == MD_PROP_VAL) {
-					p->value = &ep->d.val;
-					p->length = 8;
-				} else {
-					p->value = data + ep->d.data.data_offset;
-					p->length = ep->d.data.data_len;
-				}
-				*link = p;
-				link = &p->next;
-				break;
-			}
+u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
+{
+	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
+	const char *names = name_block(&hp->mdesc);
+	u64 last_node = hp->mdesc.node_sz / 16;
 
-			case MD_NOOP:
-				break;
+	if (from == MDESC_NODE_NULL || from >= last_node)
+		return MDESC_NODE_NULL;
 
-			default:
-				printk("MDESC: Warning, ignoring unknown tag type %02x\n",
-				       ep->tag);
-			}
-			ep++;
-		}
+	ep = base + from;
+
+	ep++;
+	for (; ep->tag != MD_NODE_END; ep++) {
+		if (ep->tag != MD_PROP_ARC)
+			continue;
+
+		if (strcmp(names + ep->name_offset, arc_type))
+			continue;
+
+		return ep - base;
 	}
+
+	return MDESC_NODE_NULL;
 }
+EXPORT_SYMBOL(mdesc_next_arc);
 
-static unsigned int __init count_nodes(struct mdesc_hdr *mdesc)
+u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
 {
-	struct mdesc_elem *ep = node_block(mdesc);
-	struct mdesc_elem *end;
-	unsigned int cnt = 0;
+	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
+
+	ep = base + arc;
 
-	end = ((void *)ep) + mdesc->node_sz;
-	while (ep < end) {
-		if (ep->tag == MD_NODE)
-			cnt++;
-		ep++;
-	}
-	return cnt;
+	return ep->d.val;
+}
+EXPORT_SYMBOL(mdesc_arc_target);
+
+const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
+{
+	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
+	const char *names = name_block(&hp->mdesc);
+	u64 last_node = hp->mdesc.node_sz / 16;
+
+	if (node == MDESC_NODE_NULL || node >= last_node)
+		return NULL;
+
+	ep = base + node;
+	if (ep->tag != MD_NODE)
+		return NULL;
+
+	return names + ep->name_offset;
 }
+EXPORT_SYMBOL(mdesc_node_name);
 
 static void __init report_platform_properties(void)
 {
-	struct mdesc_node *pn = md_find_node_by_name(NULL, "platform");
+	struct mdesc_handle *hp = mdesc_grab();
+	u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
 	const char *s;
 	const u64 *v;
 
-	if (!pn) {
+	if (pn == MDESC_NODE_NULL) {
 		prom_printf("No platform node in machine-description.\n");
 		prom_halt();
 	}
 
-	s = md_get_property(pn, "banner-name", NULL);
+	s = mdesc_get_property(hp, pn, "banner-name", NULL);
 	printk("PLATFORM: banner-name [%s]\n", s);
-	s = md_get_property(pn, "name", NULL);
+	s = mdesc_get_property(hp, pn, "name", NULL);
 	printk("PLATFORM: name [%s]\n", s);
 
-	v = md_get_property(pn, "hostid", NULL);
+	v = mdesc_get_property(hp, pn, "hostid", NULL);
 	if (v)
 		printk("PLATFORM: hostid [%08lx]\n", *v);
-	v = md_get_property(pn, "serial#", NULL);
+	v = mdesc_get_property(hp, pn, "serial#", NULL);
 	if (v)
 		printk("PLATFORM: serial# [%08lx]\n", *v);
-	v = md_get_property(pn, "stick-frequency", NULL);
+	v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
 	printk("PLATFORM: stick-frequency [%08lx]\n", *v);
-	v = md_get_property(pn, "mac-address", NULL);
+	v = mdesc_get_property(hp, pn, "mac-address", NULL);
 	if (v)
 		printk("PLATFORM: mac-address [%lx]\n", *v);
-	v = md_get_property(pn, "watchdog-resolution", NULL);
+	v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
 	if (v)
 		printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v);
-	v = md_get_property(pn, "watchdog-max-timeout", NULL);
+	v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
 	if (v)
 		printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v);
-	v = md_get_property(pn, "max-cpus", NULL);
+	v = mdesc_get_property(hp, pn, "max-cpus", NULL);
 	if (v)
 		printk("PLATFORM: max-cpus [%lu]\n", *v);
+
+#ifdef CONFIG_SMP
+	{
+		int max_cpu, i;
+
+		if (v) {
+			max_cpu = *v;
+			if (max_cpu > NR_CPUS)
+				max_cpu = NR_CPUS;
+		} else {
+			max_cpu = NR_CPUS;
+		}
+		for (i = 0; i < max_cpu; i++)
+			cpu_set(i, cpu_possible_map);
+	}
+#endif
+
+	mdesc_release(hp);
 }
 
 static int inline find_in_proplist(const char *list, const char *match, int len)
@@ -369,15 +468,17 @@ static int inline find_in_proplist(const char *list, const char *match, int len)
 	return 0;
 }
 
-static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp)
+static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
+					struct mdesc_handle *hp,
+					u64 mp)
 {
-	const u64 *level = md_get_property(mp, "level", NULL);
-	const u64 *size = md_get_property(mp, "size", NULL);
-	const u64 *line_size = md_get_property(mp, "line-size", NULL);
+	const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
+	const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
+	const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
 	const char *type;
 	int type_len;
 
-	type = md_get_property(mp, "type", &type_len);
+	type = mdesc_get_property(hp, mp, "type", &type_len);
 
 	switch (*level) {
 	case 1:
@@ -400,48 +501,45 @@ static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp)
 	}
 
 	if (*level == 1) {
-		unsigned int i;
-
-		for (i = 0; i < mp->num_arcs; i++) {
-			struct mdesc_node *t = mp->arcs[i].arc;
+		u64 a;
 
-			if (strcmp(mp->arcs[i].name, "fwd"))
-				continue;
+		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+			u64 target = mdesc_arc_target(hp, a);
+			const char *name = mdesc_node_name(hp, target);
 
-			if (!strcmp(t->name, "cache"))
-				fill_in_one_cache(c, t);
+			if (!strcmp(name, "cache"))
+				fill_in_one_cache(c, hp, target);
 		}
 	}
 }
 
-static void __init mark_core_ids(struct mdesc_node *mp, int core_id)
+static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
+				    int core_id)
 {
-	unsigned int i;
+	u64 a;
 
-	for (i = 0; i < mp->num_arcs; i++) {
-		struct mdesc_node *t = mp->arcs[i].arc;
+	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
+		u64 t = mdesc_arc_target(hp, a);
+		const char *name;
 		const u64 *id;
 
-		if (strcmp(mp->arcs[i].name, "back"))
-			continue;
-
-		if (!strcmp(t->name, "cpu")) {
-			id = md_get_property(t, "id", NULL);
+		name = mdesc_node_name(hp, t);
+		if (!strcmp(name, "cpu")) {
+			id = mdesc_get_property(hp, t, "id", NULL);
 			if (*id < NR_CPUS)
 				cpu_data(*id).core_id = core_id;
 		} else {
-			unsigned int j;
+			u64 j;
 
-			for (j = 0; j < t->num_arcs; j++) {
-				struct mdesc_node *n = t->arcs[j].arc;
+			mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
+				u64 n = mdesc_arc_target(hp, j);
+				const char *n_name;
 
-				if (strcmp(t->arcs[j].name, "back"))
+				n_name = mdesc_node_name(hp, n);
+				if (strcmp(n_name, "cpu"))
 					continue;
 
-				if (strcmp(n->name, "cpu"))
-					continue;
-
-				id = md_get_property(n, "id", NULL);
+				id = mdesc_get_property(hp, n, "id", NULL);
 				if (*id < NR_CPUS)
 					cpu_data(*id).core_id = core_id;
 			}
@@ -449,78 +547,81 @@ static void __init mark_core_ids(struct mdesc_node *mp, int core_id)
 	}
 }
 
-static void __init set_core_ids(void)
+static void __devinit set_core_ids(struct mdesc_handle *hp)
 {
-	struct mdesc_node *mp;
 	int idx;
+	u64 mp;
 
 	idx = 1;
-	md_for_each_node_by_name(mp, "cache") {
-		const u64 *level = md_get_property(mp, "level", NULL);
+	mdesc_for_each_node_by_name(hp, mp, "cache") {
+		const u64 *level;
 		const char *type;
 		int len;
 
+		level = mdesc_get_property(hp, mp, "level", NULL);
 		if (*level != 1)
 			continue;
 
-		type = md_get_property(mp, "type", &len);
+		type = mdesc_get_property(hp, mp, "type", &len);
 		if (!find_in_proplist(type, "instn", len))
 			continue;
 
-		mark_core_ids(mp, idx);
+		mark_core_ids(hp, mp, idx);
 
 		idx++;
 	}
 }
 
-static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
+static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
+				    int proc_id)
 {
-	int i;
+	u64 a;
 
-	for (i = 0; i < mp->num_arcs; i++) {
-		struct mdesc_node *t = mp->arcs[i].arc;
+	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
+		u64 t = mdesc_arc_target(hp, a);
+		const char *name;
 		const u64 *id;
 
-		if (strcmp(mp->arcs[i].name, "back"))
-			continue;
-
-		if (strcmp(t->name, "cpu"))
+		name = mdesc_node_name(hp, t);
+		if (strcmp(name, "cpu"))
 			continue;
 
-		id = md_get_property(t, "id", NULL);
+		id = mdesc_get_property(hp, t, "id", NULL);
 		if (*id < NR_CPUS)
 			cpu_data(*id).proc_id = proc_id;
 	}
 }
 
-static void __init __set_proc_ids(const char *exec_unit_name)
+static void __devinit __set_proc_ids(struct mdesc_handle *hp,
+				     const char *exec_unit_name)
 {
-	struct mdesc_node *mp;
 	int idx;
+	u64 mp;
 
 	idx = 0;
-	md_for_each_node_by_name(mp, exec_unit_name) {
+	mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
 		const char *type;
 		int len;
 
-		type = md_get_property(mp, "type", &len);
+		type = mdesc_get_property(hp, mp, "type", &len);
 		if (!find_in_proplist(type, "int", len) &&
 		    !find_in_proplist(type, "integer", len))
 			continue;
 
-		mark_proc_ids(mp, idx);
+		mark_proc_ids(hp, mp, idx);
 
 		idx++;
 	}
 }
 
-static void __init set_proc_ids(void)
+static void __devinit set_proc_ids(struct mdesc_handle *hp)
 {
-	__set_proc_ids("exec_unit");
-	__set_proc_ids("exec-unit");
+	__set_proc_ids(hp, "exec_unit");
+	__set_proc_ids(hp, "exec-unit");
 }
 
-static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
+static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
+					 unsigned char def)
 {
 	u64 val;
 
@@ -538,35 +639,37 @@ use_default:
 	*mask = ((1U << def) * 64U) - 1U;
 }
 
-static void __init get_mondo_data(struct mdesc_node *mp, struct trap_per_cpu *tb)
+static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
+				     struct trap_per_cpu *tb)
 {
 	const u64 *val;
 
-	val = md_get_property(mp, "q-cpu-mondo-#bits", NULL);
+	val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
 	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);
 
-	val = md_get_property(mp, "q-dev-mondo-#bits", NULL);
+	val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
 	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);
 
-	val = md_get_property(mp, "q-resumable-#bits", NULL);
+	val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
 	get_one_mondo_bits(val, &tb->resum_qmask, 6);
 
-	val = md_get_property(mp, "q-nonresumable-#bits", NULL);
+	val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
 	get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
 }
 
-static void __init mdesc_fill_in_cpu_data(void)
+void __devinit mdesc_fill_in_cpu_data(cpumask_t mask)
 {
-	struct mdesc_node *mp;
+	struct mdesc_handle *hp = mdesc_grab();
+	u64 mp;
 
 	ncpus_probed = 0;
-	md_for_each_node_by_name(mp, "cpu") {
-		const u64 *id = md_get_property(mp, "id", NULL);
-		const u64 *cfreq = md_get_property(mp, "clock-frequency", NULL);
+	mdesc_for_each_node_by_name(hp, mp, "cpu") {
+		const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
+		const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
 		struct trap_per_cpu *tb;
 		cpuinfo_sparc *c;
-		unsigned int i;
 		int cpuid;
+		u64 a;
 
 		ncpus_probed++;
 
@@ -575,6 +678,8 @@ static void __init mdesc_fill_in_cpu_data(void)
 #ifdef CONFIG_SMP
 		if (cpuid >= NR_CPUS)
 			continue;
+		if (!cpu_isset(cpuid, mask))
+			continue;
 #else
 		/* On uniprocessor we only want the values for the
 		 * real physical cpu the kernel booted onto, however
@@ -589,35 +694,30 @@ static void __init mdesc_fill_in_cpu_data(void)
 		c->clock_tick = *cfreq;
 
 		tb = &trap_block[cpuid];
-		get_mondo_data(mp, tb);
-
-		for (i = 0; i < mp->num_arcs; i++) {
-			struct mdesc_node *t = mp->arcs[i].arc;
-			unsigned int j;
+		get_mondo_data(hp, mp, tb);
 
-			if (strcmp(mp->arcs[i].name, "fwd"))
-				continue;
+		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+			u64 j, t = mdesc_arc_target(hp, a);
+			const char *t_name;
 
-			if (!strcmp(t->name, "cache")) {
-				fill_in_one_cache(c, t);
+			t_name = mdesc_node_name(hp, t);
+			if (!strcmp(t_name, "cache")) {
+				fill_in_one_cache(c, hp, t);
 				continue;
 			}
 
-			for (j = 0; j < t->num_arcs; j++) {
-				struct mdesc_node *n;
+			mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
+				u64 n = mdesc_arc_target(hp, j);
+				const char *n_name;
 
-				n = t->arcs[j].arc;
-				if (strcmp(t->arcs[j].name, "fwd"))
-					continue;
-
-				if (!strcmp(n->name, "cache"))
-					fill_in_one_cache(c, n);
+				n_name = mdesc_node_name(hp, n);
+				if (!strcmp(n_name, "cache"))
+					fill_in_one_cache(c, hp, n);
 			}
 		}
 
 #ifdef CONFIG_SMP
 		cpu_set(cpuid, cpu_present_map);
-		cpu_set(cpuid, phys_cpu_present_map);
 #endif
 
 		c->core_id = 0;
@@ -628,45 +728,43 @@ static void __init mdesc_fill_in_cpu_data(void)
 	sparc64_multi_core = 1;
 #endif
 
-	set_core_ids();
-	set_proc_ids();
+	set_core_ids(hp);
+	set_proc_ids(hp);
 
 	smp_fill_in_sib_core_maps();
+
+	mdesc_release(hp);
 }
 
 void __init sun4v_mdesc_init(void)
 {
+	struct mdesc_handle *hp;
 	unsigned long len, real_len, status;
+	cpumask_t mask;
 
 	(void) sun4v_mach_desc(0UL, 0UL, &len);
 
 	printk("MDESC: Size is %lu bytes.\n", len);
 
-	main_mdesc = mdesc_early_alloc(len);
+	hp = mdesc_alloc(len, &bootmem_mdesc_memops);
+	if (hp == NULL) {
+		prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
+		prom_halt();
+	}
 
-	status = sun4v_mach_desc(__pa(main_mdesc), len, &real_len);
+	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
 	if (status != HV_EOK || real_len > len) {
 		prom_printf("sun4v_mach_desc fails, err(%lu), "
 			    "len(%lu), real_len(%lu)\n",
 			    status, len, real_len);
+		mdesc_free(hp);
 		prom_halt();
 	}
 
-	len = count_nodes(main_mdesc);
-	printk("MDESC: %lu nodes.\n", len);
-
-	len = roundup_pow_of_two(len);
-
-	mdesc_hash = mdesc_early_alloc(len * sizeof(struct mdesc_node *));
-	mdesc_hash_size = len;
-
-	printk("MDESC: Hash size %lu entries.\n", len);
-
-	build_all_nodes(main_mdesc);
-
-	printk("MDESC: Built graph with %u bytes of memory.\n",
-	       mdesc_early_allocated);
+	cur_mdesc = hp;
 
 	report_platform_properties();
-	mdesc_fill_in_cpu_data();
+
+	cpus_setall(mask);
+	mdesc_fill_in_cpu_data(mask);
 }
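
The patch replaces the old md_find_node_by_name()/md_get_property() graph walk with reference-counted mdesc_handle accessors. A minimal usage sketch of the new interface, assuming only what the diff itself exports (mdesc_grab(), mdesc_node_by_name(), mdesc_get_property(), mdesc_release() and the MDESC_NODE_NULL sentinel from <asm/mdesc.h>); the helper name and the properties printed here are illustrative, not part of the patch:

#include <linux/kernel.h>
#include <asm/mdesc.h>

/* Illustrative helper (not in the patch): walk every "cpu" node in the
 * current machine description and print its id and clock frequency.
 */
static void report_cpu_clocks(void)
{
	struct mdesc_handle *hp = mdesc_grab();	/* take a reference */
	u64 node;

	if (!hp)
		return;

	for (node = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
	     node != MDESC_NODE_NULL;
	     node = mdesc_node_by_name(hp, node, "cpu")) {
		const u64 *id = mdesc_get_property(hp, node, "id", NULL);
		const u64 *freq = mdesc_get_property(hp, node,
						     "clock-frequency", NULL);

		if (id && freq)
			printk(KERN_INFO "cpu%lu: %lu Hz\n", *id, *freq);
	}

	mdesc_release(hp);	/* drop the reference so an MD update can free it */
}

Holding the handle across the walk is what keeps a concurrent mdesc_update() from freeing the buffer underneath the caller; the release either frees a zombie handle or just drops the refcount on the current one.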