author     David S. Miller <davem@sunset.davemloft.net>  2007-07-12 16:47:50 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-07-16 07:04:28 -0400
commit     43fdf27470b216ebdef47e09ff83bed2f2894b13 (patch)
tree       76b9b838089e5679471026037c93325c228df84a /arch/sparc64
parent     133f09a169f3022be3de671b29658b7ecb375022 (diff)
[SPARC64]: Abstract out mdesc accesses for better MD update handling.
Since we have to be able to handle MD updates, having an in-tree
set of data structures representing the MD objects actually makes
things more painful.
The MD itself is easy to parse, and we can implement the existing
interfaces using direct parsing of the MD binary image.
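For reference, a sketch of why direct parsing is cheap: the image is a small
header followed by three contiguous blocks (node elements, name strings,
property data), so the block bases fall straight out of the header sizes.
This mirrors the helpers added in mdesc.c:

static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
	/* 16-byte node/property elements start right after the header */
	return (struct mdesc_elem *) (mdesc + 1);
}

static void *name_block(struct mdesc_hdr *mdesc)
{
	/* name strings follow the node block */
	return ((void *) node_block(mdesc)) + mdesc->node_sz;
}

static void *data_block(struct mdesc_hdr *mdesc)
{
	/* raw property data follows the name block */
	return ((void *) name_block(mdesc)) + mdesc->name_sz;
}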
The MD is now reference counted, so accesses now have to take the
form:
handle = mdesc_grab();
... operations on MD ...
mdesc_release(handle);
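For example, a converted consumer now looks roughly like this (an
illustrative sketch modeled on report_platform_properties() below;
example_read_hostid() itself is made up):

static int example_read_hostid(u64 *hostid)
{
	struct mdesc_handle *hp = mdesc_grab();
	const u64 *v;
	u64 pn;
	int err = -ENODEV;

	if (!hp)
		return -ENODEV;

	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	if (pn != MDESC_NODE_NULL) {
		v = mdesc_get_property(hp, pn, "hostid", NULL);
		if (v) {
			/* copy the value out while the reference is held */
			*hostid = *v;
			err = 0;
		}
	}

	mdesc_release(hp);
	return err;
}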
The only remaining issue is code that holds on to references to MD
property values. mdesc_get_property() returns a direct pointer to the
property value; most callers just pull in the information they need
and discard the pointer, but a few use the pointer directly over a
long lifetime. Those will be fixed up in a subsequent changeset.
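Until then, the safe pattern is to copy whatever is needed out of the
property before dropping the handle, along these lines (illustrative
sketch only; the helper and buffer are made up):

static void example_save_banner(char *buf, size_t buflen)
{
	struct mdesc_handle *hp = mdesc_grab();
	const char *s;
	u64 pn;

	if (!hp)
		return;

	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	if (pn != MDESC_NODE_NULL) {
		s = mdesc_get_property(hp, pn, "banner-name", NULL);
		if (s)
			strlcpy(buf, s, buflen);	/* copy, do not cache 's' */
	}

	mdesc_release(hp);
}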
A preliminary handler for MD update events from domain services is
included. It is rudimentary, but it works and handles all of the
reference counting. It does not yet check the generation number of
the MDs, and it does not yet generate an "add/delete" list to notify
interested parties about MD changes; both will be forthcoming.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/ds.c    |  22
-rw-r--r--  arch/sparc64/kernel/ldc.c   |  13
-rw-r--r--  arch/sparc64/kernel/mdesc.c | 666
-rw-r--r--  arch/sparc64/kernel/vio.c   | 143
-rw-r--r--  arch/sparc64/kernel/viohs.c |  35
5 files changed, 471 insertions, 408 deletions
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
index c7ece8c52039..9c8839d1cffd 100644
--- a/arch/sparc64/kernel/ds.c
+++ b/arch/sparc64/kernel/ds.c
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/ldc.h> | 15 | #include <asm/ldc.h> |
16 | #include <asm/vio.h> | 16 | #include <asm/vio.h> |
17 | #include <asm/power.h> | 17 | #include <asm/power.h> |
18 | #include <asm/mdesc.h> | ||
18 | 19 | ||
19 | #define DRV_MODULE_NAME "ds" | 20 | #define DRV_MODULE_NAME "ds" |
20 | #define PFX DRV_MODULE_NAME ": " | 21 | #define PFX DRV_MODULE_NAME ": " |
@@ -170,8 +171,7 @@ static void md_update_data(struct ldc_channel *lp, | |||
170 | 171 | ||
171 | rp = (struct ds_md_update_req *) (dpkt + 1); | 172 | rp = (struct ds_md_update_req *) (dpkt + 1); |
172 | 173 | ||
173 | printk(KERN_ERR PFX "MD update REQ [%lx] len=%d\n", | 174 | printk(KERN_ERR PFX "Machine description update.\n"); |
174 | rp->req_num, len); | ||
175 | 175 | ||
176 | memset(&pkt, 0, sizeof(pkt)); | 176 | memset(&pkt, 0, sizeof(pkt)); |
177 | pkt.data.tag.type = DS_DATA; | 177 | pkt.data.tag.type = DS_DATA; |
@@ -181,6 +181,8 @@ static void md_update_data(struct ldc_channel *lp, | |||
181 | pkt.res.result = DS_OK; | 181 | pkt.res.result = DS_OK; |
182 | 182 | ||
183 | ds_send(lp, &pkt, sizeof(pkt)); | 183 | ds_send(lp, &pkt, sizeof(pkt)); |
184 | |||
185 | mdesc_update(); | ||
184 | } | 186 | } |
185 | 187 | ||
186 | struct ds_shutdown_req { | 188 | struct ds_shutdown_req { |
@@ -555,7 +557,6 @@ static int __devinit ds_probe(struct vio_dev *vdev, | |||
555 | const struct vio_device_id *id) | 557 | const struct vio_device_id *id) |
556 | { | 558 | { |
557 | static int ds_version_printed; | 559 | static int ds_version_printed; |
558 | struct mdesc_node *endp; | ||
559 | struct ldc_channel_config ds_cfg = { | 560 | struct ldc_channel_config ds_cfg = { |
560 | .event = ds_event, | 561 | .event = ds_event, |
561 | .mtu = 4096, | 562 | .mtu = 4096, |
@@ -563,20 +564,11 @@ static int __devinit ds_probe(struct vio_dev *vdev, | |||
563 | }; | 564 | }; |
564 | struct ldc_channel *lp; | 565 | struct ldc_channel *lp; |
565 | struct ds_info *dp; | 566 | struct ds_info *dp; |
566 | const u64 *chan_id; | ||
567 | int err; | 567 | int err; |
568 | 568 | ||
569 | if (ds_version_printed++ == 0) | 569 | if (ds_version_printed++ == 0) |
570 | printk(KERN_INFO "%s", version); | 570 | printk(KERN_INFO "%s", version); |
571 | 571 | ||
572 | endp = vio_find_endpoint(vdev); | ||
573 | if (!endp) | ||
574 | return -ENODEV; | ||
575 | |||
576 | chan_id = md_get_property(endp, "id", NULL); | ||
577 | if (!chan_id) | ||
578 | return -ENODEV; | ||
579 | |||
580 | dp = kzalloc(sizeof(*dp), GFP_KERNEL); | 572 | dp = kzalloc(sizeof(*dp), GFP_KERNEL); |
581 | err = -ENOMEM; | 573 | err = -ENOMEM; |
582 | if (!dp) | 574 | if (!dp) |
@@ -588,10 +580,10 @@ static int __devinit ds_probe(struct vio_dev *vdev, | |||
588 | 580 | ||
589 | dp->rcv_buf_len = 4096; | 581 | dp->rcv_buf_len = 4096; |
590 | 582 | ||
591 | ds_cfg.tx_irq = endp->irqs[0]; | 583 | ds_cfg.tx_irq = vdev->tx_irq; |
592 | ds_cfg.rx_irq = endp->irqs[1]; | 584 | ds_cfg.rx_irq = vdev->rx_irq; |
593 | 585 | ||
594 | lp = ldc_alloc(*chan_id, &ds_cfg, dp); | 586 | lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp); |
595 | if (IS_ERR(lp)) { | 587 | if (IS_ERR(lp)) { |
596 | err = PTR_ERR(lp); | 588 | err = PTR_ERR(lp); |
597 | goto out_free_rcv_buf; | 589 | goto out_free_rcv_buf; |
diff --git a/arch/sparc64/kernel/ldc.c b/arch/sparc64/kernel/ldc.c
index dbb65b674a67..85a2be0b0962 100644
--- a/arch/sparc64/kernel/ldc.c
+++ b/arch/sparc64/kernel/ldc.c
@@ -2335,15 +2335,20 @@ EXPORT_SYMBOL(ldc_free_exp_dring); | |||
2335 | 2335 | ||
2336 | static int __init ldc_init(void) | 2336 | static int __init ldc_init(void) |
2337 | { | 2337 | { |
2338 | struct mdesc_node *mp; | ||
2339 | unsigned long major, minor; | 2338 | unsigned long major, minor; |
2339 | struct mdesc_handle *hp; | ||
2340 | const u64 *v; | 2340 | const u64 *v; |
2341 | u64 mp; | ||
2341 | 2342 | ||
2342 | mp = md_find_node_by_name(NULL, "platform"); | 2343 | hp = mdesc_grab(); |
2343 | if (!mp) | 2344 | if (!hp) |
2344 | return -ENODEV; | 2345 | return -ENODEV; |
2345 | 2346 | ||
2346 | v = md_get_property(mp, "domaining-enabled", NULL); | 2347 | mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform"); |
2348 | if (mp == MDESC_NODE_NULL) | ||
2349 | return -ENODEV; | ||
2350 | |||
2351 | v = mdesc_get_property(hp, mp, "domaining-enabled", NULL); | ||
2347 | if (!v) | 2352 | if (!v) |
2348 | return -ENODEV; | 2353 | return -ENODEV; |
2349 | 2354 | ||
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index f0e16045fb16..9e5088d563cc 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -6,6 +6,8 @@ | |||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/bootmem.h> | 7 | #include <linux/bootmem.h> |
8 | #include <linux/log2.h> | 8 | #include <linux/log2.h> |
9 | #include <linux/list.h> | ||
10 | #include <linux/slab.h> | ||
9 | 11 | ||
10 | #include <asm/hypervisor.h> | 12 | #include <asm/hypervisor.h> |
11 | #include <asm/mdesc.h> | 13 | #include <asm/mdesc.h> |
@@ -29,7 +31,7 @@ struct mdesc_hdr { | |||
29 | u32 node_sz; /* node block size */ | 31 | u32 node_sz; /* node block size */ |
30 | u32 name_sz; /* name block size */ | 32 | u32 name_sz; /* name block size */ |
31 | u32 data_sz; /* data block size */ | 33 | u32 data_sz; /* data block size */ |
32 | }; | 34 | } __attribute__((aligned(16))); |
33 | 35 | ||
34 | struct mdesc_elem { | 36 | struct mdesc_elem { |
35 | u8 tag; | 37 | u8 tag; |
@@ -53,306 +55,386 @@ struct mdesc_elem { | |||
53 | } d; | 55 | } d; |
54 | }; | 56 | }; |
55 | 57 | ||
56 | static struct mdesc_hdr *main_mdesc; | 58 | struct mdesc_mem_ops { |
57 | static struct mdesc_node *allnodes; | 59 | struct mdesc_handle *(*alloc)(unsigned int mdesc_size); |
58 | 60 | void (*free)(struct mdesc_handle *handle); | |
59 | static struct mdesc_node *allnodes_tail; | 61 | }; |
60 | static unsigned int unique_id; | ||
61 | 62 | ||
62 | static struct mdesc_node **mdesc_hash; | 63 | struct mdesc_handle { |
63 | static unsigned int mdesc_hash_size; | 64 | struct list_head list; |
65 | struct mdesc_mem_ops *mops; | ||
66 | void *self_base; | ||
67 | atomic_t refcnt; | ||
68 | unsigned int handle_size; | ||
69 | struct mdesc_hdr mdesc; | ||
70 | }; | ||
64 | 71 | ||
65 | static inline unsigned int node_hashfn(u64 node) | 72 | static void mdesc_handle_init(struct mdesc_handle *hp, |
73 | unsigned int handle_size, | ||
74 | void *base) | ||
66 | { | 75 | { |
67 | return ((unsigned int) (node ^ (node >> 8) ^ (node >> 16))) | 76 | BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1)); |
68 | & (mdesc_hash_size - 1); | 77 | |
78 | memset(hp, 0, handle_size); | ||
79 | INIT_LIST_HEAD(&hp->list); | ||
80 | hp->self_base = base; | ||
81 | atomic_set(&hp->refcnt, 1); | ||
82 | hp->handle_size = handle_size; | ||
69 | } | 83 | } |
70 | 84 | ||
71 | static inline void hash_node(struct mdesc_node *mp) | 85 | static struct mdesc_handle *mdesc_bootmem_alloc(unsigned int mdesc_size) |
72 | { | 86 | { |
73 | struct mdesc_node **head = &mdesc_hash[node_hashfn(mp->node)]; | 87 | struct mdesc_handle *hp; |
88 | unsigned int handle_size, alloc_size; | ||
74 | 89 | ||
75 | mp->hash_next = *head; | 90 | handle_size = (sizeof(struct mdesc_handle) - |
76 | *head = mp; | 91 | sizeof(struct mdesc_hdr) + |
92 | mdesc_size); | ||
93 | alloc_size = PAGE_ALIGN(handle_size); | ||
77 | 94 | ||
78 | if (allnodes_tail) { | 95 | hp = __alloc_bootmem(alloc_size, PAGE_SIZE, 0UL); |
79 | allnodes_tail->allnodes_next = mp; | 96 | if (hp) |
80 | allnodes_tail = mp; | 97 | mdesc_handle_init(hp, handle_size, hp); |
81 | } else { | 98 | |
82 | allnodes = allnodes_tail = mp; | 99 | return hp; |
83 | } | ||
84 | } | 100 | } |
85 | 101 | ||
86 | static struct mdesc_node *find_node(u64 node) | 102 | static void mdesc_bootmem_free(struct mdesc_handle *hp) |
87 | { | 103 | { |
88 | struct mdesc_node *mp = mdesc_hash[node_hashfn(node)]; | 104 | unsigned int alloc_size, handle_size = hp->handle_size; |
105 | unsigned long start, end; | ||
89 | 106 | ||
90 | while (mp) { | 107 | BUG_ON(atomic_read(&hp->refcnt) != 0); |
91 | if (mp->node == node) | 108 | BUG_ON(!list_empty(&hp->list)); |
92 | return mp; | ||
93 | 109 | ||
94 | mp = mp->hash_next; | 110 | alloc_size = PAGE_ALIGN(handle_size); |
111 | |||
112 | start = (unsigned long) hp; | ||
113 | end = start + alloc_size; | ||
114 | |||
115 | while (start < end) { | ||
116 | struct page *p; | ||
117 | |||
118 | p = virt_to_page(start); | ||
119 | ClearPageReserved(p); | ||
120 | __free_page(p); | ||
121 | start += PAGE_SIZE; | ||
95 | } | 122 | } |
96 | return NULL; | ||
97 | } | 123 | } |
98 | 124 | ||
99 | struct property *md_find_property(const struct mdesc_node *mp, | 125 | static struct mdesc_mem_ops bootmem_mdesc_memops = { |
100 | const char *name, | 126 | .alloc = mdesc_bootmem_alloc, |
101 | int *lenp) | 127 | .free = mdesc_bootmem_free, |
128 | }; | ||
129 | |||
130 | static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size) | ||
102 | { | 131 | { |
103 | struct property *pp; | 132 | unsigned int handle_size; |
133 | void *base; | ||
104 | 134 | ||
105 | for (pp = mp->properties; pp != 0; pp = pp->next) { | 135 | handle_size = (sizeof(struct mdesc_handle) - |
106 | if (strcasecmp(pp->name, name) == 0) { | 136 | sizeof(struct mdesc_hdr) + |
107 | if (lenp) | 137 | mdesc_size); |
108 | *lenp = pp->length; | 138 | |
109 | break; | 139 | base = kmalloc(handle_size + 15, GFP_KERNEL); |
110 | } | 140 | if (base) { |
141 | struct mdesc_handle *hp; | ||
142 | unsigned long addr; | ||
143 | |||
144 | addr = (unsigned long)base; | ||
145 | addr = (addr + 15UL) & ~15UL; | ||
146 | hp = (struct mdesc_handle *) addr; | ||
147 | |||
148 | mdesc_handle_init(hp, handle_size, base); | ||
149 | return hp; | ||
111 | } | 150 | } |
112 | return pp; | 151 | |
152 | return NULL; | ||
113 | } | 153 | } |
114 | EXPORT_SYMBOL(md_find_property); | ||
115 | 154 | ||
116 | /* | 155 | static void mdesc_kfree(struct mdesc_handle *hp) |
117 | * Find a property with a given name for a given node | ||
118 | * and return the value. | ||
119 | */ | ||
120 | const void *md_get_property(const struct mdesc_node *mp, const char *name, | ||
121 | int *lenp) | ||
122 | { | 156 | { |
123 | struct property *pp = md_find_property(mp, name, lenp); | 157 | BUG_ON(atomic_read(&hp->refcnt) != 0); |
124 | return pp ? pp->value : NULL; | 158 | BUG_ON(!list_empty(&hp->list)); |
159 | |||
160 | kfree(hp->self_base); | ||
125 | } | 161 | } |
126 | EXPORT_SYMBOL(md_get_property); | ||
127 | 162 | ||
128 | struct mdesc_node *md_find_node_by_name(struct mdesc_node *from, | 163 | static struct mdesc_mem_ops kmalloc_mdesc_memops = { |
129 | const char *name) | 164 | .alloc = mdesc_kmalloc, |
165 | .free = mdesc_kfree, | ||
166 | }; | ||
167 | |||
168 | static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size, | ||
169 | struct mdesc_mem_ops *mops) | ||
130 | { | 170 | { |
131 | struct mdesc_node *mp; | 171 | struct mdesc_handle *hp = mops->alloc(mdesc_size); |
132 | 172 | ||
133 | mp = from ? from->allnodes_next : allnodes; | 173 | if (hp) |
134 | for (; mp != NULL; mp = mp->allnodes_next) { | 174 | hp->mops = mops; |
135 | if (strcmp(mp->name, name) == 0) | ||
136 | break; | ||
137 | } | ||
138 | return mp; | ||
139 | } | ||
140 | EXPORT_SYMBOL(md_find_node_by_name); | ||
141 | 175 | ||
142 | static unsigned int mdesc_early_allocated; | 176 | return hp; |
177 | } | ||
143 | 178 | ||
144 | static void * __init mdesc_early_alloc(unsigned long size) | 179 | static void mdesc_free(struct mdesc_handle *hp) |
145 | { | 180 | { |
146 | void *ret; | 181 | hp->mops->free(hp); |
182 | } | ||
147 | 183 | ||
148 | ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL); | 184 | static struct mdesc_handle *cur_mdesc; |
149 | if (ret == NULL) { | 185 | static LIST_HEAD(mdesc_zombie_list); |
150 | prom_printf("MDESC: alloc of %lu bytes failed.\n", size); | 186 | static DEFINE_SPINLOCK(mdesc_lock); |
151 | prom_halt(); | ||
152 | } | ||
153 | 187 | ||
154 | memset(ret, 0, size); | 188 | struct mdesc_handle *mdesc_grab(void) |
189 | { | ||
190 | struct mdesc_handle *hp; | ||
191 | unsigned long flags; | ||
155 | 192 | ||
156 | mdesc_early_allocated += size; | 193 | spin_lock_irqsave(&mdesc_lock, flags); |
194 | hp = cur_mdesc; | ||
195 | if (hp) | ||
196 | atomic_inc(&hp->refcnt); | ||
197 | spin_unlock_irqrestore(&mdesc_lock, flags); | ||
157 | 198 | ||
158 | return ret; | 199 | return hp; |
159 | } | 200 | } |
201 | EXPORT_SYMBOL(mdesc_grab); | ||
160 | 202 | ||
161 | static unsigned int __init count_arcs(struct mdesc_elem *ep) | 203 | void mdesc_release(struct mdesc_handle *hp) |
162 | { | 204 | { |
163 | unsigned int ret = 0; | 205 | unsigned long flags; |
164 | 206 | ||
165 | ep++; | 207 | spin_lock_irqsave(&mdesc_lock, flags); |
166 | while (ep->tag != MD_NODE_END) { | 208 | if (atomic_dec_and_test(&hp->refcnt)) { |
167 | if (ep->tag == MD_PROP_ARC) | 209 | list_del_init(&hp->list); |
168 | ret++; | 210 | hp->mops->free(hp); |
169 | ep++; | ||
170 | } | 211 | } |
171 | return ret; | 212 | spin_unlock_irqrestore(&mdesc_lock, flags); |
172 | } | 213 | } |
214 | EXPORT_SYMBOL(mdesc_release); | ||
173 | 215 | ||
174 | static void __init mdesc_node_alloc(u64 node, struct mdesc_elem *ep, const char *names) | 216 | static void do_mdesc_update(struct work_struct *work) |
175 | { | 217 | { |
176 | unsigned int num_arcs = count_arcs(ep); | 218 | unsigned long len, real_len, status; |
177 | struct mdesc_node *mp; | 219 | struct mdesc_handle *hp, *orig_hp; |
220 | unsigned long flags; | ||
221 | |||
222 | (void) sun4v_mach_desc(0UL, 0UL, &len); | ||
223 | |||
224 | hp = mdesc_alloc(len, &kmalloc_mdesc_memops); | ||
225 | if (!hp) { | ||
226 | printk(KERN_ERR "MD: mdesc alloc fails\n"); | ||
227 | return; | ||
228 | } | ||
229 | |||
230 | status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len); | ||
231 | if (status != HV_EOK || real_len > len) { | ||
232 | printk(KERN_ERR "MD: mdesc reread fails with %lu\n", | ||
233 | status); | ||
234 | atomic_dec(&hp->refcnt); | ||
235 | mdesc_free(hp); | ||
236 | return; | ||
237 | } | ||
178 | 238 | ||
179 | mp = mdesc_early_alloc(sizeof(*mp) + | 239 | spin_lock_irqsave(&mdesc_lock, flags); |
180 | (num_arcs * sizeof(struct mdesc_arc))); | 240 | orig_hp = cur_mdesc; |
181 | mp->name = names + ep->name_offset; | 241 | cur_mdesc = hp; |
182 | mp->node = node; | ||
183 | mp->unique_id = unique_id++; | ||
184 | mp->num_arcs = num_arcs; | ||
185 | 242 | ||
186 | hash_node(mp); | 243 | if (atomic_dec_and_test(&orig_hp->refcnt)) |
244 | mdesc_free(orig_hp); | ||
245 | else | ||
246 | list_add(&orig_hp->list, &mdesc_zombie_list); | ||
247 | spin_unlock_irqrestore(&mdesc_lock, flags); | ||
187 | } | 248 | } |
188 | 249 | ||
189 | static inline struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) | 250 | static DECLARE_WORK(mdesc_update_work, do_mdesc_update); |
251 | |||
252 | void mdesc_update(void) | ||
253 | { | ||
254 | schedule_work(&mdesc_update_work); | ||
255 | } | ||
256 | |||
257 | static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) | ||
190 | { | 258 | { |
191 | return (struct mdesc_elem *) (mdesc + 1); | 259 | return (struct mdesc_elem *) (mdesc + 1); |
192 | } | 260 | } |
193 | 261 | ||
194 | static inline void *name_block(struct mdesc_hdr *mdesc) | 262 | static void *name_block(struct mdesc_hdr *mdesc) |
195 | { | 263 | { |
196 | return ((void *) node_block(mdesc)) + mdesc->node_sz; | 264 | return ((void *) node_block(mdesc)) + mdesc->node_sz; |
197 | } | 265 | } |
198 | 266 | ||
199 | static inline void *data_block(struct mdesc_hdr *mdesc) | 267 | static void *data_block(struct mdesc_hdr *mdesc) |
200 | { | 268 | { |
201 | return ((void *) name_block(mdesc)) + mdesc->name_sz; | 269 | return ((void *) name_block(mdesc)) + mdesc->name_sz; |
202 | } | 270 | } |
203 | 271 | ||
204 | /* In order to avoid recursion (the graph can be very deep) we use a | 272 | u64 mdesc_node_by_name(struct mdesc_handle *hp, |
205 | * two pass algorithm. First we allocate all the nodes and hash them. | 273 | u64 from_node, const char *name) |
206 | * Then we iterate over each node, filling in the arcs and properties. | ||
207 | */ | ||
208 | static void __init build_all_nodes(struct mdesc_hdr *mdesc) | ||
209 | { | 274 | { |
210 | struct mdesc_elem *start, *ep; | 275 | struct mdesc_elem *ep = node_block(&hp->mdesc); |
211 | struct mdesc_node *mp; | 276 | const char *names = name_block(&hp->mdesc); |
212 | const char *names; | 277 | u64 last_node = hp->mdesc.node_sz / 16; |
213 | void *data; | 278 | u64 ret; |
214 | u64 last_node; | 279 | |
215 | 280 | if (from_node == MDESC_NODE_NULL) | |
216 | start = ep = node_block(mdesc); | 281 | from_node = 0; |
217 | last_node = mdesc->node_sz / 16; | 282 | |
283 | if (from_node >= last_node) | ||
284 | return MDESC_NODE_NULL; | ||
285 | |||
286 | ret = ep[from_node].d.val; | ||
287 | while (ret < last_node) { | ||
288 | if (ep[ret].tag != MD_NODE) | ||
289 | return MDESC_NODE_NULL; | ||
290 | if (!strcmp(names + ep[ret].name_offset, name)) | ||
291 | break; | ||
292 | ret = ep[ret].d.val; | ||
293 | } | ||
294 | if (ret >= last_node) | ||
295 | ret = MDESC_NODE_NULL; | ||
296 | return ret; | ||
297 | } | ||
298 | EXPORT_SYMBOL(mdesc_node_by_name); | ||
218 | 299 | ||
219 | names = name_block(mdesc); | 300 | const void *mdesc_get_property(struct mdesc_handle *hp, u64 node, |
301 | const char *name, int *lenp) | ||
302 | { | ||
303 | const char *names = name_block(&hp->mdesc); | ||
304 | u64 last_node = hp->mdesc.node_sz / 16; | ||
305 | void *data = data_block(&hp->mdesc); | ||
306 | struct mdesc_elem *ep; | ||
220 | 307 | ||
221 | while (1) { | 308 | if (node == MDESC_NODE_NULL || node >= last_node) |
222 | u64 node = ep - start; | 309 | return NULL; |
223 | 310 | ||
224 | if (ep->tag == MD_LIST_END) | 311 | ep = node_block(&hp->mdesc) + node; |
312 | ep++; | ||
313 | for (; ep->tag != MD_NODE_END; ep++) { | ||
314 | void *val = NULL; | ||
315 | int len = 0; | ||
316 | |||
317 | switch (ep->tag) { | ||
318 | case MD_PROP_VAL: | ||
319 | val = &ep->d.val; | ||
320 | len = 8; | ||
225 | break; | 321 | break; |
226 | 322 | ||
227 | if (ep->tag != MD_NODE) { | 323 | case MD_PROP_STR: |
228 | prom_printf("MDESC: Inconsistent element list.\n"); | 324 | case MD_PROP_DATA: |
229 | prom_halt(); | 325 | val = data + ep->d.data.data_offset; |
230 | } | 326 | len = ep->d.data.data_len; |
231 | 327 | break; | |
232 | mdesc_node_alloc(node, ep, names); | ||
233 | 328 | ||
234 | if (ep->d.val >= last_node) { | 329 | default: |
235 | printk("MDESC: Warning, early break out of node scan.\n"); | ||
236 | printk("MDESC: Next node [%lu] last_node [%lu].\n", | ||
237 | node, last_node); | ||
238 | break; | 330 | break; |
239 | } | 331 | } |
332 | if (!val) | ||
333 | continue; | ||
240 | 334 | ||
241 | ep = start + ep->d.val; | 335 | if (!strcmp(names + ep->name_offset, name)) { |
336 | if (lenp) | ||
337 | *lenp = len; | ||
338 | return val; | ||
339 | } | ||
242 | } | 340 | } |
243 | 341 | ||
244 | data = data_block(mdesc); | 342 | return NULL; |
245 | for (mp = allnodes; mp; mp = mp->allnodes_next) { | 343 | } |
246 | struct mdesc_elem *ep = start + mp->node; | 344 | EXPORT_SYMBOL(mdesc_get_property); |
247 | struct property **link = &mp->properties; | ||
248 | unsigned int this_arc = 0; | ||
249 | |||
250 | ep++; | ||
251 | while (ep->tag != MD_NODE_END) { | ||
252 | switch (ep->tag) { | ||
253 | case MD_PROP_ARC: { | ||
254 | struct mdesc_node *target; | ||
255 | |||
256 | if (this_arc >= mp->num_arcs) { | ||
257 | prom_printf("MDESC: ARC overrun [%u:%u]\n", | ||
258 | this_arc, mp->num_arcs); | ||
259 | prom_halt(); | ||
260 | } | ||
261 | target = find_node(ep->d.val); | ||
262 | if (!target) { | ||
263 | printk("MDESC: Warning, arc points to " | ||
264 | "missing node, ignoring.\n"); | ||
265 | break; | ||
266 | } | ||
267 | mp->arcs[this_arc].name = | ||
268 | (names + ep->name_offset); | ||
269 | mp->arcs[this_arc].arc = target; | ||
270 | this_arc++; | ||
271 | break; | ||
272 | } | ||
273 | 345 | ||
274 | case MD_PROP_VAL: | 346 | u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type) |
275 | case MD_PROP_STR: | 347 | { |
276 | case MD_PROP_DATA: { | 348 | struct mdesc_elem *ep, *base = node_block(&hp->mdesc); |
277 | struct property *p = mdesc_early_alloc(sizeof(*p)); | 349 | const char *names = name_block(&hp->mdesc); |
278 | 350 | u64 last_node = hp->mdesc.node_sz / 16; | |
279 | p->unique_id = unique_id++; | ||
280 | p->name = (char *) names + ep->name_offset; | ||
281 | if (ep->tag == MD_PROP_VAL) { | ||
282 | p->value = &ep->d.val; | ||
283 | p->length = 8; | ||
284 | } else { | ||
285 | p->value = data + ep->d.data.data_offset; | ||
286 | p->length = ep->d.data.data_len; | ||
287 | } | ||
288 | *link = p; | ||
289 | link = &p->next; | ||
290 | break; | ||
291 | } | ||
292 | 351 | ||
293 | case MD_NOOP: | 352 | if (from == MDESC_NODE_NULL || from >= last_node) |
294 | break; | 353 | return MDESC_NODE_NULL; |
295 | 354 | ||
296 | default: | 355 | ep = base + from; |
297 | printk("MDESC: Warning, ignoring unknown tag type %02x\n", | 356 | |
298 | ep->tag); | 357 | ep++; |
299 | } | 358 | for (; ep->tag != MD_NODE_END; ep++) { |
300 | ep++; | 359 | if (ep->tag != MD_PROP_ARC) |
301 | } | 360 | continue; |
361 | |||
362 | if (strcmp(names + ep->name_offset, arc_type)) | ||
363 | continue; | ||
364 | |||
365 | return ep - base; | ||
302 | } | 366 | } |
367 | |||
368 | return MDESC_NODE_NULL; | ||
303 | } | 369 | } |
370 | EXPORT_SYMBOL(mdesc_next_arc); | ||
304 | 371 | ||
305 | static unsigned int __init count_nodes(struct mdesc_hdr *mdesc) | 372 | u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc) |
306 | { | 373 | { |
307 | struct mdesc_elem *ep = node_block(mdesc); | 374 | struct mdesc_elem *ep, *base = node_block(&hp->mdesc); |
308 | struct mdesc_elem *end; | 375 | |
309 | unsigned int cnt = 0; | 376 | ep = base + arc; |
310 | 377 | ||
311 | end = ((void *)ep) + mdesc->node_sz; | 378 | return ep->d.val; |
312 | while (ep < end) { | ||
313 | if (ep->tag == MD_NODE) | ||
314 | cnt++; | ||
315 | ep++; | ||
316 | } | ||
317 | return cnt; | ||
318 | } | 379 | } |
380 | EXPORT_SYMBOL(mdesc_arc_target); | ||
381 | |||
382 | const char *mdesc_node_name(struct mdesc_handle *hp, u64 node) | ||
383 | { | ||
384 | struct mdesc_elem *ep, *base = node_block(&hp->mdesc); | ||
385 | const char *names = name_block(&hp->mdesc); | ||
386 | u64 last_node = hp->mdesc.node_sz / 16; | ||
387 | |||
388 | if (node == MDESC_NODE_NULL || node >= last_node) | ||
389 | return NULL; | ||
390 | |||
391 | ep = base + node; | ||
392 | if (ep->tag != MD_NODE) | ||
393 | return NULL; | ||
394 | |||
395 | return names + ep->name_offset; | ||
396 | } | ||
397 | EXPORT_SYMBOL(mdesc_node_name); | ||
319 | 398 | ||
320 | static void __init report_platform_properties(void) | 399 | static void __init report_platform_properties(void) |
321 | { | 400 | { |
322 | struct mdesc_node *pn = md_find_node_by_name(NULL, "platform"); | 401 | struct mdesc_handle *hp = mdesc_grab(); |
402 | u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform"); | ||
323 | const char *s; | 403 | const char *s; |
324 | const u64 *v; | 404 | const u64 *v; |
325 | 405 | ||
326 | if (!pn) { | 406 | if (pn == MDESC_NODE_NULL) { |
327 | prom_printf("No platform node in machine-description.\n"); | 407 | prom_printf("No platform node in machine-description.\n"); |
328 | prom_halt(); | 408 | prom_halt(); |
329 | } | 409 | } |
330 | 410 | ||
331 | s = md_get_property(pn, "banner-name", NULL); | 411 | s = mdesc_get_property(hp, pn, "banner-name", NULL); |
332 | printk("PLATFORM: banner-name [%s]\n", s); | 412 | printk("PLATFORM: banner-name [%s]\n", s); |
333 | s = md_get_property(pn, "name", NULL); | 413 | s = mdesc_get_property(hp, pn, "name", NULL); |
334 | printk("PLATFORM: name [%s]\n", s); | 414 | printk("PLATFORM: name [%s]\n", s); |
335 | 415 | ||
336 | v = md_get_property(pn, "hostid", NULL); | 416 | v = mdesc_get_property(hp, pn, "hostid", NULL); |
337 | if (v) | 417 | if (v) |
338 | printk("PLATFORM: hostid [%08lx]\n", *v); | 418 | printk("PLATFORM: hostid [%08lx]\n", *v); |
339 | v = md_get_property(pn, "serial#", NULL); | 419 | v = mdesc_get_property(hp, pn, "serial#", NULL); |
340 | if (v) | 420 | if (v) |
341 | printk("PLATFORM: serial# [%08lx]\n", *v); | 421 | printk("PLATFORM: serial# [%08lx]\n", *v); |
342 | v = md_get_property(pn, "stick-frequency", NULL); | 422 | v = mdesc_get_property(hp, pn, "stick-frequency", NULL); |
343 | printk("PLATFORM: stick-frequency [%08lx]\n", *v); | 423 | printk("PLATFORM: stick-frequency [%08lx]\n", *v); |
344 | v = md_get_property(pn, "mac-address", NULL); | 424 | v = mdesc_get_property(hp, pn, "mac-address", NULL); |
345 | if (v) | 425 | if (v) |
346 | printk("PLATFORM: mac-address [%lx]\n", *v); | 426 | printk("PLATFORM: mac-address [%lx]\n", *v); |
347 | v = md_get_property(pn, "watchdog-resolution", NULL); | 427 | v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL); |
348 | if (v) | 428 | if (v) |
349 | printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v); | 429 | printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v); |
350 | v = md_get_property(pn, "watchdog-max-timeout", NULL); | 430 | v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL); |
351 | if (v) | 431 | if (v) |
352 | printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v); | 432 | printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v); |
353 | v = md_get_property(pn, "max-cpus", NULL); | 433 | v = mdesc_get_property(hp, pn, "max-cpus", NULL); |
354 | if (v) | 434 | if (v) |
355 | printk("PLATFORM: max-cpus [%lu]\n", *v); | 435 | printk("PLATFORM: max-cpus [%lu]\n", *v); |
436 | |||
437 | mdesc_release(hp); | ||
356 | } | 438 | } |
357 | 439 | ||
358 | static int inline find_in_proplist(const char *list, const char *match, int len) | 440 | static int inline find_in_proplist(const char *list, const char *match, int len) |
@@ -369,15 +451,17 @@ static int inline find_in_proplist(const char *list, const char *match, int len) | |||
369 | return 0; | 451 | return 0; |
370 | } | 452 | } |
371 | 453 | ||
372 | static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp) | 454 | static void __init fill_in_one_cache(cpuinfo_sparc *c, |
455 | struct mdesc_handle *hp, | ||
456 | u64 mp) | ||
373 | { | 457 | { |
374 | const u64 *level = md_get_property(mp, "level", NULL); | 458 | const u64 *level = mdesc_get_property(hp, mp, "level", NULL); |
375 | const u64 *size = md_get_property(mp, "size", NULL); | 459 | const u64 *size = mdesc_get_property(hp, mp, "size", NULL); |
376 | const u64 *line_size = md_get_property(mp, "line-size", NULL); | 460 | const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL); |
377 | const char *type; | 461 | const char *type; |
378 | int type_len; | 462 | int type_len; |
379 | 463 | ||
380 | type = md_get_property(mp, "type", &type_len); | 464 | type = mdesc_get_property(hp, mp, "type", &type_len); |
381 | 465 | ||
382 | switch (*level) { | 466 | switch (*level) { |
383 | case 1: | 467 | case 1: |
@@ -400,48 +484,44 @@ static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp) | |||
400 | } | 484 | } |
401 | 485 | ||
402 | if (*level == 1) { | 486 | if (*level == 1) { |
403 | unsigned int i; | 487 | u64 a; |
404 | 488 | ||
405 | for (i = 0; i < mp->num_arcs; i++) { | 489 | mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { |
406 | struct mdesc_node *t = mp->arcs[i].arc; | 490 | u64 target = mdesc_arc_target(hp, a); |
491 | const char *name = mdesc_node_name(hp, target); | ||
407 | 492 | ||
408 | if (strcmp(mp->arcs[i].name, "fwd")) | 493 | if (!strcmp(name, "cache")) |
409 | continue; | 494 | fill_in_one_cache(c, hp, target); |
410 | |||
411 | if (!strcmp(t->name, "cache")) | ||
412 | fill_in_one_cache(c, t); | ||
413 | } | 495 | } |
414 | } | 496 | } |
415 | } | 497 | } |
416 | 498 | ||
417 | static void __init mark_core_ids(struct mdesc_node *mp, int core_id) | 499 | static void __init mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) |
418 | { | 500 | { |
419 | unsigned int i; | 501 | u64 a; |
420 | 502 | ||
421 | for (i = 0; i < mp->num_arcs; i++) { | 503 | mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) { |
422 | struct mdesc_node *t = mp->arcs[i].arc; | 504 | u64 t = mdesc_arc_target(hp, a); |
505 | const char *name; | ||
423 | const u64 *id; | 506 | const u64 *id; |
424 | 507 | ||
425 | if (strcmp(mp->arcs[i].name, "back")) | 508 | name = mdesc_node_name(hp, t); |
426 | continue; | 509 | if (!strcmp(name, "cpu")) { |
427 | 510 | id = mdesc_get_property(hp, t, "id", NULL); | |
428 | if (!strcmp(t->name, "cpu")) { | ||
429 | id = md_get_property(t, "id", NULL); | ||
430 | if (*id < NR_CPUS) | 511 | if (*id < NR_CPUS) |
431 | cpu_data(*id).core_id = core_id; | 512 | cpu_data(*id).core_id = core_id; |
432 | } else { | 513 | } else { |
433 | unsigned int j; | 514 | u64 j; |
434 | 515 | ||
435 | for (j = 0; j < t->num_arcs; j++) { | 516 | mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) { |
436 | struct mdesc_node *n = t->arcs[j].arc; | 517 | u64 n = mdesc_arc_target(hp, j); |
518 | const char *n_name; | ||
437 | 519 | ||
438 | if (strcmp(t->arcs[j].name, "back")) | 520 | n_name = mdesc_node_name(hp, n); |
521 | if (strcmp(n_name, "cpu")) | ||
439 | continue; | 522 | continue; |
440 | 523 | ||
441 | if (strcmp(n->name, "cpu")) | 524 | id = mdesc_get_property(hp, n, "id", NULL); |
442 | continue; | ||
443 | |||
444 | id = md_get_property(n, "id", NULL); | ||
445 | if (*id < NR_CPUS) | 525 | if (*id < NR_CPUS) |
446 | cpu_data(*id).core_id = core_id; | 526 | cpu_data(*id).core_id = core_id; |
447 | } | 527 | } |
@@ -449,75 +529,76 @@ static void __init mark_core_ids(struct mdesc_node *mp, int core_id) | |||
449 | } | 529 | } |
450 | } | 530 | } |
451 | 531 | ||
452 | static void __init set_core_ids(void) | 532 | static void __init set_core_ids(struct mdesc_handle *hp) |
453 | { | 533 | { |
454 | struct mdesc_node *mp; | ||
455 | int idx; | 534 | int idx; |
535 | u64 mp; | ||
456 | 536 | ||
457 | idx = 1; | 537 | idx = 1; |
458 | md_for_each_node_by_name(mp, "cache") { | 538 | mdesc_for_each_node_by_name(hp, mp, "cache") { |
459 | const u64 *level = md_get_property(mp, "level", NULL); | 539 | const u64 *level; |
460 | const char *type; | 540 | const char *type; |
461 | int len; | 541 | int len; |
462 | 542 | ||
543 | level = mdesc_get_property(hp, mp, "level", NULL); | ||
463 | if (*level != 1) | 544 | if (*level != 1) |
464 | continue; | 545 | continue; |
465 | 546 | ||
466 | type = md_get_property(mp, "type", &len); | 547 | type = mdesc_get_property(hp, mp, "type", &len); |
467 | if (!find_in_proplist(type, "instn", len)) | 548 | if (!find_in_proplist(type, "instn", len)) |
468 | continue; | 549 | continue; |
469 | 550 | ||
470 | mark_core_ids(mp, idx); | 551 | mark_core_ids(hp, mp, idx); |
471 | 552 | ||
472 | idx++; | 553 | idx++; |
473 | } | 554 | } |
474 | } | 555 | } |
475 | 556 | ||
476 | static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id) | 557 | static void __init mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) |
477 | { | 558 | { |
478 | int i; | 559 | u64 a; |
479 | 560 | ||
480 | for (i = 0; i < mp->num_arcs; i++) { | 561 | mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) { |
481 | struct mdesc_node *t = mp->arcs[i].arc; | 562 | u64 t = mdesc_arc_target(hp, a); |
563 | const char *name; | ||
482 | const u64 *id; | 564 | const u64 *id; |
483 | 565 | ||
484 | if (strcmp(mp->arcs[i].name, "back")) | 566 | name = mdesc_node_name(hp, t); |
485 | continue; | 567 | if (strcmp(name, "cpu")) |
486 | |||
487 | if (strcmp(t->name, "cpu")) | ||
488 | continue; | 568 | continue; |
489 | 569 | ||
490 | id = md_get_property(t, "id", NULL); | 570 | id = mdesc_get_property(hp, t, "id", NULL); |
491 | if (*id < NR_CPUS) | 571 | if (*id < NR_CPUS) |
492 | cpu_data(*id).proc_id = proc_id; | 572 | cpu_data(*id).proc_id = proc_id; |
493 | } | 573 | } |
494 | } | 574 | } |
495 | 575 | ||
496 | static void __init __set_proc_ids(const char *exec_unit_name) | 576 | static void __init __set_proc_ids(struct mdesc_handle *hp, |
577 | const char *exec_unit_name) | ||
497 | { | 578 | { |
498 | struct mdesc_node *mp; | ||
499 | int idx; | 579 | int idx; |
580 | u64 mp; | ||
500 | 581 | ||
501 | idx = 0; | 582 | idx = 0; |
502 | md_for_each_node_by_name(mp, exec_unit_name) { | 583 | mdesc_for_each_node_by_name(hp, mp, exec_unit_name) { |
503 | const char *type; | 584 | const char *type; |
504 | int len; | 585 | int len; |
505 | 586 | ||
506 | type = md_get_property(mp, "type", &len); | 587 | type = mdesc_get_property(hp, mp, "type", &len); |
507 | if (!find_in_proplist(type, "int", len) && | 588 | if (!find_in_proplist(type, "int", len) && |
508 | !find_in_proplist(type, "integer", len)) | 589 | !find_in_proplist(type, "integer", len)) |
509 | continue; | 590 | continue; |
510 | 591 | ||
511 | mark_proc_ids(mp, idx); | 592 | mark_proc_ids(hp, mp, idx); |
512 | 593 | ||
513 | idx++; | 594 | idx++; |
514 | } | 595 | } |
515 | } | 596 | } |
516 | 597 | ||
517 | static void __init set_proc_ids(void) | 598 | static void __init set_proc_ids(struct mdesc_handle *hp) |
518 | { | 599 | { |
519 | __set_proc_ids("exec_unit"); | 600 | __set_proc_ids(hp, "exec_unit"); |
520 | __set_proc_ids("exec-unit"); | 601 | __set_proc_ids(hp, "exec-unit"); |
521 | } | 602 | } |
522 | 603 | ||
523 | static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def) | 604 | static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def) |
@@ -538,35 +619,37 @@ use_default: | |||
538 | *mask = ((1U << def) * 64U) - 1U; | 619 | *mask = ((1U << def) * 64U) - 1U; |
539 | } | 620 | } |
540 | 621 | ||
541 | static void __init get_mondo_data(struct mdesc_node *mp, struct trap_per_cpu *tb) | 622 | static void __init get_mondo_data(struct mdesc_handle *hp, u64 mp, |
623 | struct trap_per_cpu *tb) | ||
542 | { | 624 | { |
543 | const u64 *val; | 625 | const u64 *val; |
544 | 626 | ||
545 | val = md_get_property(mp, "q-cpu-mondo-#bits", NULL); | 627 | val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL); |
546 | get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7); | 628 | get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7); |
547 | 629 | ||
548 | val = md_get_property(mp, "q-dev-mondo-#bits", NULL); | 630 | val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL); |
549 | get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7); | 631 | get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7); |
550 | 632 | ||
551 | val = md_get_property(mp, "q-resumable-#bits", NULL); | 633 | val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL); |
552 | get_one_mondo_bits(val, &tb->resum_qmask, 6); | 634 | get_one_mondo_bits(val, &tb->resum_qmask, 6); |
553 | 635 | ||
554 | val = md_get_property(mp, "q-nonresumable-#bits", NULL); | 636 | val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL); |
555 | get_one_mondo_bits(val, &tb->nonresum_qmask, 2); | 637 | get_one_mondo_bits(val, &tb->nonresum_qmask, 2); |
556 | } | 638 | } |
557 | 639 | ||
558 | static void __init mdesc_fill_in_cpu_data(void) | 640 | static void __init mdesc_fill_in_cpu_data(void) |
559 | { | 641 | { |
560 | struct mdesc_node *mp; | 642 | struct mdesc_handle *hp = mdesc_grab(); |
643 | u64 mp; | ||
561 | 644 | ||
562 | ncpus_probed = 0; | 645 | ncpus_probed = 0; |
563 | md_for_each_node_by_name(mp, "cpu") { | 646 | mdesc_for_each_node_by_name(hp, mp, "cpu") { |
564 | const u64 *id = md_get_property(mp, "id", NULL); | 647 | const u64 *id = mdesc_get_property(hp, mp, "id", NULL); |
565 | const u64 *cfreq = md_get_property(mp, "clock-frequency", NULL); | 648 | const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); |
566 | struct trap_per_cpu *tb; | 649 | struct trap_per_cpu *tb; |
567 | cpuinfo_sparc *c; | 650 | cpuinfo_sparc *c; |
568 | unsigned int i; | ||
569 | int cpuid; | 651 | int cpuid; |
652 | u64 a; | ||
570 | 653 | ||
571 | ncpus_probed++; | 654 | ncpus_probed++; |
572 | 655 | ||
@@ -589,29 +672,25 @@ static void __init mdesc_fill_in_cpu_data(void) | |||
589 | c->clock_tick = *cfreq; | 672 | c->clock_tick = *cfreq; |
590 | 673 | ||
591 | tb = &trap_block[cpuid]; | 674 | tb = &trap_block[cpuid]; |
592 | get_mondo_data(mp, tb); | 675 | get_mondo_data(hp, mp, tb); |
593 | |||
594 | for (i = 0; i < mp->num_arcs; i++) { | ||
595 | struct mdesc_node *t = mp->arcs[i].arc; | ||
596 | unsigned int j; | ||
597 | 676 | ||
598 | if (strcmp(mp->arcs[i].name, "fwd")) | 677 | mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { |
599 | continue; | 678 | u64 j, t = mdesc_arc_target(hp, a); |
679 | const char *t_name; | ||
600 | 680 | ||
601 | if (!strcmp(t->name, "cache")) { | 681 | t_name = mdesc_node_name(hp, t); |
602 | fill_in_one_cache(c, t); | 682 | if (!strcmp(t_name, "cache")) { |
683 | fill_in_one_cache(c, hp, t); | ||
603 | continue; | 684 | continue; |
604 | } | 685 | } |
605 | 686 | ||
606 | for (j = 0; j < t->num_arcs; j++) { | 687 | mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) { |
607 | struct mdesc_node *n; | 688 | u64 n = mdesc_arc_target(hp, j); |
689 | const char *n_name; | ||
608 | 690 | ||
609 | n = t->arcs[j].arc; | 691 | n_name = mdesc_node_name(hp, n); |
610 | if (strcmp(t->arcs[j].name, "fwd")) | 692 | if (!strcmp(n_name, "cache")) |
611 | continue; | 693 | fill_in_one_cache(c, hp, n); |
612 | |||
613 | if (!strcmp(n->name, "cache")) | ||
614 | fill_in_one_cache(c, n); | ||
615 | } | 694 | } |
616 | } | 695 | } |
617 | 696 | ||
@@ -628,44 +707,39 @@ static void __init mdesc_fill_in_cpu_data(void) | |||
628 | sparc64_multi_core = 1; | 707 | sparc64_multi_core = 1; |
629 | #endif | 708 | #endif |
630 | 709 | ||
631 | set_core_ids(); | 710 | set_core_ids(hp); |
632 | set_proc_ids(); | 711 | set_proc_ids(hp); |
633 | 712 | ||
634 | smp_fill_in_sib_core_maps(); | 713 | smp_fill_in_sib_core_maps(); |
714 | |||
715 | mdesc_release(hp); | ||
635 | } | 716 | } |
636 | 717 | ||
637 | void __init sun4v_mdesc_init(void) | 718 | void __init sun4v_mdesc_init(void) |
638 | { | 719 | { |
720 | struct mdesc_handle *hp; | ||
639 | unsigned long len, real_len, status; | 721 | unsigned long len, real_len, status; |
640 | 722 | ||
641 | (void) sun4v_mach_desc(0UL, 0UL, &len); | 723 | (void) sun4v_mach_desc(0UL, 0UL, &len); |
642 | 724 | ||
643 | printk("MDESC: Size is %lu bytes.\n", len); | 725 | printk("MDESC: Size is %lu bytes.\n", len); |
644 | 726 | ||
645 | main_mdesc = mdesc_early_alloc(len); | 727 | hp = mdesc_alloc(len, &bootmem_mdesc_memops); |
728 | if (hp == NULL) { | ||
729 | prom_printf("MDESC: alloc of %lu bytes failed.\n", len); | ||
730 | prom_halt(); | ||
731 | } | ||
646 | 732 | ||
647 | status = sun4v_mach_desc(__pa(main_mdesc), len, &real_len); | 733 | status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len); |
648 | if (status != HV_EOK || real_len > len) { | 734 | if (status != HV_EOK || real_len > len) { |
649 | prom_printf("sun4v_mach_desc fails, err(%lu), " | 735 | prom_printf("sun4v_mach_desc fails, err(%lu), " |
650 | "len(%lu), real_len(%lu)\n", | 736 | "len(%lu), real_len(%lu)\n", |
651 | status, len, real_len); | 737 | status, len, real_len); |
738 | mdesc_free(hp); | ||
652 | prom_halt(); | 739 | prom_halt(); |
653 | } | 740 | } |
654 | 741 | ||
655 | len = count_nodes(main_mdesc); | 742 | cur_mdesc = hp; |
656 | printk("MDESC: %lu nodes.\n", len); | ||
657 | |||
658 | len = roundup_pow_of_two(len); | ||
659 | |||
660 | mdesc_hash = mdesc_early_alloc(len * sizeof(struct mdesc_node *)); | ||
661 | mdesc_hash_size = len; | ||
662 | |||
663 | printk("MDESC: Hash size %lu entries.\n", len); | ||
664 | |||
665 | build_all_nodes(main_mdesc); | ||
666 | |||
667 | printk("MDESC: Built graph with %u bytes of memory.\n", | ||
668 | mdesc_early_allocated); | ||
669 | 743 | ||
670 | report_platform_properties(); | 744 | report_platform_properties(); |
671 | mdesc_fill_in_cpu_data(); | 745 | mdesc_fill_in_cpu_data(); |
diff --git a/arch/sparc64/kernel/vio.c b/arch/sparc64/kernel/vio.c
index 7eccc91cd59d..64f082555bcd 100644
--- a/arch/sparc64/kernel/vio.c
+++ b/arch/sparc64/kernel/vio.c
@@ -147,30 +147,6 @@ void vio_unregister_driver(struct vio_driver *viodrv) | |||
147 | } | 147 | } |
148 | EXPORT_SYMBOL(vio_unregister_driver); | 148 | EXPORT_SYMBOL(vio_unregister_driver); |
149 | 149 | ||
150 | struct mdesc_node *vio_find_endpoint(struct vio_dev *vdev) | ||
151 | { | ||
152 | struct mdesc_node *endp, *mp = vdev->mp; | ||
153 | int i; | ||
154 | |||
155 | endp = NULL; | ||
156 | for (i = 0; i < mp->num_arcs; i++) { | ||
157 | struct mdesc_node *t; | ||
158 | |||
159 | if (strcmp(mp->arcs[i].name, "fwd")) | ||
160 | continue; | ||
161 | |||
162 | t = mp->arcs[i].arc; | ||
163 | if (strcmp(t->name, "channel-endpoint")) | ||
164 | continue; | ||
165 | |||
166 | endp = t; | ||
167 | break; | ||
168 | } | ||
169 | |||
170 | return endp; | ||
171 | } | ||
172 | EXPORT_SYMBOL(vio_find_endpoint); | ||
173 | |||
174 | static void __devinit vio_dev_release(struct device *dev) | 150 | static void __devinit vio_dev_release(struct device *dev) |
175 | { | 151 | { |
176 | kfree(to_vio_dev(dev)); | 152 | kfree(to_vio_dev(dev)); |
@@ -197,22 +173,47 @@ struct device_node *cdev_node; | |||
197 | static struct vio_dev *root_vdev; | 173 | static struct vio_dev *root_vdev; |
198 | static u64 cdev_cfg_handle; | 174 | static u64 cdev_cfg_handle; |
199 | 175 | ||
200 | static struct vio_dev *vio_create_one(struct mdesc_node *mp, | 176 | static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp, |
177 | struct vio_dev *vdev) | ||
178 | { | ||
179 | u64 a; | ||
180 | |||
181 | mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { | ||
182 | const u64 *chan_id; | ||
183 | const u64 *irq; | ||
184 | u64 target; | ||
185 | |||
186 | target = mdesc_arc_target(hp, a); | ||
187 | |||
188 | irq = mdesc_get_property(hp, target, "tx-ino", NULL); | ||
189 | if (irq) | ||
190 | vdev->tx_irq = sun4v_build_virq(cdev_cfg_handle, *irq); | ||
191 | |||
192 | irq = mdesc_get_property(hp, target, "rx-ino", NULL); | ||
193 | if (irq) | ||
194 | vdev->rx_irq = sun4v_build_virq(cdev_cfg_handle, *irq); | ||
195 | |||
196 | chan_id = mdesc_get_property(hp, target, "id", NULL); | ||
197 | if (chan_id) | ||
198 | vdev->channel_id = *chan_id; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, | ||
201 | struct device *parent) | 203 | struct device *parent) |
202 | { | 204 | { |
203 | const char *type, *compat; | 205 | const char *type, *compat; |
204 | struct device_node *dp; | 206 | struct device_node *dp; |
205 | struct vio_dev *vdev; | 207 | struct vio_dev *vdev; |
206 | const u64 *irq; | ||
207 | int err, clen; | 208 | int err, clen; |
208 | 209 | ||
209 | type = md_get_property(mp, "device-type", NULL); | 210 | type = mdesc_get_property(hp, mp, "device-type", NULL); |
210 | if (!type) { | 211 | if (!type) { |
211 | type = md_get_property(mp, "name", NULL); | 212 | type = mdesc_get_property(hp, mp, "name", NULL); |
212 | if (!type) | 213 | if (!type) |
213 | type = mp->name; | 214 | type = mdesc_node_name(hp, mp); |
214 | } | 215 | } |
215 | compat = md_get_property(mp, "device-type", &clen); | 216 | compat = mdesc_get_property(hp, mp, "device-type", &clen); |
216 | 217 | ||
217 | vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); | 218 | vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); |
218 | if (!vdev) { | 219 | if (!vdev) { |
@@ -225,15 +226,13 @@ static struct vio_dev *vio_create_one(struct mdesc_node *mp, | |||
225 | vdev->compat = compat; | 226 | vdev->compat = compat; |
226 | vdev->compat_len = clen; | 227 | vdev->compat_len = clen; |
227 | 228 | ||
228 | irq = md_get_property(mp, "tx-ino", NULL); | 229 | vdev->channel_id = ~0UL; |
229 | if (irq) | 230 | vdev->tx_irq = ~0; |
230 | mp->irqs[0] = sun4v_build_virq(cdev_cfg_handle, *irq); | 231 | vdev->rx_irq = ~0; |
231 | 232 | ||
232 | irq = md_get_property(mp, "rx-ino", NULL); | 233 | vio_fill_channel_info(hp, mp, vdev); |
233 | if (irq) | ||
234 | mp->irqs[1] = sun4v_build_virq(cdev_cfg_handle, *irq); | ||
235 | 234 | ||
236 | snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%lx", mp->node); | 235 | snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%lx", mp); |
237 | vdev->dev.parent = parent; | 236 | vdev->dev.parent = parent; |
238 | vdev->dev.bus = &vio_bus_type; | 237 | vdev->dev.bus = &vio_bus_type; |
239 | vdev->dev.release = vio_dev_release; | 238 | vdev->dev.release = vio_dev_release; |
@@ -267,46 +266,43 @@ static struct vio_dev *vio_create_one(struct mdesc_node *mp, | |||
267 | return vdev; | 266 | return vdev; |
268 | } | 267 | } |
269 | 268 | ||
270 | static void walk_tree(struct mdesc_node *n, struct vio_dev *parent) | 269 | static void walk_tree(struct mdesc_handle *hp, u64 n, struct vio_dev *parent) |
271 | { | 270 | { |
272 | int i; | 271 | u64 a; |
273 | 272 | ||
274 | for (i = 0; i < n->num_arcs; i++) { | 273 | mdesc_for_each_arc(a, hp, n, MDESC_ARC_TYPE_FWD) { |
275 | struct mdesc_node *mp; | ||
276 | struct vio_dev *vdev; | 274 | struct vio_dev *vdev; |
275 | u64 target; | ||
277 | 276 | ||
278 | if (strcmp(n->arcs[i].name, "fwd")) | 277 | target = mdesc_arc_target(hp, a); |
279 | continue; | 278 | vdev = vio_create_one(hp, target, &parent->dev); |
280 | 279 | if (vdev) | |
281 | mp = n->arcs[i].arc; | 280 | walk_tree(hp, target, vdev); |
282 | |||
283 | vdev = vio_create_one(mp, &parent->dev); | ||
284 | if (vdev && mp->num_arcs) | ||
285 | walk_tree(mp, vdev); | ||
286 | } | 281 | } |
287 | } | 282 | } |
288 | 283 | ||
289 | static void create_devices(struct mdesc_node *root) | 284 | static void create_devices(struct mdesc_handle *hp, u64 root) |
290 | { | 285 | { |
291 | struct mdesc_node *mp; | 286 | u64 mp; |
292 | 287 | ||
293 | root_vdev = vio_create_one(root, NULL); | 288 | root_vdev = vio_create_one(hp, root, NULL); |
294 | if (!root_vdev) { | 289 | if (!root_vdev) { |
295 | printk(KERN_ERR "VIO: Coult not create root device.\n"); | 290 | printk(KERN_ERR "VIO: Coult not create root device.\n"); |
296 | return; | 291 | return; |
297 | } | 292 | } |
298 | 293 | ||
299 | walk_tree(root, root_vdev); | 294 | walk_tree(hp, root, root_vdev); |
300 | 295 | ||
301 | /* Domain services is odd as it doesn't sit underneath the | 296 | /* Domain services is odd as it doesn't sit underneath the |
302 | * channel-devices node, so we plug it in manually. | 297 | * channel-devices node, so we plug it in manually. |
303 | */ | 298 | */ |
304 | mp = md_find_node_by_name(NULL, "domain-services"); | 299 | mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "domain-services"); |
305 | if (mp) { | 300 | if (mp != MDESC_NODE_NULL) { |
306 | struct vio_dev *parent = vio_create_one(mp, &root_vdev->dev); | 301 | struct vio_dev *parent = vio_create_one(hp, mp, |
302 | &root_vdev->dev); | ||
307 | 303 | ||
308 | if (parent) | 304 | if (parent) |
309 | walk_tree(mp, parent); | 305 | walk_tree(hp, mp, parent); |
310 | } | 306 | } |
311 | } | 307 | } |
312 | 308 | ||
@@ -316,40 +312,47 @@ const char *cfg_handle_prop = "cfg-handle"; | |||
316 | 312 | ||
317 | static int __init vio_init(void) | 313 | static int __init vio_init(void) |
318 | { | 314 | { |
319 | struct mdesc_node *root; | 315 | struct mdesc_handle *hp; |
320 | const char *compat; | 316 | const char *compat; |
321 | const u64 *cfg_handle; | 317 | const u64 *cfg_handle; |
322 | int err, len; | 318 | int err, len; |
319 | u64 root; | ||
320 | |||
321 | hp = mdesc_grab(); | ||
322 | if (!hp) | ||
323 | return 0; | ||
323 | 324 | ||
324 | root = md_find_node_by_name(NULL, channel_devices_node); | 325 | root = mdesc_node_by_name(hp, MDESC_NODE_NULL, channel_devices_node); |
325 | if (!root) { | 326 | if (root == MDESC_NODE_NULL) { |
326 | printk(KERN_INFO "VIO: No channel-devices MDESC node.\n"); | 327 | printk(KERN_INFO "VIO: No channel-devices MDESC node.\n"); |
328 | mdesc_release(hp); | ||
327 | return 0; | 329 | return 0; |
328 | } | 330 | } |
329 | 331 | ||
330 | cdev_node = of_find_node_by_name(NULL, "channel-devices"); | 332 | cdev_node = of_find_node_by_name(NULL, "channel-devices"); |
333 | err = -ENODEV; | ||
331 | if (!cdev_node) { | 334 | if (!cdev_node) { |
332 | printk(KERN_INFO "VIO: No channel-devices OBP node.\n"); | 335 | printk(KERN_INFO "VIO: No channel-devices OBP node.\n"); |
333 | return -ENODEV; | 336 | goto out_release; |
334 | } | 337 | } |
335 | 338 | ||
336 | compat = md_get_property(root, "compatible", &len); | 339 | compat = mdesc_get_property(hp, root, "compatible", &len); |
337 | if (!compat) { | 340 | if (!compat) { |
338 | printk(KERN_ERR "VIO: Channel devices lacks compatible " | 341 | printk(KERN_ERR "VIO: Channel devices lacks compatible " |
339 | "property\n"); | 342 | "property\n"); |
340 | return -ENODEV; | 343 | goto out_release; |
341 | } | 344 | } |
342 | if (!find_in_proplist(compat, channel_devices_compat, len)) { | 345 | if (!find_in_proplist(compat, channel_devices_compat, len)) { |
343 | printk(KERN_ERR "VIO: Channel devices node lacks (%s) " | 346 | printk(KERN_ERR "VIO: Channel devices node lacks (%s) " |
344 | "compat entry.\n", channel_devices_compat); | 347 | "compat entry.\n", channel_devices_compat); |
345 | return -ENODEV; | 348 | goto out_release; |
346 | } | 349 | } |
347 | 350 | ||
348 | cfg_handle = md_get_property(root, cfg_handle_prop, NULL); | 351 | cfg_handle = mdesc_get_property(hp, root, cfg_handle_prop, NULL); |
349 | if (!cfg_handle) { | 352 | if (!cfg_handle) { |
350 | printk(KERN_ERR "VIO: Channel devices lacks %s property\n", | 353 | printk(KERN_ERR "VIO: Channel devices lacks %s property\n", |
351 | cfg_handle_prop); | 354 | cfg_handle_prop); |
352 | return -ENODEV; | 355 | goto out_release; |
353 | } | 356 | } |
354 | 357 | ||
355 | cdev_cfg_handle = *cfg_handle; | 358 | cdev_cfg_handle = *cfg_handle; |
@@ -361,9 +364,15 @@ static int __init vio_init(void) | |||
361 | return err; | 364 | return err; |
362 | } | 365 | } |
363 | 366 | ||
364 | create_devices(root); | 367 | create_devices(hp, root); |
368 | |||
369 | mdesc_release(hp); | ||
365 | 370 | ||
366 | return 0; | 371 | return 0; |
372 | |||
373 | out_release: | ||
374 | mdesc_release(hp); | ||
375 | return err; | ||
367 | } | 376 | } |
368 | 377 | ||
369 | postcore_initcall(vio_init); | 378 | postcore_initcall(vio_init); |
diff --git a/arch/sparc64/kernel/viohs.c b/arch/sparc64/kernel/viohs.c
index b0b1b8779342..15613add45d1 100644
--- a/arch/sparc64/kernel/viohs.c
+++ b/arch/sparc64/kernel/viohs.c
@@ -136,7 +136,7 @@ static int process_unknown(struct vio_driver_state *vio, void *arg) | |||
136 | pkt->type, pkt->stype, pkt->stype_env, pkt->sid); | 136 | pkt->type, pkt->stype, pkt->stype_env, pkt->sid); |
137 | 137 | ||
138 | printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n", | 138 | printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n", |
139 | vio->channel_id); | 139 | vio->vdev->channel_id); |
140 | 140 | ||
141 | ldc_disconnect(vio->lp); | 141 | ldc_disconnect(vio->lp); |
142 | 142 | ||
@@ -678,21 +678,11 @@ extern int vio_ldc_alloc(struct vio_driver_state *vio, | |||
678 | { | 678 | { |
679 | struct ldc_channel_config cfg = *base_cfg; | 679 | struct ldc_channel_config cfg = *base_cfg; |
680 | struct ldc_channel *lp; | 680 | struct ldc_channel *lp; |
681 | const u64 *id; | ||
682 | 681 | ||
683 | id = md_get_property(vio->endpoint, "id", NULL); | 682 | cfg.tx_irq = vio->vdev->tx_irq; |
684 | if (!id) { | 683 | cfg.rx_irq = vio->vdev->rx_irq; |
685 | printk(KERN_ERR "%s: Channel lacks id property.\n", | ||
686 | vio->name); | ||
687 | return -ENODEV; | ||
688 | } | ||
689 | |||
690 | vio->channel_id = *id; | ||
691 | |||
692 | cfg.rx_irq = vio->rx_irq; | ||
693 | cfg.tx_irq = vio->tx_irq; | ||
694 | 684 | ||
695 | lp = ldc_alloc(vio->channel_id, &cfg, event_arg); | 685 | lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg); |
696 | if (IS_ERR(lp)) | 686 | if (IS_ERR(lp)) |
697 | return PTR_ERR(lp); | 687 | return PTR_ERR(lp); |
698 | 688 | ||
@@ -728,7 +718,7 @@ void vio_port_up(struct vio_driver_state *vio) | |||
728 | if (err) | 718 | if (err) |
729 | printk(KERN_WARNING "%s: Port %lu bind failed, " | 719 | printk(KERN_WARNING "%s: Port %lu bind failed, " |
730 | "err=%d\n", | 720 | "err=%d\n", |
731 | vio->name, vio->channel_id, err); | 721 | vio->name, vio->vdev->channel_id, err); |
732 | } | 722 | } |
733 | 723 | ||
734 | if (!err) { | 724 | if (!err) { |
@@ -736,7 +726,7 @@ void vio_port_up(struct vio_driver_state *vio) | |||
736 | if (err) | 726 | if (err) |
737 | printk(KERN_WARNING "%s: Port %lu connect failed, " | 727 | printk(KERN_WARNING "%s: Port %lu connect failed, " |
738 | "err=%d\n", | 728 | "err=%d\n", |
739 | vio->name, vio->channel_id, err); | 729 | vio->name, vio->vdev->channel_id, err); |
740 | } | 730 | } |
741 | if (err) { | 731 | if (err) { |
742 | unsigned long expires = jiffies + HZ; | 732 | unsigned long expires = jiffies + HZ; |
@@ -757,9 +747,9 @@ static void vio_port_timer(unsigned long _arg) | |||
757 | } | 747 | } |
758 | 748 | ||
759 | int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, | 749 | int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, |
760 | u8 dev_class, struct mdesc_node *channel_endpoint, | 750 | u8 dev_class, struct vio_version *ver_table, |
761 | struct vio_version *ver_table, int ver_table_size, | 751 | int ver_table_size, struct vio_driver_ops *ops, |
762 | struct vio_driver_ops *ops, char *name) | 752 | char *name) |
763 | { | 753 | { |
764 | switch (dev_class) { | 754 | switch (dev_class) { |
765 | case VDEV_NETWORK: | 755 | case VDEV_NETWORK: |
@@ -777,9 +767,6 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, | |||
777 | !ops->handshake_complete) | 767 | !ops->handshake_complete) |
778 | return -EINVAL; | 768 | return -EINVAL; |
779 | 769 | ||
780 | if (!channel_endpoint) | ||
781 | return -EINVAL; | ||
782 | |||
783 | if (!ver_table || ver_table_size < 0) | 770 | if (!ver_table || ver_table_size < 0) |
784 | return -EINVAL; | 771 | return -EINVAL; |
785 | 772 | ||
@@ -793,10 +780,6 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, | |||
793 | vio->dev_class = dev_class; | 780 | vio->dev_class = dev_class; |
794 | vio->vdev = vdev; | 781 | vio->vdev = vdev; |
795 | 782 | ||
796 | vio->endpoint = channel_endpoint; | ||
797 | vio->tx_irq = channel_endpoint->irqs[0]; | ||
798 | vio->rx_irq = channel_endpoint->irqs[1]; | ||
799 | |||
800 | vio->ver_table = ver_table; | 783 | vio->ver_table = ver_table; |
801 | vio->ver_table_entries = ver_table_size; | 784 | vio->ver_table_entries = ver_table_size; |
802 | 785 | ||