diff options
Diffstat (limited to 'drivers/net/iseries_veth.c')
-rw-r--r-- | drivers/net/iseries_veth.c | 869 |
1 files changed, 581 insertions, 288 deletions
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 183ba97785b0..dc5d089bf184 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c | |||
@@ -79,12 +79,55 @@ | |||
79 | #include <asm/iommu.h> | 79 | #include <asm/iommu.h> |
80 | #include <asm/vio.h> | 80 | #include <asm/vio.h> |
81 | 81 | ||
82 | #include "iseries_veth.h" | 82 | #undef DEBUG |
83 | 83 | ||
84 | MODULE_AUTHOR("Kyle Lucke <klucke@us.ibm.com>"); | 84 | MODULE_AUTHOR("Kyle Lucke <klucke@us.ibm.com>"); |
85 | MODULE_DESCRIPTION("iSeries Virtual ethernet driver"); | 85 | MODULE_DESCRIPTION("iSeries Virtual ethernet driver"); |
86 | MODULE_LICENSE("GPL"); | 86 | MODULE_LICENSE("GPL"); |
87 | 87 | ||
88 | #define VETH_EVENT_CAP (0) | ||
89 | #define VETH_EVENT_FRAMES (1) | ||
90 | #define VETH_EVENT_MONITOR (2) | ||
91 | #define VETH_EVENT_FRAMES_ACK (3) | ||
92 | |||
93 | #define VETH_MAX_ACKS_PER_MSG (20) | ||
94 | #define VETH_MAX_FRAMES_PER_MSG (6) | ||
95 | |||
96 | struct veth_frames_data { | ||
97 | u32 addr[VETH_MAX_FRAMES_PER_MSG]; | ||
98 | u16 len[VETH_MAX_FRAMES_PER_MSG]; | ||
99 | u32 eofmask; | ||
100 | }; | ||
101 | #define VETH_EOF_SHIFT (32-VETH_MAX_FRAMES_PER_MSG) | ||
102 | |||
103 | struct veth_frames_ack_data { | ||
104 | u16 token[VETH_MAX_ACKS_PER_MSG]; | ||
105 | }; | ||
106 | |||
107 | struct veth_cap_data { | ||
108 | u8 caps_version; | ||
109 | u8 rsvd1; | ||
110 | u16 num_buffers; | ||
111 | u16 ack_threshold; | ||
112 | u16 rsvd2; | ||
113 | u32 ack_timeout; | ||
114 | u32 rsvd3; | ||
115 | u64 rsvd4[3]; | ||
116 | }; | ||
117 | |||
118 | struct veth_lpevent { | ||
119 | struct HvLpEvent base_event; | ||
120 | union { | ||
121 | struct veth_cap_data caps_data; | ||
122 | struct veth_frames_data frames_data; | ||
123 | struct veth_frames_ack_data frames_ack_data; | ||
124 | } u; | ||
125 | |||
126 | }; | ||
127 | |||
128 | #define DRV_NAME "iseries_veth" | ||
129 | #define DRV_VERSION "2.0" | ||
130 | |||
88 | #define VETH_NUMBUFFERS (120) | 131 | #define VETH_NUMBUFFERS (120) |
89 | #define VETH_ACKTIMEOUT (1000000) /* microseconds */ | 132 | #define VETH_ACKTIMEOUT (1000000) /* microseconds */ |
90 | #define VETH_MAX_MCAST (12) | 133 | #define VETH_MAX_MCAST (12) |
@@ -113,9 +156,9 @@ MODULE_LICENSE("GPL"); | |||
113 | 156 | ||
114 | struct veth_msg { | 157 | struct veth_msg { |
115 | struct veth_msg *next; | 158 | struct veth_msg *next; |
116 | struct VethFramesData data; | 159 | struct veth_frames_data data; |
117 | int token; | 160 | int token; |
118 | unsigned long in_use; | 161 | int in_use; |
119 | struct sk_buff *skb; | 162 | struct sk_buff *skb; |
120 | struct device *dev; | 163 | struct device *dev; |
121 | }; | 164 | }; |
@@ -125,23 +168,28 @@ struct veth_lpar_connection { | |||
125 | struct work_struct statemachine_wq; | 168 | struct work_struct statemachine_wq; |
126 | struct veth_msg *msgs; | 169 | struct veth_msg *msgs; |
127 | int num_events; | 170 | int num_events; |
128 | struct VethCapData local_caps; | 171 | struct veth_cap_data local_caps; |
129 | 172 | ||
173 | struct kobject kobject; | ||
130 | struct timer_list ack_timer; | 174 | struct timer_list ack_timer; |
131 | 175 | ||
176 | struct timer_list reset_timer; | ||
177 | unsigned int reset_timeout; | ||
178 | unsigned long last_contact; | ||
179 | int outstanding_tx; | ||
180 | |||
132 | spinlock_t lock; | 181 | spinlock_t lock; |
133 | unsigned long state; | 182 | unsigned long state; |
134 | HvLpInstanceId src_inst; | 183 | HvLpInstanceId src_inst; |
135 | HvLpInstanceId dst_inst; | 184 | HvLpInstanceId dst_inst; |
136 | struct VethLpEvent cap_event, cap_ack_event; | 185 | struct veth_lpevent cap_event, cap_ack_event; |
137 | u16 pending_acks[VETH_MAX_ACKS_PER_MSG]; | 186 | u16 pending_acks[VETH_MAX_ACKS_PER_MSG]; |
138 | u32 num_pending_acks; | 187 | u32 num_pending_acks; |
139 | 188 | ||
140 | int num_ack_events; | 189 | int num_ack_events; |
141 | struct VethCapData remote_caps; | 190 | struct veth_cap_data remote_caps; |
142 | u32 ack_timeout; | 191 | u32 ack_timeout; |
143 | 192 | ||
144 | spinlock_t msg_stack_lock; | ||
145 | struct veth_msg *msg_stack_head; | 193 | struct veth_msg *msg_stack_head; |
146 | }; | 194 | }; |
147 | 195 | ||
@@ -151,15 +199,17 @@ struct veth_port { | |||
151 | u64 mac_addr; | 199 | u64 mac_addr; |
152 | HvLpIndexMap lpar_map; | 200 | HvLpIndexMap lpar_map; |
153 | 201 | ||
154 | spinlock_t pending_gate; | 202 | /* queue_lock protects the stopped_map and dev's queue. */ |
155 | struct sk_buff *pending_skb; | 203 | spinlock_t queue_lock; |
156 | HvLpIndexMap pending_lpmask; | 204 | HvLpIndexMap stopped_map; |
157 | 205 | ||
206 | /* mcast_gate protects promiscuous, num_mcast & mcast_addr. */ | ||
158 | rwlock_t mcast_gate; | 207 | rwlock_t mcast_gate; |
159 | int promiscuous; | 208 | int promiscuous; |
160 | int all_mcast; | ||
161 | int num_mcast; | 209 | int num_mcast; |
162 | u64 mcast_addr[VETH_MAX_MCAST]; | 210 | u64 mcast_addr[VETH_MAX_MCAST]; |
211 | |||
212 | struct kobject kobject; | ||
163 | }; | 213 | }; |
164 | 214 | ||
165 | static HvLpIndex this_lp; | 215 | static HvLpIndex this_lp; |
@@ -168,44 +218,56 @@ static struct net_device *veth_dev[HVMAXARCHITECTEDVIRTUALLANS]; /* = 0 */ | |||
168 | 218 | ||
169 | static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev); | 219 | static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev); |
170 | static void veth_recycle_msg(struct veth_lpar_connection *, struct veth_msg *); | 220 | static void veth_recycle_msg(struct veth_lpar_connection *, struct veth_msg *); |
171 | static void veth_flush_pending(struct veth_lpar_connection *cnx); | 221 | static void veth_wake_queues(struct veth_lpar_connection *cnx); |
172 | static void veth_receive(struct veth_lpar_connection *, struct VethLpEvent *); | 222 | static void veth_stop_queues(struct veth_lpar_connection *cnx); |
173 | static void veth_timed_ack(unsigned long connectionPtr); | 223 | static void veth_receive(struct veth_lpar_connection *, struct veth_lpevent *); |
224 | static void veth_release_connection(struct kobject *kobject); | ||
225 | static void veth_timed_ack(unsigned long ptr); | ||
226 | static void veth_timed_reset(unsigned long ptr); | ||
174 | 227 | ||
175 | /* | 228 | /* |
176 | * Utility functions | 229 | * Utility functions |
177 | */ | 230 | */ |
178 | 231 | ||
179 | #define veth_printk(prio, fmt, args...) \ | 232 | #define veth_info(fmt, args...) \ |
180 | printk(prio "%s: " fmt, __FILE__, ## args) | 233 | printk(KERN_INFO DRV_NAME ": " fmt, ## args) |
181 | 234 | ||
182 | #define veth_error(fmt, args...) \ | 235 | #define veth_error(fmt, args...) \ |
183 | printk(KERN_ERR "(%s:%3.3d) ERROR: " fmt, __FILE__, __LINE__ , ## args) | 236 | printk(KERN_ERR DRV_NAME ": Error: " fmt, ## args) |
237 | |||
238 | #ifdef DEBUG | ||
239 | #define veth_debug(fmt, args...) \ | ||
240 | printk(KERN_DEBUG DRV_NAME ": " fmt, ## args) | ||
241 | #else | ||
242 | #define veth_debug(fmt, args...) do {} while (0) | ||
243 | #endif | ||
184 | 244 | ||
245 | /* You must hold the connection's lock when you call this function. */ | ||
185 | static inline void veth_stack_push(struct veth_lpar_connection *cnx, | 246 | static inline void veth_stack_push(struct veth_lpar_connection *cnx, |
186 | struct veth_msg *msg) | 247 | struct veth_msg *msg) |
187 | { | 248 | { |
188 | unsigned long flags; | ||
189 | |||
190 | spin_lock_irqsave(&cnx->msg_stack_lock, flags); | ||
191 | msg->next = cnx->msg_stack_head; | 249 | msg->next = cnx->msg_stack_head; |
192 | cnx->msg_stack_head = msg; | 250 | cnx->msg_stack_head = msg; |
193 | spin_unlock_irqrestore(&cnx->msg_stack_lock, flags); | ||
194 | } | 251 | } |
195 | 252 | ||
253 | /* You must hold the connection's lock when you call this function. */ | ||
196 | static inline struct veth_msg *veth_stack_pop(struct veth_lpar_connection *cnx) | 254 | static inline struct veth_msg *veth_stack_pop(struct veth_lpar_connection *cnx) |
197 | { | 255 | { |
198 | unsigned long flags; | ||
199 | struct veth_msg *msg; | 256 | struct veth_msg *msg; |
200 | 257 | ||
201 | spin_lock_irqsave(&cnx->msg_stack_lock, flags); | ||
202 | msg = cnx->msg_stack_head; | 258 | msg = cnx->msg_stack_head; |
203 | if (msg) | 259 | if (msg) |
204 | cnx->msg_stack_head = cnx->msg_stack_head->next; | 260 | cnx->msg_stack_head = cnx->msg_stack_head->next; |
205 | spin_unlock_irqrestore(&cnx->msg_stack_lock, flags); | 261 | |
206 | return msg; | 262 | return msg; |
207 | } | 263 | } |
208 | 264 | ||
265 | /* You must hold the connection's lock when you call this function. */ | ||
266 | static inline int veth_stack_is_empty(struct veth_lpar_connection *cnx) | ||
267 | { | ||
268 | return cnx->msg_stack_head == NULL; | ||
269 | } | ||
270 | |||
209 | static inline HvLpEvent_Rc | 271 | static inline HvLpEvent_Rc |
210 | veth_signalevent(struct veth_lpar_connection *cnx, u16 subtype, | 272 | veth_signalevent(struct veth_lpar_connection *cnx, u16 subtype, |
211 | HvLpEvent_AckInd ackind, HvLpEvent_AckType acktype, | 273 | HvLpEvent_AckInd ackind, HvLpEvent_AckType acktype, |
@@ -249,7 +311,7 @@ static int veth_allocate_events(HvLpIndex rlp, int number) | |||
249 | struct veth_allocation vc = { COMPLETION_INITIALIZER(vc.c), 0 }; | 311 | struct veth_allocation vc = { COMPLETION_INITIALIZER(vc.c), 0 }; |
250 | 312 | ||
251 | mf_allocate_lp_events(rlp, HvLpEvent_Type_VirtualLan, | 313 | mf_allocate_lp_events(rlp, HvLpEvent_Type_VirtualLan, |
252 | sizeof(struct VethLpEvent), number, | 314 | sizeof(struct veth_lpevent), number, |
253 | &veth_complete_allocation, &vc); | 315 | &veth_complete_allocation, &vc); |
254 | wait_for_completion(&vc.c); | 316 | wait_for_completion(&vc.c); |
255 | 317 | ||
@@ -257,6 +319,137 @@ static int veth_allocate_events(HvLpIndex rlp, int number) | |||
257 | } | 319 | } |
258 | 320 | ||
259 | /* | 321 | /* |
322 | * sysfs support | ||
323 | */ | ||
324 | |||
325 | struct veth_cnx_attribute { | ||
326 | struct attribute attr; | ||
327 | ssize_t (*show)(struct veth_lpar_connection *, char *buf); | ||
328 | ssize_t (*store)(struct veth_lpar_connection *, const char *buf); | ||
329 | }; | ||
330 | |||
331 | static ssize_t veth_cnx_attribute_show(struct kobject *kobj, | ||
332 | struct attribute *attr, char *buf) | ||
333 | { | ||
334 | struct veth_cnx_attribute *cnx_attr; | ||
335 | struct veth_lpar_connection *cnx; | ||
336 | |||
337 | cnx_attr = container_of(attr, struct veth_cnx_attribute, attr); | ||
338 | cnx = container_of(kobj, struct veth_lpar_connection, kobject); | ||
339 | |||
340 | if (!cnx_attr->show) | ||
341 | return -EIO; | ||
342 | |||
343 | return cnx_attr->show(cnx, buf); | ||
344 | } | ||
345 | |||
346 | #define CUSTOM_CNX_ATTR(_name, _format, _expression) \ | ||
347 | static ssize_t _name##_show(struct veth_lpar_connection *cnx, char *buf)\ | ||
348 | { \ | ||
349 | return sprintf(buf, _format, _expression); \ | ||
350 | } \ | ||
351 | struct veth_cnx_attribute veth_cnx_attr_##_name = __ATTR_RO(_name) | ||
352 | |||
353 | #define SIMPLE_CNX_ATTR(_name) \ | ||
354 | CUSTOM_CNX_ATTR(_name, "%lu\n", (unsigned long)cnx->_name) | ||
355 | |||
356 | SIMPLE_CNX_ATTR(outstanding_tx); | ||
357 | SIMPLE_CNX_ATTR(remote_lp); | ||
358 | SIMPLE_CNX_ATTR(num_events); | ||
359 | SIMPLE_CNX_ATTR(src_inst); | ||
360 | SIMPLE_CNX_ATTR(dst_inst); | ||
361 | SIMPLE_CNX_ATTR(num_pending_acks); | ||
362 | SIMPLE_CNX_ATTR(num_ack_events); | ||
363 | CUSTOM_CNX_ATTR(ack_timeout, "%d\n", jiffies_to_msecs(cnx->ack_timeout)); | ||
364 | CUSTOM_CNX_ATTR(reset_timeout, "%d\n", jiffies_to_msecs(cnx->reset_timeout)); | ||
365 | CUSTOM_CNX_ATTR(state, "0x%.4lX\n", cnx->state); | ||
366 | CUSTOM_CNX_ATTR(last_contact, "%d\n", cnx->last_contact ? | ||
367 | jiffies_to_msecs(jiffies - cnx->last_contact) : 0); | ||
368 | |||
369 | #define GET_CNX_ATTR(_name) (&veth_cnx_attr_##_name.attr) | ||
370 | |||
371 | static struct attribute *veth_cnx_default_attrs[] = { | ||
372 | GET_CNX_ATTR(outstanding_tx), | ||
373 | GET_CNX_ATTR(remote_lp), | ||
374 | GET_CNX_ATTR(num_events), | ||
375 | GET_CNX_ATTR(reset_timeout), | ||
376 | GET_CNX_ATTR(last_contact), | ||
377 | GET_CNX_ATTR(state), | ||
378 | GET_CNX_ATTR(src_inst), | ||
379 | GET_CNX_ATTR(dst_inst), | ||
380 | GET_CNX_ATTR(num_pending_acks), | ||
381 | GET_CNX_ATTR(num_ack_events), | ||
382 | GET_CNX_ATTR(ack_timeout), | ||
383 | NULL | ||
384 | }; | ||
385 | |||
386 | static struct sysfs_ops veth_cnx_sysfs_ops = { | ||
387 | .show = veth_cnx_attribute_show | ||
388 | }; | ||
389 | |||
390 | static struct kobj_type veth_lpar_connection_ktype = { | ||
391 | .release = veth_release_connection, | ||
392 | .sysfs_ops = &veth_cnx_sysfs_ops, | ||
393 | .default_attrs = veth_cnx_default_attrs | ||
394 | }; | ||
395 | |||
396 | struct veth_port_attribute { | ||
397 | struct attribute attr; | ||
398 | ssize_t (*show)(struct veth_port *, char *buf); | ||
399 | ssize_t (*store)(struct veth_port *, const char *buf); | ||
400 | }; | ||
401 | |||
402 | static ssize_t veth_port_attribute_show(struct kobject *kobj, | ||
403 | struct attribute *attr, char *buf) | ||
404 | { | ||
405 | struct veth_port_attribute *port_attr; | ||
406 | struct veth_port *port; | ||
407 | |||
408 | port_attr = container_of(attr, struct veth_port_attribute, attr); | ||
409 | port = container_of(kobj, struct veth_port, kobject); | ||
410 | |||
411 | if (!port_attr->show) | ||
412 | return -EIO; | ||
413 | |||
414 | return port_attr->show(port, buf); | ||
415 | } | ||
416 | |||
417 | #define CUSTOM_PORT_ATTR(_name, _format, _expression) \ | ||
418 | static ssize_t _name##_show(struct veth_port *port, char *buf) \ | ||
419 | { \ | ||
420 | return sprintf(buf, _format, _expression); \ | ||
421 | } \ | ||
422 | struct veth_port_attribute veth_port_attr_##_name = __ATTR_RO(_name) | ||
423 | |||
424 | #define SIMPLE_PORT_ATTR(_name) \ | ||
425 | CUSTOM_PORT_ATTR(_name, "%lu\n", (unsigned long)port->_name) | ||
426 | |||
427 | SIMPLE_PORT_ATTR(promiscuous); | ||
428 | SIMPLE_PORT_ATTR(num_mcast); | ||
429 | CUSTOM_PORT_ATTR(lpar_map, "0x%X\n", port->lpar_map); | ||
430 | CUSTOM_PORT_ATTR(stopped_map, "0x%X\n", port->stopped_map); | ||
431 | CUSTOM_PORT_ATTR(mac_addr, "0x%lX\n", port->mac_addr); | ||
432 | |||
433 | #define GET_PORT_ATTR(_name) (&veth_port_attr_##_name.attr) | ||
434 | static struct attribute *veth_port_default_attrs[] = { | ||
435 | GET_PORT_ATTR(mac_addr), | ||
436 | GET_PORT_ATTR(lpar_map), | ||
437 | GET_PORT_ATTR(stopped_map), | ||
438 | GET_PORT_ATTR(promiscuous), | ||
439 | GET_PORT_ATTR(num_mcast), | ||
440 | NULL | ||
441 | }; | ||
442 | |||
443 | static struct sysfs_ops veth_port_sysfs_ops = { | ||
444 | .show = veth_port_attribute_show | ||
445 | }; | ||
446 | |||
447 | static struct kobj_type veth_port_ktype = { | ||
448 | .sysfs_ops = &veth_port_sysfs_ops, | ||
449 | .default_attrs = veth_port_default_attrs | ||
450 | }; | ||
451 | |||
452 | /* | ||
260 | * LPAR connection code | 453 | * LPAR connection code |
261 | */ | 454 | */ |
262 | 455 | ||
@@ -266,7 +459,7 @@ static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx) | |||
266 | } | 459 | } |
267 | 460 | ||
268 | static void veth_take_cap(struct veth_lpar_connection *cnx, | 461 | static void veth_take_cap(struct veth_lpar_connection *cnx, |
269 | struct VethLpEvent *event) | 462 | struct veth_lpevent *event) |
270 | { | 463 | { |
271 | unsigned long flags; | 464 | unsigned long flags; |
272 | 465 | ||
@@ -278,7 +471,7 @@ static void veth_take_cap(struct veth_lpar_connection *cnx, | |||
278 | HvLpEvent_Type_VirtualLan); | 471 | HvLpEvent_Type_VirtualLan); |
279 | 472 | ||
280 | if (cnx->state & VETH_STATE_GOTCAPS) { | 473 | if (cnx->state & VETH_STATE_GOTCAPS) { |
281 | veth_error("Received a second capabilities from lpar %d\n", | 474 | veth_error("Received a second capabilities from LPAR %d.\n", |
282 | cnx->remote_lp); | 475 | cnx->remote_lp); |
283 | event->base_event.xRc = HvLpEvent_Rc_BufferNotAvailable; | 476 | event->base_event.xRc = HvLpEvent_Rc_BufferNotAvailable; |
284 | HvCallEvent_ackLpEvent((struct HvLpEvent *) event); | 477 | HvCallEvent_ackLpEvent((struct HvLpEvent *) event); |
@@ -291,13 +484,13 @@ static void veth_take_cap(struct veth_lpar_connection *cnx, | |||
291 | } | 484 | } |
292 | 485 | ||
293 | static void veth_take_cap_ack(struct veth_lpar_connection *cnx, | 486 | static void veth_take_cap_ack(struct veth_lpar_connection *cnx, |
294 | struct VethLpEvent *event) | 487 | struct veth_lpevent *event) |
295 | { | 488 | { |
296 | unsigned long flags; | 489 | unsigned long flags; |
297 | 490 | ||
298 | spin_lock_irqsave(&cnx->lock, flags); | 491 | spin_lock_irqsave(&cnx->lock, flags); |
299 | if (cnx->state & VETH_STATE_GOTCAPACK) { | 492 | if (cnx->state & VETH_STATE_GOTCAPACK) { |
300 | veth_error("Received a second capabilities ack from lpar %d\n", | 493 | veth_error("Received a second capabilities ack from LPAR %d.\n", |
301 | cnx->remote_lp); | 494 | cnx->remote_lp); |
302 | } else { | 495 | } else { |
303 | memcpy(&cnx->cap_ack_event, event, | 496 | memcpy(&cnx->cap_ack_event, event, |
@@ -309,19 +502,24 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx, | |||
309 | } | 502 | } |
310 | 503 | ||
311 | static void veth_take_monitor_ack(struct veth_lpar_connection *cnx, | 504 | static void veth_take_monitor_ack(struct veth_lpar_connection *cnx, |
312 | struct VethLpEvent *event) | 505 | struct veth_lpevent *event) |
313 | { | 506 | { |
314 | unsigned long flags; | 507 | unsigned long flags; |
315 | 508 | ||
316 | spin_lock_irqsave(&cnx->lock, flags); | 509 | spin_lock_irqsave(&cnx->lock, flags); |
317 | veth_printk(KERN_DEBUG, "Monitor ack returned for lpar %d\n", | 510 | veth_debug("cnx %d: lost connection.\n", cnx->remote_lp); |
318 | cnx->remote_lp); | 511 | |
319 | cnx->state |= VETH_STATE_RESET; | 512 | /* Avoid kicking the statemachine once we're shutdown. |
320 | veth_kick_statemachine(cnx); | 513 | * It's unnecessary and it could break veth_stop_connection(). */ |
514 | |||
515 | if (! (cnx->state & VETH_STATE_SHUTDOWN)) { | ||
516 | cnx->state |= VETH_STATE_RESET; | ||
517 | veth_kick_statemachine(cnx); | ||
518 | } | ||
321 | spin_unlock_irqrestore(&cnx->lock, flags); | 519 | spin_unlock_irqrestore(&cnx->lock, flags); |
322 | } | 520 | } |
323 | 521 | ||
324 | static void veth_handle_ack(struct VethLpEvent *event) | 522 | static void veth_handle_ack(struct veth_lpevent *event) |
325 | { | 523 | { |
326 | HvLpIndex rlp = event->base_event.xTargetLp; | 524 | HvLpIndex rlp = event->base_event.xTargetLp; |
327 | struct veth_lpar_connection *cnx = veth_cnx[rlp]; | 525 | struct veth_lpar_connection *cnx = veth_cnx[rlp]; |
@@ -329,58 +527,67 @@ static void veth_handle_ack(struct VethLpEvent *event) | |||
329 | BUG_ON(! cnx); | 527 | BUG_ON(! cnx); |
330 | 528 | ||
331 | switch (event->base_event.xSubtype) { | 529 | switch (event->base_event.xSubtype) { |
332 | case VethEventTypeCap: | 530 | case VETH_EVENT_CAP: |
333 | veth_take_cap_ack(cnx, event); | 531 | veth_take_cap_ack(cnx, event); |
334 | break; | 532 | break; |
335 | case VethEventTypeMonitor: | 533 | case VETH_EVENT_MONITOR: |
336 | veth_take_monitor_ack(cnx, event); | 534 | veth_take_monitor_ack(cnx, event); |
337 | break; | 535 | break; |
338 | default: | 536 | default: |
339 | veth_error("Unknown ack type %d from lpar %d\n", | 537 | veth_error("Unknown ack type %d from LPAR %d.\n", |
340 | event->base_event.xSubtype, rlp); | 538 | event->base_event.xSubtype, rlp); |
341 | }; | 539 | }; |
342 | } | 540 | } |
343 | 541 | ||
344 | static void veth_handle_int(struct VethLpEvent *event) | 542 | static void veth_handle_int(struct veth_lpevent *event) |
345 | { | 543 | { |
346 | HvLpIndex rlp = event->base_event.xSourceLp; | 544 | HvLpIndex rlp = event->base_event.xSourceLp; |
347 | struct veth_lpar_connection *cnx = veth_cnx[rlp]; | 545 | struct veth_lpar_connection *cnx = veth_cnx[rlp]; |
348 | unsigned long flags; | 546 | unsigned long flags; |
349 | int i; | 547 | int i, acked = 0; |
350 | 548 | ||
351 | BUG_ON(! cnx); | 549 | BUG_ON(! cnx); |
352 | 550 | ||
353 | switch (event->base_event.xSubtype) { | 551 | switch (event->base_event.xSubtype) { |
354 | case VethEventTypeCap: | 552 | case VETH_EVENT_CAP: |
355 | veth_take_cap(cnx, event); | 553 | veth_take_cap(cnx, event); |
356 | break; | 554 | break; |
357 | case VethEventTypeMonitor: | 555 | case VETH_EVENT_MONITOR: |
358 | /* do nothing... this'll hang out here til we're dead, | 556 | /* do nothing... this'll hang out here til we're dead, |
359 | * and the hypervisor will return it for us. */ | 557 | * and the hypervisor will return it for us. */ |
360 | break; | 558 | break; |
361 | case VethEventTypeFramesAck: | 559 | case VETH_EVENT_FRAMES_ACK: |
362 | spin_lock_irqsave(&cnx->lock, flags); | 560 | spin_lock_irqsave(&cnx->lock, flags); |
561 | |||
363 | for (i = 0; i < VETH_MAX_ACKS_PER_MSG; ++i) { | 562 | for (i = 0; i < VETH_MAX_ACKS_PER_MSG; ++i) { |
364 | u16 msgnum = event->u.frames_ack_data.token[i]; | 563 | u16 msgnum = event->u.frames_ack_data.token[i]; |
365 | 564 | ||
366 | if (msgnum < VETH_NUMBUFFERS) | 565 | if (msgnum < VETH_NUMBUFFERS) { |
367 | veth_recycle_msg(cnx, cnx->msgs + msgnum); | 566 | veth_recycle_msg(cnx, cnx->msgs + msgnum); |
567 | cnx->outstanding_tx--; | ||
568 | acked++; | ||
569 | } | ||
570 | } | ||
571 | |||
572 | if (acked > 0) { | ||
573 | cnx->last_contact = jiffies; | ||
574 | veth_wake_queues(cnx); | ||
368 | } | 575 | } |
576 | |||
369 | spin_unlock_irqrestore(&cnx->lock, flags); | 577 | spin_unlock_irqrestore(&cnx->lock, flags); |
370 | veth_flush_pending(cnx); | ||
371 | break; | 578 | break; |
372 | case VethEventTypeFrames: | 579 | case VETH_EVENT_FRAMES: |
373 | veth_receive(cnx, event); | 580 | veth_receive(cnx, event); |
374 | break; | 581 | break; |
375 | default: | 582 | default: |
376 | veth_error("Unknown interrupt type %d from lpar %d\n", | 583 | veth_error("Unknown interrupt type %d from LPAR %d.\n", |
377 | event->base_event.xSubtype, rlp); | 584 | event->base_event.xSubtype, rlp); |
378 | }; | 585 | }; |
379 | } | 586 | } |
380 | 587 | ||
381 | static void veth_handle_event(struct HvLpEvent *event, struct pt_regs *regs) | 588 | static void veth_handle_event(struct HvLpEvent *event, struct pt_regs *regs) |
382 | { | 589 | { |
383 | struct VethLpEvent *veth_event = (struct VethLpEvent *)event; | 590 | struct veth_lpevent *veth_event = (struct veth_lpevent *)event; |
384 | 591 | ||
385 | if (event->xFlags.xFunction == HvLpEvent_Function_Ack) | 592 | if (event->xFlags.xFunction == HvLpEvent_Function_Ack) |
386 | veth_handle_ack(veth_event); | 593 | veth_handle_ack(veth_event); |
@@ -390,7 +597,7 @@ static void veth_handle_event(struct HvLpEvent *event, struct pt_regs *regs) | |||
390 | 597 | ||
391 | static int veth_process_caps(struct veth_lpar_connection *cnx) | 598 | static int veth_process_caps(struct veth_lpar_connection *cnx) |
392 | { | 599 | { |
393 | struct VethCapData *remote_caps = &cnx->remote_caps; | 600 | struct veth_cap_data *remote_caps = &cnx->remote_caps; |
394 | int num_acks_needed; | 601 | int num_acks_needed; |
395 | 602 | ||
396 | /* Convert timer to jiffies */ | 603 | /* Convert timer to jiffies */ |
@@ -400,8 +607,8 @@ static int veth_process_caps(struct veth_lpar_connection *cnx) | |||
400 | || (remote_caps->ack_threshold > VETH_MAX_ACKS_PER_MSG) | 607 | || (remote_caps->ack_threshold > VETH_MAX_ACKS_PER_MSG) |
401 | || (remote_caps->ack_threshold == 0) | 608 | || (remote_caps->ack_threshold == 0) |
402 | || (cnx->ack_timeout == 0) ) { | 609 | || (cnx->ack_timeout == 0) ) { |
403 | veth_error("Received incompatible capabilities from lpar %d\n", | 610 | veth_error("Received incompatible capabilities from LPAR %d.\n", |
404 | cnx->remote_lp); | 611 | cnx->remote_lp); |
405 | return HvLpEvent_Rc_InvalidSubtypeData; | 612 | return HvLpEvent_Rc_InvalidSubtypeData; |
406 | } | 613 | } |
407 | 614 | ||
@@ -418,8 +625,8 @@ static int veth_process_caps(struct veth_lpar_connection *cnx) | |||
418 | cnx->num_ack_events += num; | 625 | cnx->num_ack_events += num; |
419 | 626 | ||
420 | if (cnx->num_ack_events < num_acks_needed) { | 627 | if (cnx->num_ack_events < num_acks_needed) { |
421 | veth_error("Couldn't allocate enough ack events for lpar %d\n", | 628 | veth_error("Couldn't allocate enough ack events " |
422 | cnx->remote_lp); | 629 | "for LPAR %d.\n", cnx->remote_lp); |
423 | 630 | ||
424 | return HvLpEvent_Rc_BufferNotAvailable; | 631 | return HvLpEvent_Rc_BufferNotAvailable; |
425 | } | 632 | } |
@@ -440,15 +647,15 @@ static void veth_statemachine(void *p) | |||
440 | 647 | ||
441 | restart: | 648 | restart: |
442 | if (cnx->state & VETH_STATE_RESET) { | 649 | if (cnx->state & VETH_STATE_RESET) { |
443 | int i; | ||
444 | |||
445 | del_timer(&cnx->ack_timer); | ||
446 | |||
447 | if (cnx->state & VETH_STATE_OPEN) | 650 | if (cnx->state & VETH_STATE_OPEN) |
448 | HvCallEvent_closeLpEventPath(cnx->remote_lp, | 651 | HvCallEvent_closeLpEventPath(cnx->remote_lp, |
449 | HvLpEvent_Type_VirtualLan); | 652 | HvLpEvent_Type_VirtualLan); |
450 | 653 | ||
451 | /* reset ack data */ | 654 | /* |
655 | * Reset ack data. This prevents the ack_timer actually | ||
656 | * doing anything, even if it runs one more time when | ||
657 | * we drop the lock below. | ||
658 | */ | ||
452 | memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks)); | 659 | memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks)); |
453 | cnx->num_pending_acks = 0; | 660 | cnx->num_pending_acks = 0; |
454 | 661 | ||
@@ -458,14 +665,32 @@ static void veth_statemachine(void *p) | |||
458 | | VETH_STATE_SENTCAPACK | VETH_STATE_READY); | 665 | | VETH_STATE_SENTCAPACK | VETH_STATE_READY); |
459 | 666 | ||
460 | /* Clean up any leftover messages */ | 667 | /* Clean up any leftover messages */ |
461 | if (cnx->msgs) | 668 | if (cnx->msgs) { |
669 | int i; | ||
462 | for (i = 0; i < VETH_NUMBUFFERS; ++i) | 670 | for (i = 0; i < VETH_NUMBUFFERS; ++i) |
463 | veth_recycle_msg(cnx, cnx->msgs + i); | 671 | veth_recycle_msg(cnx, cnx->msgs + i); |
672 | } | ||
673 | |||
674 | cnx->outstanding_tx = 0; | ||
675 | veth_wake_queues(cnx); | ||
676 | |||
677 | /* Drop the lock so we can do stuff that might sleep or | ||
678 | * take other locks. */ | ||
464 | spin_unlock_irq(&cnx->lock); | 679 | spin_unlock_irq(&cnx->lock); |
465 | veth_flush_pending(cnx); | 680 | |
681 | del_timer_sync(&cnx->ack_timer); | ||
682 | del_timer_sync(&cnx->reset_timer); | ||
683 | |||
466 | spin_lock_irq(&cnx->lock); | 684 | spin_lock_irq(&cnx->lock); |
685 | |||
467 | if (cnx->state & VETH_STATE_RESET) | 686 | if (cnx->state & VETH_STATE_RESET) |
468 | goto restart; | 687 | goto restart; |
688 | |||
689 | /* Hack, wait for the other end to reset itself. */ | ||
690 | if (! (cnx->state & VETH_STATE_SHUTDOWN)) { | ||
691 | schedule_delayed_work(&cnx->statemachine_wq, 5 * HZ); | ||
692 | goto out; | ||
693 | } | ||
469 | } | 694 | } |
470 | 695 | ||
471 | if (cnx->state & VETH_STATE_SHUTDOWN) | 696 | if (cnx->state & VETH_STATE_SHUTDOWN) |
@@ -488,7 +713,7 @@ static void veth_statemachine(void *p) | |||
488 | 713 | ||
489 | if ( (cnx->state & VETH_STATE_OPEN) | 714 | if ( (cnx->state & VETH_STATE_OPEN) |
490 | && !(cnx->state & VETH_STATE_SENTMON) ) { | 715 | && !(cnx->state & VETH_STATE_SENTMON) ) { |
491 | rc = veth_signalevent(cnx, VethEventTypeMonitor, | 716 | rc = veth_signalevent(cnx, VETH_EVENT_MONITOR, |
492 | HvLpEvent_AckInd_DoAck, | 717 | HvLpEvent_AckInd_DoAck, |
493 | HvLpEvent_AckType_DeferredAck, | 718 | HvLpEvent_AckType_DeferredAck, |
494 | 0, 0, 0, 0, 0, 0); | 719 | 0, 0, 0, 0, 0, 0); |
@@ -498,9 +723,8 @@ static void veth_statemachine(void *p) | |||
498 | } else { | 723 | } else { |
499 | if ( (rc != HvLpEvent_Rc_PartitionDead) | 724 | if ( (rc != HvLpEvent_Rc_PartitionDead) |
500 | && (rc != HvLpEvent_Rc_PathClosed) ) | 725 | && (rc != HvLpEvent_Rc_PathClosed) ) |
501 | veth_error("Error sending monitor to " | 726 | veth_error("Error sending monitor to LPAR %d, " |
502 | "lpar %d, rc=%x\n", | 727 | "rc = %d\n", rlp, rc); |
503 | rlp, (int) rc); | ||
504 | 728 | ||
505 | /* Oh well, hope we get a cap from the other | 729 | /* Oh well, hope we get a cap from the other |
506 | * end and do better when that kicks us */ | 730 | * end and do better when that kicks us */ |
@@ -512,7 +736,7 @@ static void veth_statemachine(void *p) | |||
512 | && !(cnx->state & VETH_STATE_SENTCAPS)) { | 736 | && !(cnx->state & VETH_STATE_SENTCAPS)) { |
513 | u64 *rawcap = (u64 *)&cnx->local_caps; | 737 | u64 *rawcap = (u64 *)&cnx->local_caps; |
514 | 738 | ||
515 | rc = veth_signalevent(cnx, VethEventTypeCap, | 739 | rc = veth_signalevent(cnx, VETH_EVENT_CAP, |
516 | HvLpEvent_AckInd_DoAck, | 740 | HvLpEvent_AckInd_DoAck, |
517 | HvLpEvent_AckType_ImmediateAck, | 741 | HvLpEvent_AckType_ImmediateAck, |
518 | 0, rawcap[0], rawcap[1], rawcap[2], | 742 | 0, rawcap[0], rawcap[1], rawcap[2], |
@@ -523,9 +747,9 @@ static void veth_statemachine(void *p) | |||
523 | } else { | 747 | } else { |
524 | if ( (rc != HvLpEvent_Rc_PartitionDead) | 748 | if ( (rc != HvLpEvent_Rc_PartitionDead) |
525 | && (rc != HvLpEvent_Rc_PathClosed) ) | 749 | && (rc != HvLpEvent_Rc_PathClosed) ) |
526 | veth_error("Error sending caps to " | 750 | veth_error("Error sending caps to LPAR %d, " |
527 | "lpar %d, rc=%x\n", | 751 | "rc = %d\n", rlp, rc); |
528 | rlp, (int) rc); | 752 | |
529 | /* Oh well, hope we get a cap from the other | 753 | /* Oh well, hope we get a cap from the other |
530 | * end and do better when that kicks us */ | 754 | * end and do better when that kicks us */ |
531 | goto out; | 755 | goto out; |
@@ -534,7 +758,7 @@ static void veth_statemachine(void *p) | |||
534 | 758 | ||
535 | if ((cnx->state & VETH_STATE_GOTCAPS) | 759 | if ((cnx->state & VETH_STATE_GOTCAPS) |
536 | && !(cnx->state & VETH_STATE_SENTCAPACK)) { | 760 | && !(cnx->state & VETH_STATE_SENTCAPACK)) { |
537 | struct VethCapData *remote_caps = &cnx->remote_caps; | 761 | struct veth_cap_data *remote_caps = &cnx->remote_caps; |
538 | 762 | ||
539 | memcpy(remote_caps, &cnx->cap_event.u.caps_data, | 763 | memcpy(remote_caps, &cnx->cap_event.u.caps_data, |
540 | sizeof(*remote_caps)); | 764 | sizeof(*remote_caps)); |
@@ -565,10 +789,8 @@ static void veth_statemachine(void *p) | |||
565 | add_timer(&cnx->ack_timer); | 789 | add_timer(&cnx->ack_timer); |
566 | cnx->state |= VETH_STATE_READY; | 790 | cnx->state |= VETH_STATE_READY; |
567 | } else { | 791 | } else { |
568 | veth_printk(KERN_ERR, "Caps rejected (rc=%d) by " | 792 | veth_error("Caps rejected by LPAR %d, rc = %d\n", |
569 | "lpar %d\n", | 793 | rlp, cnx->cap_ack_event.base_event.xRc); |
570 | cnx->cap_ack_event.base_event.xRc, | ||
571 | rlp); | ||
572 | goto cant_cope; | 794 | goto cant_cope; |
573 | } | 795 | } |
574 | } | 796 | } |
@@ -581,8 +803,8 @@ static void veth_statemachine(void *p) | |||
581 | /* FIXME: we get here if something happens we really can't | 803 | /* FIXME: we get here if something happens we really can't |
582 | * cope with. The link will never work once we get here, and | 804 | * cope with. The link will never work once we get here, and |
583 | * all we can do is not lock the rest of the system up */ | 805 | * all we can do is not lock the rest of the system up */ |
584 | veth_error("Badness on connection to lpar %d (state=%04lx) " | 806 | veth_error("Unrecoverable error on connection to LPAR %d, shutting down" |
585 | " - shutting down\n", rlp, cnx->state); | 807 | " (state = 0x%04lx)\n", rlp, cnx->state); |
586 | cnx->state |= VETH_STATE_SHUTDOWN; | 808 | cnx->state |= VETH_STATE_SHUTDOWN; |
587 | spin_unlock_irq(&cnx->lock); | 809 | spin_unlock_irq(&cnx->lock); |
588 | } | 810 | } |
@@ -591,7 +813,7 @@ static int veth_init_connection(u8 rlp) | |||
591 | { | 813 | { |
592 | struct veth_lpar_connection *cnx; | 814 | struct veth_lpar_connection *cnx; |
593 | struct veth_msg *msgs; | 815 | struct veth_msg *msgs; |
594 | int i; | 816 | int i, rc; |
595 | 817 | ||
596 | if ( (rlp == this_lp) | 818 | if ( (rlp == this_lp) |
597 | || ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) ) | 819 | || ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) ) |
@@ -605,22 +827,36 @@ static int veth_init_connection(u8 rlp) | |||
605 | cnx->remote_lp = rlp; | 827 | cnx->remote_lp = rlp; |
606 | spin_lock_init(&cnx->lock); | 828 | spin_lock_init(&cnx->lock); |
607 | INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx); | 829 | INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx); |
830 | |||
608 | init_timer(&cnx->ack_timer); | 831 | init_timer(&cnx->ack_timer); |
609 | cnx->ack_timer.function = veth_timed_ack; | 832 | cnx->ack_timer.function = veth_timed_ack; |
610 | cnx->ack_timer.data = (unsigned long) cnx; | 833 | cnx->ack_timer.data = (unsigned long) cnx; |
834 | |||
835 | init_timer(&cnx->reset_timer); | ||
836 | cnx->reset_timer.function = veth_timed_reset; | ||
837 | cnx->reset_timer.data = (unsigned long) cnx; | ||
838 | cnx->reset_timeout = 5 * HZ * (VETH_ACKTIMEOUT / 1000000); | ||
839 | |||
611 | memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks)); | 840 | memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks)); |
612 | 841 | ||
613 | veth_cnx[rlp] = cnx; | 842 | veth_cnx[rlp] = cnx; |
614 | 843 | ||
844 | /* This gets us 1 reference, which is held on behalf of the driver | ||
845 | * infrastructure. It's released at module unload. */ | ||
846 | kobject_init(&cnx->kobject); | ||
847 | cnx->kobject.ktype = &veth_lpar_connection_ktype; | ||
848 | rc = kobject_set_name(&cnx->kobject, "cnx%.2d", rlp); | ||
849 | if (rc != 0) | ||
850 | return rc; | ||
851 | |||
615 | msgs = kmalloc(VETH_NUMBUFFERS * sizeof(struct veth_msg), GFP_KERNEL); | 852 | msgs = kmalloc(VETH_NUMBUFFERS * sizeof(struct veth_msg), GFP_KERNEL); |
616 | if (! msgs) { | 853 | if (! msgs) { |
617 | veth_error("Can't allocate buffers for lpar %d\n", rlp); | 854 | veth_error("Can't allocate buffers for LPAR %d.\n", rlp); |
618 | return -ENOMEM; | 855 | return -ENOMEM; |
619 | } | 856 | } |
620 | 857 | ||
621 | cnx->msgs = msgs; | 858 | cnx->msgs = msgs; |
622 | memset(msgs, 0, VETH_NUMBUFFERS * sizeof(struct veth_msg)); | 859 | memset(msgs, 0, VETH_NUMBUFFERS * sizeof(struct veth_msg)); |
623 | spin_lock_init(&cnx->msg_stack_lock); | ||
624 | 860 | ||
625 | for (i = 0; i < VETH_NUMBUFFERS; i++) { | 861 | for (i = 0; i < VETH_NUMBUFFERS; i++) { |
626 | msgs[i].token = i; | 862 | msgs[i].token = i; |
@@ -630,8 +866,7 @@ static int veth_init_connection(u8 rlp) | |||
630 | cnx->num_events = veth_allocate_events(rlp, 2 + VETH_NUMBUFFERS); | 866 | cnx->num_events = veth_allocate_events(rlp, 2 + VETH_NUMBUFFERS); |
631 | 867 | ||
632 | if (cnx->num_events < (2 + VETH_NUMBUFFERS)) { | 868 | if (cnx->num_events < (2 + VETH_NUMBUFFERS)) { |
633 | veth_error("Can't allocate events for lpar %d, only got %d\n", | 869 | veth_error("Can't allocate enough events for LPAR %d.\n", rlp); |
634 | rlp, cnx->num_events); | ||
635 | return -ENOMEM; | 870 | return -ENOMEM; |
636 | } | 871 | } |
637 | 872 | ||
@@ -642,11 +877,9 @@ static int veth_init_connection(u8 rlp) | |||
642 | return 0; | 877 | return 0; |
643 | } | 878 | } |
644 | 879 | ||
645 | static void veth_stop_connection(u8 rlp) | 880 | static void veth_stop_connection(struct veth_lpar_connection *cnx) |
646 | { | 881 | { |
647 | struct veth_lpar_connection *cnx = veth_cnx[rlp]; | 882 | if (!cnx) |
648 | |||
649 | if (! cnx) | ||
650 | return; | 883 | return; |
651 | 884 | ||
652 | spin_lock_irq(&cnx->lock); | 885 | spin_lock_irq(&cnx->lock); |
@@ -654,12 +887,23 @@ static void veth_stop_connection(u8 rlp) | |||
654 | veth_kick_statemachine(cnx); | 887 | veth_kick_statemachine(cnx); |
655 | spin_unlock_irq(&cnx->lock); | 888 | spin_unlock_irq(&cnx->lock); |
656 | 889 | ||
890 | /* There's a slim chance the reset code has just queued the | ||
891 | * statemachine to run in five seconds. If so we need to cancel | ||
892 | * that and requeue the work to run now. */ | ||
893 | if (cancel_delayed_work(&cnx->statemachine_wq)) { | ||
894 | spin_lock_irq(&cnx->lock); | ||
895 | veth_kick_statemachine(cnx); | ||
896 | spin_unlock_irq(&cnx->lock); | ||
897 | } | ||
898 | |||
899 | /* Wait for the state machine to run. */ | ||
657 | flush_scheduled_work(); | 900 | flush_scheduled_work(); |
901 | } | ||
658 | 902 | ||
659 | /* FIXME: not sure if this is necessary - will already have | 903 | static void veth_destroy_connection(struct veth_lpar_connection *cnx) |
660 | * been deleted by the state machine, just want to make sure | 904 | { |
661 | * its not running any more */ | 905 | if (!cnx) |
662 | del_timer_sync(&cnx->ack_timer); | 906 | return; |
663 | 907 | ||
664 | if (cnx->num_events > 0) | 908 | if (cnx->num_events > 0) |
665 | mf_deallocate_lp_events(cnx->remote_lp, | 909 | mf_deallocate_lp_events(cnx->remote_lp, |
@@ -671,18 +915,18 @@ static void veth_stop_connection(u8 rlp) | |||
671 | HvLpEvent_Type_VirtualLan, | 915 | HvLpEvent_Type_VirtualLan, |
672 | cnx->num_ack_events, | 916 | cnx->num_ack_events, |
673 | NULL, NULL); | 917 | NULL, NULL); |
674 | } | ||
675 | |||
676 | static void veth_destroy_connection(u8 rlp) | ||
677 | { | ||
678 | struct veth_lpar_connection *cnx = veth_cnx[rlp]; | ||
679 | |||
680 | if (! cnx) | ||
681 | return; | ||
682 | 918 | ||
683 | kfree(cnx->msgs); | 919 | kfree(cnx->msgs); |
920 | veth_cnx[cnx->remote_lp] = NULL; | ||
684 | kfree(cnx); | 921 | kfree(cnx); |
685 | veth_cnx[rlp] = NULL; | 922 | } |
923 | |||
924 | static void veth_release_connection(struct kobject *kobj) | ||
925 | { | ||
926 | struct veth_lpar_connection *cnx; | ||
927 | cnx = container_of(kobj, struct veth_lpar_connection, kobject); | ||
928 | veth_stop_connection(cnx); | ||
929 | veth_destroy_connection(cnx); | ||
686 | } | 930 | } |
687 | 931 | ||
688 | /* | 932 | /* |
@@ -726,17 +970,15 @@ static void veth_set_multicast_list(struct net_device *dev) | |||
726 | 970 | ||
727 | write_lock_irqsave(&port->mcast_gate, flags); | 971 | write_lock_irqsave(&port->mcast_gate, flags); |
728 | 972 | ||
729 | if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ | 973 | if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || |
730 | printk(KERN_INFO "%s: Promiscuous mode enabled.\n", | 974 | (dev->mc_count > VETH_MAX_MCAST)) { |
731 | dev->name); | ||
732 | port->promiscuous = 1; | 975 | port->promiscuous = 1; |
733 | } else if ( (dev->flags & IFF_ALLMULTI) | ||
734 | || (dev->mc_count > VETH_MAX_MCAST) ) { | ||
735 | port->all_mcast = 1; | ||
736 | } else { | 976 | } else { |
737 | struct dev_mc_list *dmi = dev->mc_list; | 977 | struct dev_mc_list *dmi = dev->mc_list; |
738 | int i; | 978 | int i; |
739 | 979 | ||
980 | port->promiscuous = 0; | ||
981 | |||
740 | /* Update table */ | 982 | /* Update table */ |
741 | port->num_mcast = 0; | 983 | port->num_mcast = 0; |
742 | 984 | ||
@@ -758,9 +1000,10 @@ static void veth_set_multicast_list(struct net_device *dev) | |||
758 | 1000 | ||
759 | static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 1001 | static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
760 | { | 1002 | { |
761 | strncpy(info->driver, "veth", sizeof(info->driver) - 1); | 1003 | strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1); |
762 | info->driver[sizeof(info->driver) - 1] = '\0'; | 1004 | info->driver[sizeof(info->driver) - 1] = '\0'; |
763 | strncpy(info->version, "1.0", sizeof(info->version) - 1); | 1005 | strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1); |
1006 | info->version[sizeof(info->version) - 1] = '\0'; | ||
764 | } | 1007 | } |
765 | 1008 | ||
766 | static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | 1009 | static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) |
@@ -791,49 +1034,6 @@ static struct ethtool_ops ops = { | |||
791 | .get_link = veth_get_link, | 1034 | .get_link = veth_get_link, |
792 | }; | 1035 | }; |
793 | 1036 | ||
794 | static void veth_tx_timeout(struct net_device *dev) | ||
795 | { | ||
796 | struct veth_port *port = (struct veth_port *)dev->priv; | ||
797 | struct net_device_stats *stats = &port->stats; | ||
798 | unsigned long flags; | ||
799 | int i; | ||
800 | |||
801 | stats->tx_errors++; | ||
802 | |||
803 | spin_lock_irqsave(&port->pending_gate, flags); | ||
804 | |||
805 | if (!port->pending_lpmask) { | ||
806 | spin_unlock_irqrestore(&port->pending_gate, flags); | ||
807 | return; | ||
808 | } | ||
809 | |||
810 | printk(KERN_WARNING "%s: Tx timeout! Resetting lp connections: %08x\n", | ||
811 | dev->name, port->pending_lpmask); | ||
812 | |||
813 | for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { | ||
814 | struct veth_lpar_connection *cnx = veth_cnx[i]; | ||
815 | |||
816 | if (! (port->pending_lpmask & (1<<i))) | ||
817 | continue; | ||
818 | |||
819 | /* If we're pending on it, we must be connected to it, | ||
820 | * so we should certainly have a structure for it. */ | ||
821 | BUG_ON(! cnx); | ||
822 | |||
823 | /* Theoretically we could be kicking a connection | ||
824 | * which doesn't deserve it, but in practice if we've | ||
825 | * had a Tx timeout, the pending_lpmask will have | ||
826 | * exactly one bit set - the connection causing the | ||
827 | * problem. */ | ||
828 | spin_lock(&cnx->lock); | ||
829 | cnx->state |= VETH_STATE_RESET; | ||
830 | veth_kick_statemachine(cnx); | ||
831 | spin_unlock(&cnx->lock); | ||
832 | } | ||
833 | |||
834 | spin_unlock_irqrestore(&port->pending_gate, flags); | ||
835 | } | ||
836 | |||
837 | static struct net_device * __init veth_probe_one(int vlan, struct device *vdev) | 1037 | static struct net_device * __init veth_probe_one(int vlan, struct device *vdev) |
838 | { | 1038 | { |
839 | struct net_device *dev; | 1039 | struct net_device *dev; |
@@ -848,8 +1048,9 @@ static struct net_device * __init veth_probe_one(int vlan, struct device *vdev) | |||
848 | 1048 | ||
849 | port = (struct veth_port *) dev->priv; | 1049 | port = (struct veth_port *) dev->priv; |
850 | 1050 | ||
851 | spin_lock_init(&port->pending_gate); | 1051 | spin_lock_init(&port->queue_lock); |
852 | rwlock_init(&port->mcast_gate); | 1052 | rwlock_init(&port->mcast_gate); |
1053 | port->stopped_map = 0; | ||
853 | 1054 | ||
854 | for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { | 1055 | for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { |
855 | HvLpVirtualLanIndexMap map; | 1056 | HvLpVirtualLanIndexMap map; |
@@ -882,22 +1083,24 @@ static struct net_device * __init veth_probe_one(int vlan, struct device *vdev) | |||
882 | dev->set_multicast_list = veth_set_multicast_list; | 1083 | dev->set_multicast_list = veth_set_multicast_list; |
883 | SET_ETHTOOL_OPS(dev, &ops); | 1084 | SET_ETHTOOL_OPS(dev, &ops); |
884 | 1085 | ||
885 | dev->watchdog_timeo = 2 * (VETH_ACKTIMEOUT * HZ / 1000000); | ||
886 | dev->tx_timeout = veth_tx_timeout; | ||
887 | |||
888 | SET_NETDEV_DEV(dev, vdev); | 1086 | SET_NETDEV_DEV(dev, vdev); |
889 | 1087 | ||
890 | rc = register_netdev(dev); | 1088 | rc = register_netdev(dev); |
891 | if (rc != 0) { | 1089 | if (rc != 0) { |
892 | veth_printk(KERN_ERR, | 1090 | veth_error("Failed registering net device for vlan%d.\n", vlan); |
893 | "Failed to register ethernet device for vlan %d\n", | ||
894 | vlan); | ||
895 | free_netdev(dev); | 1091 | free_netdev(dev); |
896 | return NULL; | 1092 | return NULL; |
897 | } | 1093 | } |
898 | 1094 | ||
899 | veth_printk(KERN_DEBUG, "%s attached to iSeries vlan %d (lpar_map=0x%04x)\n", | 1095 | kobject_init(&port->kobject); |
900 | dev->name, vlan, port->lpar_map); | 1096 | port->kobject.parent = &dev->class_dev.kobj; |
1097 | port->kobject.ktype = &veth_port_ktype; | ||
1098 | kobject_set_name(&port->kobject, "veth_port"); | ||
1099 | if (0 != kobject_add(&port->kobject)) | ||
1100 | veth_error("Failed adding port for %s to sysfs.\n", dev->name); | ||
1101 | |||
1102 | veth_info("%s attached to iSeries vlan %d (LPAR map = 0x%.4X)\n", | ||
1103 | dev->name, vlan, port->lpar_map); | ||
901 | 1104 | ||
902 | return dev; | 1105 | return dev; |
903 | } | 1106 | } |
@@ -912,98 +1115,95 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp, | |||
912 | struct veth_lpar_connection *cnx = veth_cnx[rlp]; | 1115 | struct veth_lpar_connection *cnx = veth_cnx[rlp]; |
913 | struct veth_port *port = (struct veth_port *) dev->priv; | 1116 | struct veth_port *port = (struct veth_port *) dev->priv; |
914 | HvLpEvent_Rc rc; | 1117 | HvLpEvent_Rc rc; |
915 | u32 dma_address, dma_length; | ||
916 | struct veth_msg *msg = NULL; | 1118 | struct veth_msg *msg = NULL; |
917 | int err = 0; | ||
918 | unsigned long flags; | 1119 | unsigned long flags; |
919 | 1120 | ||
920 | if (! cnx) { | 1121 | if (! cnx) |
921 | port->stats.tx_errors++; | ||
922 | dev_kfree_skb(skb); | ||
923 | return 0; | 1122 | return 0; |
924 | } | ||
925 | 1123 | ||
926 | spin_lock_irqsave(&cnx->lock, flags); | 1124 | spin_lock_irqsave(&cnx->lock, flags); |
927 | 1125 | ||
928 | if (! (cnx->state & VETH_STATE_READY)) | 1126 | if (! (cnx->state & VETH_STATE_READY)) |
929 | goto drop; | 1127 | goto no_error; |
930 | 1128 | ||
931 | if ((skb->len - 14) > VETH_MAX_MTU) | 1129 | if ((skb->len - ETH_HLEN) > VETH_MAX_MTU) |
932 | goto drop; | 1130 | goto drop; |
933 | 1131 | ||
934 | msg = veth_stack_pop(cnx); | 1132 | msg = veth_stack_pop(cnx); |
935 | 1133 | if (! msg) | |
936 | if (! msg) { | ||
937 | err = 1; | ||
938 | goto drop; | 1134 | goto drop; |
939 | } | ||
940 | 1135 | ||
941 | dma_length = skb->len; | 1136 | msg->in_use = 1; |
942 | dma_address = dma_map_single(port->dev, skb->data, | 1137 | msg->skb = skb_get(skb); |
943 | dma_length, DMA_TO_DEVICE); | 1138 | |
1139 | msg->data.addr[0] = dma_map_single(port->dev, skb->data, | ||
1140 | skb->len, DMA_TO_DEVICE); | ||
944 | 1141 | ||
945 | if (dma_mapping_error(dma_address)) | 1142 | if (dma_mapping_error(msg->data.addr[0])) |
946 | goto recycle_and_drop; | 1143 | goto recycle_and_drop; |
947 | 1144 | ||
948 | /* Is it really necessary to check the length and address | ||
949 | * fields of the first entry here? */ | ||
950 | msg->skb = skb; | ||
951 | msg->dev = port->dev; | 1145 | msg->dev = port->dev; |
952 | msg->data.addr[0] = dma_address; | 1146 | msg->data.len[0] = skb->len; |
953 | msg->data.len[0] = dma_length; | ||
954 | msg->data.eofmask = 1 << VETH_EOF_SHIFT; | 1147 | msg->data.eofmask = 1 << VETH_EOF_SHIFT; |
955 | set_bit(0, &(msg->in_use)); | 1148 | |
956 | rc = veth_signaldata(cnx, VethEventTypeFrames, msg->token, &msg->data); | 1149 | rc = veth_signaldata(cnx, VETH_EVENT_FRAMES, msg->token, &msg->data); |
957 | 1150 | ||
958 | if (rc != HvLpEvent_Rc_Good) | 1151 | if (rc != HvLpEvent_Rc_Good) |
959 | goto recycle_and_drop; | 1152 | goto recycle_and_drop; |
960 | 1153 | ||
1154 | /* If the timer's not already running, start it now. */ | ||
1155 | if (0 == cnx->outstanding_tx) | ||
1156 | mod_timer(&cnx->reset_timer, jiffies + cnx->reset_timeout); | ||
1157 | |||
1158 | cnx->last_contact = jiffies; | ||
1159 | cnx->outstanding_tx++; | ||
1160 | |||
1161 | if (veth_stack_is_empty(cnx)) | ||
1162 | veth_stop_queues(cnx); | ||
1163 | |||
1164 | no_error: | ||
961 | spin_unlock_irqrestore(&cnx->lock, flags); | 1165 | spin_unlock_irqrestore(&cnx->lock, flags); |
962 | return 0; | 1166 | return 0; |
963 | 1167 | ||
964 | recycle_and_drop: | 1168 | recycle_and_drop: |
965 | msg->skb = NULL; | ||
966 | /* need to set in use to make veth_recycle_msg in case this | ||
967 | * was a mapping failure */ | ||
968 | set_bit(0, &msg->in_use); | ||
969 | veth_recycle_msg(cnx, msg); | 1169 | veth_recycle_msg(cnx, msg); |
970 | drop: | 1170 | drop: |
971 | port->stats.tx_errors++; | ||
972 | dev_kfree_skb(skb); | ||
973 | spin_unlock_irqrestore(&cnx->lock, flags); | 1171 | spin_unlock_irqrestore(&cnx->lock, flags); |
974 | return err; | 1172 | return 1; |
975 | } | 1173 | } |
976 | 1174 | ||
977 | static HvLpIndexMap veth_transmit_to_many(struct sk_buff *skb, | 1175 | static void veth_transmit_to_many(struct sk_buff *skb, |
978 | HvLpIndexMap lpmask, | 1176 | HvLpIndexMap lpmask, |
979 | struct net_device *dev) | 1177 | struct net_device *dev) |
980 | { | 1178 | { |
981 | struct veth_port *port = (struct veth_port *) dev->priv; | 1179 | struct veth_port *port = (struct veth_port *) dev->priv; |
982 | int i; | 1180 | int i, success, error; |
983 | int rc; | 1181 | |
1182 | success = error = 0; | ||
984 | 1183 | ||
985 | for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { | 1184 | for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { |
986 | if ((lpmask & (1 << i)) == 0) | 1185 | if ((lpmask & (1 << i)) == 0) |
987 | continue; | 1186 | continue; |
988 | 1187 | ||
989 | rc = veth_transmit_to_one(skb_get(skb), i, dev); | 1188 | if (veth_transmit_to_one(skb, i, dev)) |
990 | if (! rc) | 1189 | error = 1; |
991 | lpmask &= ~(1<<i); | 1190 | else |
1191 | success = 1; | ||
992 | } | 1192 | } |
993 | 1193 | ||
994 | if (! lpmask) { | 1194 | if (error) |
1195 | port->stats.tx_errors++; | ||
1196 | |||
1197 | if (success) { | ||
995 | port->stats.tx_packets++; | 1198 | port->stats.tx_packets++; |
996 | port->stats.tx_bytes += skb->len; | 1199 | port->stats.tx_bytes += skb->len; |
997 | } | 1200 | } |
998 | |||
999 | return lpmask; | ||
1000 | } | 1201 | } |
1001 | 1202 | ||
1002 | static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1203 | static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1003 | { | 1204 | { |
1004 | unsigned char *frame = skb->data; | 1205 | unsigned char *frame = skb->data; |
1005 | struct veth_port *port = (struct veth_port *) dev->priv; | 1206 | struct veth_port *port = (struct veth_port *) dev->priv; |
1006 | unsigned long flags; | ||
1007 | HvLpIndexMap lpmask; | 1207 | HvLpIndexMap lpmask; |
1008 | 1208 | ||
1009 | if (! (frame[0] & 0x01)) { | 1209 | if (! (frame[0] & 0x01)) { |
@@ -1020,44 +1220,27 @@ static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1020 | lpmask = port->lpar_map; | 1220 | lpmask = port->lpar_map; |
1021 | } | 1221 | } |
1022 | 1222 | ||
1023 | spin_lock_irqsave(&port->pending_gate, flags); | 1223 | veth_transmit_to_many(skb, lpmask, dev); |
1024 | |||
1025 | lpmask = veth_transmit_to_many(skb, lpmask, dev); | ||
1026 | |||
1027 | dev->trans_start = jiffies; | ||
1028 | 1224 | ||
1029 | if (! lpmask) { | 1225 | dev_kfree_skb(skb); |
1030 | dev_kfree_skb(skb); | ||
1031 | } else { | ||
1032 | if (port->pending_skb) { | ||
1033 | veth_error("%s: Tx while skb was pending!\n", | ||
1034 | dev->name); | ||
1035 | dev_kfree_skb(skb); | ||
1036 | spin_unlock_irqrestore(&port->pending_gate, flags); | ||
1037 | return 1; | ||
1038 | } | ||
1039 | |||
1040 | port->pending_skb = skb; | ||
1041 | port->pending_lpmask = lpmask; | ||
1042 | netif_stop_queue(dev); | ||
1043 | } | ||
1044 | |||
1045 | spin_unlock_irqrestore(&port->pending_gate, flags); | ||
1046 | 1226 | ||
1047 | return 0; | 1227 | return 0; |
1048 | } | 1228 | } |
1049 | 1229 | ||
1230 | /* You must hold the connection's lock when you call this function. */ | ||
1050 | static void veth_recycle_msg(struct veth_lpar_connection *cnx, | 1231 | static void veth_recycle_msg(struct veth_lpar_connection *cnx, |
1051 | struct veth_msg *msg) | 1232 | struct veth_msg *msg) |
1052 | { | 1233 | { |
1053 | u32 dma_address, dma_length; | 1234 | u32 dma_address, dma_length; |
1054 | 1235 | ||
1055 | if (test_and_clear_bit(0, &msg->in_use)) { | 1236 | if (msg->in_use) { |
1237 | msg->in_use = 0; | ||
1056 | dma_address = msg->data.addr[0]; | 1238 | dma_address = msg->data.addr[0]; |
1057 | dma_length = msg->data.len[0]; | 1239 | dma_length = msg->data.len[0]; |
1058 | 1240 | ||
1059 | dma_unmap_single(msg->dev, dma_address, dma_length, | 1241 | if (!dma_mapping_error(dma_address)) |
1060 | DMA_TO_DEVICE); | 1242 | dma_unmap_single(msg->dev, dma_address, dma_length, |
1243 | DMA_TO_DEVICE); | ||
1061 | 1244 | ||
1062 | if (msg->skb) { | 1245 | if (msg->skb) { |
1063 | dev_kfree_skb_any(msg->skb); | 1246 | dev_kfree_skb_any(msg->skb); |
@@ -1066,15 +1249,16 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx, | |||
1066 | 1249 | ||
1067 | memset(&msg->data, 0, sizeof(msg->data)); | 1250 | memset(&msg->data, 0, sizeof(msg->data)); |
1068 | veth_stack_push(cnx, msg); | 1251 | veth_stack_push(cnx, msg); |
1069 | } else | 1252 | } else if (cnx->state & VETH_STATE_OPEN) { |
1070 | if (cnx->state & VETH_STATE_OPEN) | 1253 | veth_error("Non-pending frame (# %d) acked by LPAR %d.\n", |
1071 | veth_error("Bogus frames ack from lpar %d (#%d)\n", | 1254 | cnx->remote_lp, msg->token); |
1072 | cnx->remote_lp, msg->token); | 1255 | } |
1073 | } | 1256 | } |
1074 | 1257 | ||
1075 | static void veth_flush_pending(struct veth_lpar_connection *cnx) | 1258 | static void veth_wake_queues(struct veth_lpar_connection *cnx) |
1076 | { | 1259 | { |
1077 | int i; | 1260 | int i; |
1261 | |||
1078 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) { | 1262 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) { |
1079 | struct net_device *dev = veth_dev[i]; | 1263 | struct net_device *dev = veth_dev[i]; |
1080 | struct veth_port *port; | 1264 | struct veth_port *port; |
@@ -1088,20 +1272,77 @@ static void veth_flush_pending(struct veth_lpar_connection *cnx) | |||
1088 | if (! (port->lpar_map & (1<<cnx->remote_lp))) | 1272 | if (! (port->lpar_map & (1<<cnx->remote_lp))) |
1089 | continue; | 1273 | continue; |
1090 | 1274 | ||
1091 | spin_lock_irqsave(&port->pending_gate, flags); | 1275 | spin_lock_irqsave(&port->queue_lock, flags); |
1092 | if (port->pending_skb) { | 1276 | |
1093 | port->pending_lpmask = | 1277 | port->stopped_map &= ~(1 << cnx->remote_lp); |
1094 | veth_transmit_to_many(port->pending_skb, | 1278 | |
1095 | port->pending_lpmask, | 1279 | if (0 == port->stopped_map && netif_queue_stopped(dev)) { |
1096 | dev); | 1280 | veth_debug("cnx %d: woke queue for %s.\n", |
1097 | if (! port->pending_lpmask) { | 1281 | cnx->remote_lp, dev->name); |
1098 | dev_kfree_skb_any(port->pending_skb); | 1282 | netif_wake_queue(dev); |
1099 | port->pending_skb = NULL; | 1283 | } |
1100 | netif_wake_queue(dev); | 1284 | spin_unlock_irqrestore(&port->queue_lock, flags); |
1101 | } | 1285 | } |
1286 | } | ||
1287 | |||
1288 | static void veth_stop_queues(struct veth_lpar_connection *cnx) | ||
1289 | { | ||
1290 | int i; | ||
1291 | |||
1292 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) { | ||
1293 | struct net_device *dev = veth_dev[i]; | ||
1294 | struct veth_port *port; | ||
1295 | |||
1296 | if (! dev) | ||
1297 | continue; | ||
1298 | |||
1299 | port = (struct veth_port *)dev->priv; | ||
1300 | |||
1301 | /* If this cnx is not on the vlan for this port, continue */ | ||
1302 | if (! (port->lpar_map & (1 << cnx->remote_lp))) | ||
1303 | continue; | ||
1304 | |||
1305 | spin_lock(&port->queue_lock); | ||
1306 | |||
1307 | netif_stop_queue(dev); | ||
1308 | port->stopped_map |= (1 << cnx->remote_lp); | ||
1309 | |||
1310 | veth_debug("cnx %d: stopped queue for %s, map = 0x%x.\n", | ||
1311 | cnx->remote_lp, dev->name, port->stopped_map); | ||
1312 | |||
1313 | spin_unlock(&port->queue_lock); | ||
1314 | } | ||
1315 | } | ||
1316 | |||
1317 | static void veth_timed_reset(unsigned long ptr) | ||
1318 | { | ||
1319 | struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)ptr; | ||
1320 | unsigned long trigger_time, flags; | ||
1321 | |||
1322 | /* FIXME is it possible this fires after veth_stop_connection()? | ||
1323 | * That would reschedule the statemachine for 5 seconds and probably | ||
1324 | * execute it after the module's been unloaded. Hmm. */ | ||
1325 | |||
1326 | spin_lock_irqsave(&cnx->lock, flags); | ||
1327 | |||
1328 | if (cnx->outstanding_tx > 0) { | ||
1329 | trigger_time = cnx->last_contact + cnx->reset_timeout; | ||
1330 | |||
1331 | if (trigger_time < jiffies) { | ||
1332 | cnx->state |= VETH_STATE_RESET; | ||
1333 | veth_kick_statemachine(cnx); | ||
1334 | veth_error("%d packets not acked by LPAR %d within %d " | ||
1335 | "seconds, resetting.\n", | ||
1336 | cnx->outstanding_tx, cnx->remote_lp, | ||
1337 | cnx->reset_timeout / HZ); | ||
1338 | } else { | ||
1339 | /* Reschedule the timer */ | ||
1340 | trigger_time = jiffies + cnx->reset_timeout; | ||
1341 | mod_timer(&cnx->reset_timer, trigger_time); | ||
1102 | } | 1342 | } |
1103 | spin_unlock_irqrestore(&port->pending_gate, flags); | ||
1104 | } | 1343 | } |
1344 | |||
1345 | spin_unlock_irqrestore(&cnx->lock, flags); | ||
1105 | } | 1346 | } |
1106 | 1347 | ||
1107 | /* | 1348 | /* |
@@ -1117,12 +1358,9 @@ static inline int veth_frame_wanted(struct veth_port *port, u64 mac_addr) | |||
1117 | if ( (mac_addr == port->mac_addr) || (mac_addr == 0xffffffffffff0000) ) | 1358 | if ( (mac_addr == port->mac_addr) || (mac_addr == 0xffffffffffff0000) ) |
1118 | return 1; | 1359 | return 1; |
1119 | 1360 | ||
1120 | if (! (((char *) &mac_addr)[0] & 0x01)) | ||
1121 | return 0; | ||
1122 | |||
1123 | read_lock_irqsave(&port->mcast_gate, flags); | 1361 | read_lock_irqsave(&port->mcast_gate, flags); |
1124 | 1362 | ||
1125 | if (port->promiscuous || port->all_mcast) { | 1363 | if (port->promiscuous) { |
1126 | wanted = 1; | 1364 | wanted = 1; |
1127 | goto out; | 1365 | goto out; |
1128 | } | 1366 | } |
@@ -1175,21 +1413,21 @@ static void veth_flush_acks(struct veth_lpar_connection *cnx) | |||
1175 | { | 1413 | { |
1176 | HvLpEvent_Rc rc; | 1414 | HvLpEvent_Rc rc; |
1177 | 1415 | ||
1178 | rc = veth_signaldata(cnx, VethEventTypeFramesAck, | 1416 | rc = veth_signaldata(cnx, VETH_EVENT_FRAMES_ACK, |
1179 | 0, &cnx->pending_acks); | 1417 | 0, &cnx->pending_acks); |
1180 | 1418 | ||
1181 | if (rc != HvLpEvent_Rc_Good) | 1419 | if (rc != HvLpEvent_Rc_Good) |
1182 | veth_error("Error 0x%x acking frames from lpar %d!\n", | 1420 | veth_error("Failed acking frames from LPAR %d, rc = %d\n", |
1183 | (unsigned)rc, cnx->remote_lp); | 1421 | cnx->remote_lp, (int)rc); |
1184 | 1422 | ||
1185 | cnx->num_pending_acks = 0; | 1423 | cnx->num_pending_acks = 0; |
1186 | memset(&cnx->pending_acks, 0xff, sizeof(cnx->pending_acks)); | 1424 | memset(&cnx->pending_acks, 0xff, sizeof(cnx->pending_acks)); |
1187 | } | 1425 | } |
1188 | 1426 | ||
1189 | static void veth_receive(struct veth_lpar_connection *cnx, | 1427 | static void veth_receive(struct veth_lpar_connection *cnx, |
1190 | struct VethLpEvent *event) | 1428 | struct veth_lpevent *event) |
1191 | { | 1429 | { |
1192 | struct VethFramesData *senddata = &event->u.frames_data; | 1430 | struct veth_frames_data *senddata = &event->u.frames_data; |
1193 | int startchunk = 0; | 1431 | int startchunk = 0; |
1194 | int nchunks; | 1432 | int nchunks; |
1195 | unsigned long flags; | 1433 | unsigned long flags; |
@@ -1216,9 +1454,10 @@ static void veth_receive(struct veth_lpar_connection *cnx, | |||
1216 | /* make sure that we have at least 1 EOF entry in the | 1454 | /* make sure that we have at least 1 EOF entry in the |
1217 | * remaining entries */ | 1455 | * remaining entries */ |
1218 | if (! (senddata->eofmask >> (startchunk + VETH_EOF_SHIFT))) { | 1456 | if (! (senddata->eofmask >> (startchunk + VETH_EOF_SHIFT))) { |
1219 | veth_error("missing EOF frag in event " | 1457 | veth_error("Missing EOF fragment in event " |
1220 | "eofmask=0x%x startchunk=%d\n", | 1458 | "eofmask = 0x%x startchunk = %d\n", |
1221 | (unsigned) senddata->eofmask, startchunk); | 1459 | (unsigned)senddata->eofmask, |
1460 | startchunk); | ||
1222 | break; | 1461 | break; |
1223 | } | 1462 | } |
1224 | 1463 | ||
@@ -1237,8 +1476,9 @@ static void veth_receive(struct veth_lpar_connection *cnx, | |||
1237 | /* nchunks == # of chunks in this frame */ | 1476 | /* nchunks == # of chunks in this frame */ |
1238 | 1477 | ||
1239 | if ((length - ETH_HLEN) > VETH_MAX_MTU) { | 1478 | if ((length - ETH_HLEN) > VETH_MAX_MTU) { |
1240 | veth_error("Received oversize frame from lpar %d " | 1479 | veth_error("Received oversize frame from LPAR %d " |
1241 | "(length=%d)\n", cnx->remote_lp, length); | 1480 | "(length = %d)\n", |
1481 | cnx->remote_lp, length); | ||
1242 | continue; | 1482 | continue; |
1243 | } | 1483 | } |
1244 | 1484 | ||
@@ -1331,15 +1571,33 @@ static void veth_timed_ack(unsigned long ptr) | |||
1331 | 1571 | ||
1332 | static int veth_remove(struct vio_dev *vdev) | 1572 | static int veth_remove(struct vio_dev *vdev) |
1333 | { | 1573 | { |
1334 | int i = vdev->unit_address; | 1574 | struct veth_lpar_connection *cnx; |
1335 | struct net_device *dev; | 1575 | struct net_device *dev; |
1576 | struct veth_port *port; | ||
1577 | int i; | ||
1336 | 1578 | ||
1337 | dev = veth_dev[i]; | 1579 | dev = veth_dev[vdev->unit_address]; |
1338 | if (dev != NULL) { | 1580 | |
1339 | veth_dev[i] = NULL; | 1581 | if (! dev) |
1340 | unregister_netdev(dev); | 1582 | return 0; |
1341 | free_netdev(dev); | 1583 | |
1584 | port = netdev_priv(dev); | ||
1585 | |||
1586 | for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { | ||
1587 | cnx = veth_cnx[i]; | ||
1588 | |||
1589 | if (cnx && (port->lpar_map & (1 << i))) { | ||
1590 | /* Drop our reference to connections on our VLAN */ | ||
1591 | kobject_put(&cnx->kobject); | ||
1592 | } | ||
1342 | } | 1593 | } |
1594 | |||
1595 | veth_dev[vdev->unit_address] = NULL; | ||
1596 | kobject_del(&port->kobject); | ||
1597 | kobject_put(&port->kobject); | ||
1598 | unregister_netdev(dev); | ||
1599 | free_netdev(dev); | ||
1600 | |||
1343 | return 0; | 1601 | return 0; |
1344 | } | 1602 | } |
1345 | 1603 | ||
@@ -1347,6 +1605,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1347 | { | 1605 | { |
1348 | int i = vdev->unit_address; | 1606 | int i = vdev->unit_address; |
1349 | struct net_device *dev; | 1607 | struct net_device *dev; |
1608 | struct veth_port *port; | ||
1350 | 1609 | ||
1351 | dev = veth_probe_one(i, &vdev->dev); | 1610 | dev = veth_probe_one(i, &vdev->dev); |
1352 | if (dev == NULL) { | 1611 | if (dev == NULL) { |
@@ -1355,11 +1614,23 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1355 | } | 1614 | } |
1356 | veth_dev[i] = dev; | 1615 | veth_dev[i] = dev; |
1357 | 1616 | ||
1358 | /* Start the state machine on each connection, to commence | 1617 | port = (struct veth_port*)netdev_priv(dev); |
1359 | * link negotiation */ | 1618 | |
1360 | for (i = 0; i < HVMAXARCHITECTEDLPS; i++) | 1619 | /* Start the state machine on each connection on this vlan. If we're |
1361 | if (veth_cnx[i]) | 1620 | * the first dev to do so this will commence link negotiation */ |
1362 | veth_kick_statemachine(veth_cnx[i]); | 1621 | for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { |
1622 | struct veth_lpar_connection *cnx; | ||
1623 | |||
1624 | if (! (port->lpar_map & (1 << i))) | ||
1625 | continue; | ||
1626 | |||
1627 | cnx = veth_cnx[i]; | ||
1628 | if (!cnx) | ||
1629 | continue; | ||
1630 | |||
1631 | kobject_get(&cnx->kobject); | ||
1632 | veth_kick_statemachine(cnx); | ||
1633 | } | ||
1363 | 1634 | ||
1364 | return 0; | 1635 | return 0; |
1365 | } | 1636 | } |
@@ -1375,7 +1646,7 @@ static struct vio_device_id veth_device_table[] __devinitdata = { | |||
1375 | MODULE_DEVICE_TABLE(vio, veth_device_table); | 1646 | MODULE_DEVICE_TABLE(vio, veth_device_table); |
1376 | 1647 | ||
1377 | static struct vio_driver veth_driver = { | 1648 | static struct vio_driver veth_driver = { |
1378 | .name = "iseries_veth", | 1649 | .name = DRV_NAME, |
1379 | .id_table = veth_device_table, | 1650 | .id_table = veth_device_table, |
1380 | .probe = veth_probe, | 1651 | .probe = veth_probe, |
1381 | .remove = veth_remove | 1652 | .remove = veth_remove |
@@ -1388,29 +1659,29 @@ static struct vio_driver veth_driver = { | |||
1388 | void __exit veth_module_cleanup(void) | 1659 | void __exit veth_module_cleanup(void) |
1389 | { | 1660 | { |
1390 | int i; | 1661 | int i; |
1662 | struct veth_lpar_connection *cnx; | ||
1391 | 1663 | ||
1392 | /* Stop the queues first to stop any new packets being sent. */ | 1664 | /* Disconnect our "irq" to stop events coming from the Hypervisor. */ |
1393 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) | ||
1394 | if (veth_dev[i]) | ||
1395 | netif_stop_queue(veth_dev[i]); | ||
1396 | |||
1397 | /* Stop the connections before we unregister the driver. This | ||
1398 | * ensures there's no skbs lying around holding the device open. */ | ||
1399 | for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) | ||
1400 | veth_stop_connection(i); | ||
1401 | |||
1402 | HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan); | 1665 | HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan); |
1403 | 1666 | ||
1404 | /* Hypervisor callbacks may have scheduled more work while we | 1667 | /* Make sure any work queued from Hypervisor callbacks is finished. */ |
1405 | * were stopping connections. Now that we've disconnected from | ||
1406 | * the hypervisor make sure everything's finished. */ | ||
1407 | flush_scheduled_work(); | 1668 | flush_scheduled_work(); |
1408 | 1669 | ||
1409 | vio_unregister_driver(&veth_driver); | 1670 | for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) { |
1671 | cnx = veth_cnx[i]; | ||
1672 | |||
1673 | if (!cnx) | ||
1674 | continue; | ||
1410 | 1675 | ||
1411 | for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) | 1676 | /* Remove the connection from sysfs */ |
1412 | veth_destroy_connection(i); | 1677 | kobject_del(&cnx->kobject); |
1678 | /* Drop the driver's reference to the connection */ | ||
1679 | kobject_put(&cnx->kobject); | ||
1680 | } | ||
1413 | 1681 | ||
1682 | /* Unregister the driver, which will close all the netdevs and stop | ||
1683 | * the connections when they're no longer referenced. */ | ||
1684 | vio_unregister_driver(&veth_driver); | ||
1414 | } | 1685 | } |
1415 | module_exit(veth_module_cleanup); | 1686 | module_exit(veth_module_cleanup); |
1416 | 1687 | ||
@@ -1423,15 +1694,37 @@ int __init veth_module_init(void) | |||
1423 | 1694 | ||
1424 | for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) { | 1695 | for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) { |
1425 | rc = veth_init_connection(i); | 1696 | rc = veth_init_connection(i); |
1426 | if (rc != 0) { | 1697 | if (rc != 0) |
1427 | veth_module_cleanup(); | 1698 | goto error; |
1428 | return rc; | ||
1429 | } | ||
1430 | } | 1699 | } |
1431 | 1700 | ||
1432 | HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan, | 1701 | HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan, |
1433 | &veth_handle_event); | 1702 | &veth_handle_event); |
1434 | 1703 | ||
1435 | return vio_register_driver(&veth_driver); | 1704 | rc = vio_register_driver(&veth_driver); |
1705 | if (rc != 0) | ||
1706 | goto error; | ||
1707 | |||
1708 | for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) { | ||
1709 | struct kobject *kobj; | ||
1710 | |||
1711 | if (!veth_cnx[i]) | ||
1712 | continue; | ||
1713 | |||
1714 | kobj = &veth_cnx[i]->kobject; | ||
1715 | kobj->parent = &veth_driver.driver.kobj; | ||
1716 | /* If the add fails, complain but otherwise continue */ | ||
1717 | if (0 != kobject_add(kobj)) | ||
1718 | veth_error("cnx %d: Failed adding to sysfs.\n", i); | ||
1719 | } | ||
1720 | |||
1721 | return 0; | ||
1722 | |||
1723 | error: | ||
1724 | for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) { | ||
1725 | veth_destroy_connection(veth_cnx[i]); | ||
1726 | } | ||
1727 | |||
1728 | return rc; | ||
1436 | } | 1729 | } |
1437 | module_init(veth_module_init); | 1730 | module_init(veth_module_init); |