aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/ulp/ipoib/ipoib.h
diff options
context:
space:
mode:
authorMoni Shoua <monis@Voltaire.COM>2008-07-15 02:48:49 -0400
committerRoland Dreier <rolandd@cisco.com>2008-07-15 02:48:49 -0400
commitee1e2c82c245a5fb2864e9dbcdaab3390fde3fcc (patch)
tree2bd6686dcee9524352c1afce3cb772373ec83d5f /drivers/infiniband/ulp/ipoib/ipoib.h
parent038919f29682b00ea95506e959210fc72d1aaf64 (diff)
IPoIB: Refresh paths instead of flushing them on SM change events
The patch tries to solve the problem of the device going down and paths being flushed on an SM change event. The method is to mark the paths as candidates for refresh (by setting the new valid flag to 0), and wait for an ARP probe to trigger a new path record query. The solution requires a different and less intrusive handling of SM change events. For that, the second argument of the flush function changes its meaning from a boolean flag to a level. In most cases, SM failover doesn't cause a LID change, so traffic won't stop. In the rare cases of a LID change, the remote host (the one that hadn't changed its LID) will lose connectivity until paths are refreshed. This is no worse than the current state. In fact, preventing the device from going down saves packets that otherwise would be lost. Signed-off-by: Moni Levy <monil@voltaire.com> Signed-off-by: Moni Shoua <monis@voltaire.com> Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/ulp/ipoib/ipoib.h')
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h17
1 file changed, 14 insertions, 3 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 2c522572e3c5..bb19587c5eaf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -54,6 +54,12 @@
54 54
55/* constants */ 55/* constants */
56 56
57enum ipoib_flush_level {
58 IPOIB_FLUSH_LIGHT,
59 IPOIB_FLUSH_NORMAL,
60 IPOIB_FLUSH_HEAVY
61};
62
57enum { 63enum {
58 IPOIB_ENCAP_LEN = 4, 64 IPOIB_ENCAP_LEN = 4,
59 65
@@ -284,10 +290,11 @@ struct ipoib_dev_priv {
284 290
285 struct delayed_work pkey_poll_task; 291 struct delayed_work pkey_poll_task;
286 struct delayed_work mcast_task; 292 struct delayed_work mcast_task;
287 struct work_struct flush_task; 293 struct work_struct flush_light;
294 struct work_struct flush_normal;
295 struct work_struct flush_heavy;
288 struct work_struct restart_task; 296 struct work_struct restart_task;
289 struct delayed_work ah_reap_task; 297 struct delayed_work ah_reap_task;
290 struct work_struct pkey_event_task;
291 298
292 struct ib_device *ca; 299 struct ib_device *ca;
293 u8 port; 300 u8 port;
@@ -369,6 +376,7 @@ struct ipoib_path {
369 376
370 struct rb_node rb_node; 377 struct rb_node rb_node;
371 struct list_head list; 378 struct list_head list;
379 int valid;
372}; 380};
373 381
374struct ipoib_neigh { 382struct ipoib_neigh {
@@ -433,11 +441,14 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
433 struct ipoib_ah *address, u32 qpn); 441 struct ipoib_ah *address, u32 qpn);
434void ipoib_reap_ah(struct work_struct *work); 442void ipoib_reap_ah(struct work_struct *work);
435 443
444void ipoib_mark_paths_invalid(struct net_device *dev);
436void ipoib_flush_paths(struct net_device *dev); 445void ipoib_flush_paths(struct net_device *dev);
437struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); 446struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
438 447
439int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); 448int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
440void ipoib_ib_dev_flush(struct work_struct *work); 449void ipoib_ib_dev_flush_light(struct work_struct *work);
450void ipoib_ib_dev_flush_normal(struct work_struct *work);
451void ipoib_ib_dev_flush_heavy(struct work_struct *work);
441void ipoib_pkey_event(struct work_struct *work); 452void ipoib_pkey_event(struct work_struct *work);
442void ipoib_ib_dev_cleanup(struct net_device *dev); 453void ipoib_ib_dev_cleanup(struct net_device *dev);
443 454