diff options
author | Moni Shoua <monis@Voltaire.COM> | 2008-07-15 02:48:43 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-07-15 02:48:43 -0400 |
commit | 164ba0893c27a216557396320b6063fdac040392 (patch) | |
tree | 257f56ce914584f1a26112d86f1737c27671a096 | |
parent | a9474917099e007c0f51d5474394b5890111614f (diff) |
IB/sa: Fail requests made while creating new SM AH
This patch solves a race that occurs after an event occurs that causes
the SA query module to flush its SM address handle (AH). When SM AH
becomes invalid and needs an update it is handled by the global
workqueue. On the other hand this event is also handled in the IPoIB
driver by queuing work in the ipoib_workqueue that does multicast
joins. Although queuing is in the right order, it is done to 2
different workqueues and so there is no guarantee that the first to be
queued is the first to be executed.
This causes a problem because IPoIB may end up sending a request to
the old SM, which will take a long time to time out (since the old SM
is gone); this leads to a much longer than necessary interruption in
multicast traffic.
The patch sets the SA query module's SM AH to NULL when the event
occurs, and until update_sm_ah() is done, any request that needs sm_ah
fails with -EAGAIN return status.
For consumers, the patch doesn't make things worse. Before the patch,
MADs are sent to the wrong SM so the request gets lost. Consumers can
be improved if they examine the return code and respond to EAGAIN
properly but even without an improvement the situation is not getting
worse.
Signed-off-by: Moni Levy <monil@voltaire.com>
Signed-off-by: Moni Shoua <monis@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r-- | drivers/infiniband/core/sa_query.c | 22 |
1 files changed, 16 insertions, 6 deletions
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index cf474ec27070..78ea8157d622 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -361,7 +361,7 @@ static void update_sm_ah(struct work_struct *work) | |||
361 | { | 361 | { |
362 | struct ib_sa_port *port = | 362 | struct ib_sa_port *port = |
363 | container_of(work, struct ib_sa_port, update_task); | 363 | container_of(work, struct ib_sa_port, update_task); |
364 | struct ib_sa_sm_ah *new_ah, *old_ah; | 364 | struct ib_sa_sm_ah *new_ah; |
365 | struct ib_port_attr port_attr; | 365 | struct ib_port_attr port_attr; |
366 | struct ib_ah_attr ah_attr; | 366 | struct ib_ah_attr ah_attr; |
367 | 367 | ||
@@ -397,12 +397,9 @@ static void update_sm_ah(struct work_struct *work) | |||
397 | } | 397 | } |
398 | 398 | ||
399 | spin_lock_irq(&port->ah_lock); | 399 | spin_lock_irq(&port->ah_lock); |
400 | old_ah = port->sm_ah; | ||
401 | port->sm_ah = new_ah; | 400 | port->sm_ah = new_ah; |
402 | spin_unlock_irq(&port->ah_lock); | 401 | spin_unlock_irq(&port->ah_lock); |
403 | 402 | ||
404 | if (old_ah) | ||
405 | kref_put(&old_ah->ref, free_sm_ah); | ||
406 | } | 403 | } |
407 | 404 | ||
408 | static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event) | 405 | static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event) |
@@ -413,8 +410,17 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event | |||
413 | event->event == IB_EVENT_PKEY_CHANGE || | 410 | event->event == IB_EVENT_PKEY_CHANGE || |
414 | event->event == IB_EVENT_SM_CHANGE || | 411 | event->event == IB_EVENT_SM_CHANGE || |
415 | event->event == IB_EVENT_CLIENT_REREGISTER) { | 412 | event->event == IB_EVENT_CLIENT_REREGISTER) { |
416 | struct ib_sa_device *sa_dev; | 413 | unsigned long flags; |
417 | sa_dev = container_of(handler, typeof(*sa_dev), event_handler); | 414 | struct ib_sa_device *sa_dev = |
415 | container_of(handler, typeof(*sa_dev), event_handler); | ||
416 | struct ib_sa_port *port = | ||
417 | &sa_dev->port[event->element.port_num - sa_dev->start_port]; | ||
418 | |||
419 | spin_lock_irqsave(&port->ah_lock, flags); | ||
420 | if (port->sm_ah) | ||
421 | kref_put(&port->sm_ah->ref, free_sm_ah); | ||
422 | port->sm_ah = NULL; | ||
423 | spin_unlock_irqrestore(&port->ah_lock, flags); | ||
418 | 424 | ||
419 | schedule_work(&sa_dev->port[event->element.port_num - | 425 | schedule_work(&sa_dev->port[event->element.port_num - |
420 | sa_dev->start_port].update_task); | 426 | sa_dev->start_port].update_task); |
@@ -519,6 +525,10 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) | |||
519 | unsigned long flags; | 525 | unsigned long flags; |
520 | 526 | ||
521 | spin_lock_irqsave(&query->port->ah_lock, flags); | 527 | spin_lock_irqsave(&query->port->ah_lock, flags); |
528 | if (!query->port->sm_ah) { | ||
529 | spin_unlock_irqrestore(&query->port->ah_lock, flags); | ||
530 | return -EAGAIN; | ||
531 | } | ||
522 | kref_get(&query->port->sm_ah->ref); | 532 | kref_get(&query->port->sm_ah->ref); |
523 | query->sm_ah = query->port->sm_ah; | 533 | query->sm_ah = query->port->sm_ah; |
524 | spin_unlock_irqrestore(&query->port->ah_lock, flags); | 534 | spin_unlock_irqrestore(&query->port->ah_lock, flags); |