author    Steve Wise <swise@opengridcomputing.com>    2009-09-05 23:22:38 -0400
committer Roland Dreier <rolandd@cisco.com>    2009-09-05 23:22:38 -0400
commit    fa0d4c11c4b6eb49708b82b638ceb0761152f46a (patch)
tree      3e1cdc905a64d532cbf7d2fa6b58413c698f6f31 /drivers/infiniband
parent    b496fe82d4075847a1c42efba2e81d28f6467b3a (diff)
RDMA/cxgb3: Handle port events properly
Massage the err_handler upcall into an event handler upcall, pass netdev
port events to the cxgb3 ULPs and generate RDMA port events based on LLD
port events.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
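The LLD side of this change is not part of the diff below. The sketch that follows is only an illustration of the idea described above, under the assumption that the cxgb3 LLD fans a netdev port event out to every registered offload client through the new .event_handler member of struct cxgb3_client. The helper name example_notify_ulps(), the example_client_list/example_client_lock bookkeeping, and the client_list member name are assumptions for illustration, not the actual cxgb3 code.

#include <linux/list.h>
#include <linux/mutex.h>
#include "cxgb3_offload.h"      /* struct cxgb3_client, struct t3cdev (assumed header) */

/* Hypothetical bookkeeping standing in for the LLD's real client list. */
static LIST_HEAD(example_client_list);
static DEFINE_MUTEX(example_client_lock);

/*
 * Illustration only: walk the registered offload clients and invoke the
 * .event_handler upcall with the event type (e.g. OFFLOAD_PORT_UP or
 * OFFLOAD_PORT_DOWN) and the zero-based port index.
 */
static void example_notify_ulps(struct t3cdev *tdev, u32 event, u32 port_id)
{
        struct cxgb3_client *client;

        mutex_lock(&example_client_lock);
        list_for_each_entry(client, &example_client_list, client_list)
                if (client->event_handler)
                        client->event_handler(tdev, event, port_id);
        mutex_unlock(&example_client_lock);
}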
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--    drivers/infiniband/hw/cxgb3/iwch.c    28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 26fc0a4eaa74..5796170b0b25 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
 
 static void open_rnic_dev(struct t3cdev *);
 static void close_rnic_dev(struct t3cdev *);
-static void iwch_err_handler(struct t3cdev *, u32, u32);
+static void iwch_event_handler(struct t3cdev *, u32, u32);
 
 struct cxgb3_client t3c_client = {
         .name = "iw_cxgb3",
@@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = {
         .remove = close_rnic_dev,
         .handlers = t3c_handlers,
         .redirect = iwch_ep_redirect,
-        .err_handler = iwch_err_handler
+        .event_handler = iwch_event_handler
 };
 
 static LIST_HEAD(dev_list);
@@ -162,21 +162,33 @@ static void close_rnic_dev(struct t3cdev *tdev)
         mutex_unlock(&dev_mutex);
 }
 
-static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
 {
         struct cxio_rdev *rdev = tdev->ulp;
         struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev);
         struct ib_event event;
+        u32 portnum = port_id + 1;
 
-        if (status == OFFLOAD_STATUS_DOWN) {
+        switch (evt) {
+        case OFFLOAD_STATUS_DOWN: {
                 rdev->flags = CXIO_ERROR_FATAL;
-
-                event.device = &rnicp->ibdev;
                 event.event = IB_EVENT_DEVICE_FATAL;
-                event.element.port_num = 0;
-                ib_dispatch_event(&event);
+                break;
+        }
+        case OFFLOAD_PORT_DOWN: {
+                event.event = IB_EVENT_PORT_ERR;
+                break;
+        }
+        case OFFLOAD_PORT_UP: {
+                event.event = IB_EVENT_PORT_ACTIVE;
+                break;
+        }
         }
 
+        event.device = &rnicp->ibdev;
+        event.element.port_num = portnum;
+        ib_dispatch_event(&event);
+
         return;
 }
 
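For context on where the dispatched events end up: a kernel ULP that wants to observe the IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR events generated above registers a handler with the IB core via the ib_register_event_handler()/INIT_IB_EVENT_HANDLER() interface from <rdma/ib_verbs.h>. The sketch below is a minimal, hedged example of such a consumer; the my_ulp_* names are hypothetical and not part of this patch.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static struct ib_event_handler my_ulp_event_handler;

/* Hypothetical consumer: log the port events dispatched by iwch_event_handler(). */
static void my_ulp_handle_event(struct ib_event_handler *handler,
                                struct ib_event *event)
{
        switch (event->event) {
        case IB_EVENT_PORT_ACTIVE:
                printk(KERN_INFO "%s: port %d active\n",
                       event->device->name, event->element.port_num);
                break;
        case IB_EVENT_PORT_ERR:
                printk(KERN_INFO "%s: port %d down\n",
                       event->device->name, event->element.port_num);
                break;
        default:
                break;
        }
}

/* Called once the ULP knows which ib_device it is interested in. */
static void my_ulp_watch_device(struct ib_device *device)
{
        INIT_IB_EVENT_HANDLER(&my_ulp_event_handler, device, my_ulp_handle_event);
        ib_register_event_handler(&my_ulp_event_handler);
}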