Diffstat (limited to 'drivers/infiniband/ulp/ipoib/ipoib.h')

 drivers/infiniband/ulp/ipoib/ipoib.h | 49 ++++++++++++++++++++++++++++++++---
 1 file changed, 46 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 87310eeb6df0..285c143115cc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -132,12 +132,46 @@ struct ipoib_cm_data {
 	__be32 mtu;
 };
 
+/*
+ * Quoting 10.3.1 Queue Pair and EE Context States:
+ *
+ * Note, for QPs that are associated with an SRQ, the Consumer should take the
+ * QP through the Error State before invoking a Destroy QP or a Modify QP to the
+ * Reset State.  The Consumer may invoke the Destroy QP without first performing
+ * a Modify QP to the Error State and waiting for the Affiliated Asynchronous
+ * Last WQE Reached Event.  However, if the Consumer does not wait for the
+ * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
+ * leakage may occur.  Therefore, it is good programming practice to tear down a
+ * QP that is associated with an SRQ by using the following process:
+ *
+ * - Put the QP in the Error State
+ * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
+ * - either:
+ *       drain the CQ by invoking the Poll CQ verb and either wait for CQ
+ *       to be empty or the number of Poll CQ operations has exceeded
+ *       CQ capacity size;
+ * - or
+ *       post another WR that completes on the same CQ and wait for this
+ *       WR to return as a WC;
+ * - and then invoke a Destroy QP or Reset QP.
+ *
+ * We use the second option and wait for a completion on the
+ * same CQ before destroying QPs attached to our SRQ.
+ */
+
+enum ipoib_cm_state {
+	IPOIB_CM_RX_LIVE,
+	IPOIB_CM_RX_ERROR, /* Ignored by stale task */
+	IPOIB_CM_RX_FLUSH  /* Last WQE Reached event observed */
+};
+
 struct ipoib_cm_rx {
 	struct ib_cm_id *id;
 	struct ib_qp *qp;
 	struct list_head list;
 	struct net_device *dev;
 	unsigned long jiffies;
+	enum ipoib_cm_state state;
 };
 
 struct ipoib_cm_tx {
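The comment added above settles on the spec's second option: once the QP is in the error state and the Last WQE Reached event has arrived, one more WR is posted, and its flush completion, delivered on the CQ after every earlier WQE, signals that the QP can be destroyed. A minimal sketch of that idea follows; the helper name, the sentinel wr_id, and the use of the current const-qualified ib_post_recv() signature are illustrative assumptions, not the driver's actual code.

#include <rdma/ib_verbs.h>

#define EXAMPLE_DRAIN_WRID 0xffffffffULL	/* hypothetical sentinel wr_id */

/*
 * Post a "drain" receive WR on a QP that is already in the error state.
 * The HCA flushes it (IB_WC_WR_FLUSH_ERR) after all earlier WQEs, so
 * seeing EXAMPLE_DRAIN_WRID come back on the CQ means no WQE posted
 * before it is still outstanding and the QP is safe to destroy.
 */
static int example_post_drain_wr(struct ib_qp *qp)
{
	struct ib_recv_wr wr = { .wr_id = EXAMPLE_DRAIN_WRID };
	const struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);	/* num_sge == 0: no buffer needed */
}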
@@ -165,10 +199,15 @@ struct ipoib_cm_dev_priv {
 	struct ib_srq *srq;
 	struct ipoib_cm_rx_buf *srq_ring;
 	struct ib_cm_id *id;
-	struct list_head passive_ids;
+	struct list_head passive_ids;   /* state: LIVE */
+	struct list_head rx_error_list; /* state: ERROR */
+	struct list_head rx_flush_list; /* state: FLUSH, drain not started */
+	struct list_head rx_drain_list; /* state: FLUSH, drain started */
+	struct list_head rx_reap_list;  /* state: FLUSH, drain done */
 	struct work_struct start_task;
 	struct work_struct reap_task;
 	struct work_struct skb_task;
+	struct work_struct rx_reap_task;
 	struct delayed_work stale_task;
 	struct sk_buff_head skb_queue;
 	struct list_head start_list;
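The five lists above give each ipoib_cm_rx a definite place at every stage of teardown: passive_ids while live, rx_error_list once the QP has been moved to the error state, then the three FLUSH-state lists as the drain progresses, with rx_reap_task finally destroying the entries that reach rx_reap_list. A hedged sketch of one transition, assuming (as elsewhere in the driver) that a lock guards these lists; the helper name is illustrative and the types come from this header.

#include <linux/list.h>

/*
 * On the Last WQE Reached event: mark the connection flushed and move it
 * to rx_flush_list so the drain logic will pick it up.  Illustrative
 * helper; the caller is assumed to hold the lock guarding these lists.
 */
static void example_last_wqe_reached(struct ipoib_cm_dev_priv *cm,
				     struct ipoib_cm_rx *p)
{
	p->state = IPOIB_CM_RX_FLUSH;
	list_move(&p->list, &cm->rx_flush_list);
}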
@@ -201,15 +240,17 @@ struct ipoib_dev_priv {
 	struct list_head multicast_list;
 	struct rb_root multicast_tree;
 
-	struct delayed_work pkey_task;
+	struct delayed_work pkey_poll_task;
 	struct delayed_work mcast_task;
 	struct work_struct flush_task;
 	struct work_struct restart_task;
 	struct delayed_work ah_reap_task;
+	struct work_struct pkey_event_task;
 
 	struct ib_device *ca;
 	u8 port;
 	u16 pkey;
+	u16 pkey_index;
 	struct ib_pd *pd;
 	struct ib_mr *mr;
 	struct ib_cq *cq;
@@ -333,12 +374,13 @@ struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
 
 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_ib_dev_flush(struct work_struct *work);
+void ipoib_pkey_event(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
 int ipoib_ib_dev_open(struct net_device *dev);
 int ipoib_ib_dev_up(struct net_device *dev);
 int ipoib_ib_dev_down(struct net_device *dev, int flush);
-int ipoib_ib_dev_stop(struct net_device *dev);
+int ipoib_ib_dev_stop(struct net_device *dev, int flush);
 
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
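ipoib_pkey_event() pairs with the pkey_event_task field added to ipoib_dev_priv above: P_Key table changes are reported in an IB event callback that cannot sleep, so the usual kernel pattern is to INIT_WORK the field with this handler and queue it to run in process context. A hedged fragment, assuming an init-time INIT_WORK(&priv->pkey_event_task, ipoib_pkey_event) and the driver's existing ipoib_workqueue; the call-site name is illustrative.

#include <linux/workqueue.h>

static void example_handle_pkey_change(struct ipoib_dev_priv *priv)
{
	/*
	 * Defer the P_Key re-check to process context, where
	 * ipoib_pkey_event() is free to sleep while reworking the device.
	 */
	queue_work(ipoib_workqueue, &priv->pkey_event_task);
}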
@@ -386,6 +428,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
 void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
+void ipoib_drain_cq(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
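The new ipoib_drain_cq() declaration corresponds to the spec's first option quoted earlier: poll the CQ until it is empty, so that no completion still references resources about to be freed. A minimal sketch of such a loop around the standard ib_poll_cq() verb; the batch size and the debug-print "handler" are illustrative.

#include <rdma/ib_verbs.h>
#include <linux/printk.h>

static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[4];
	int n, i;

	/* A short batch returned by ib_poll_cq() means the CQ is empty. */
	do {
		n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
		for (i = 0; i < n; i++)
			pr_debug("drained wc[%d]: status %d\n", i, wc[i].status);
	} while (n == ARRAY_SIZE(wc));
}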