Diffstat (limited to 'drivers/infiniband/ulp/ipoib/ipoib.h')
 drivers/infiniband/ulp/ipoib/ipoib.h | 50 +++++++++++++++++++++++++++++++++---
 1 file changed, 47 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 87310eeb6df0..158759e28a5b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -132,12 +132,46 @@ struct ipoib_cm_data {
 	__be32 mtu;
 };
 
+/*
+ * Quoting 10.3.1 Queue Pair and EE Context States:
+ *
+ * Note, for QPs that are associated with an SRQ, the Consumer should take the
+ * QP through the Error State before invoking a Destroy QP or a Modify QP to the
+ * Reset State. The Consumer may invoke the Destroy QP without first performing
+ * a Modify QP to the Error State and waiting for the Affiliated Asynchronous
+ * Last WQE Reached Event. However, if the Consumer does not wait for the
+ * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
+ * leakage may occur. Therefore, it is good programming practice to tear down a
+ * QP that is associated with an SRQ by using the following process:
+ *
+ * - Put the QP in the Error State
+ * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
+ * - either:
+ *       drain the CQ by invoking the Poll CQ verb and either wait for CQ
+ *       to be empty or the number of Poll CQ operations has exceeded
+ *       CQ capacity size;
+ * - or
+ *       post another WR that completes on the same CQ and wait for this
+ *       WR to return as a WC;
+ * - and then invoke a Destroy QP or Reset QP.
+ *
+ * We use the second option and wait for a completion on the
+ * rx_drain_qp before destroying QPs attached to our SRQ.
+ */
+
+enum ipoib_cm_state {
+	IPOIB_CM_RX_LIVE,
+	IPOIB_CM_RX_ERROR, /* Ignored by stale task */
+	IPOIB_CM_RX_FLUSH  /* Last WQE Reached event observed */
+};
+
 struct ipoib_cm_rx {
 	struct ib_cm_id *id;
 	struct ib_qp *qp;
 	struct list_head list;
 	struct net_device *dev;
 	unsigned long jiffies;
+	enum ipoib_cm_state state;
 };
 
 struct ipoib_cm_tx {
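
The comment added above quotes the IB spec's recommended teardown sequence for a QP attached to an SRQ and notes that the driver uses the second option: a marker WR that completes on the same CQ, posted via rx_drain_qp. As a rough illustration only, here is a minimal, hypothetical sketch of that sequence in terms of the kernel verbs API; the function name, the dedicated drain QP argument, and the error handling are assumptions for illustration, not the driver's actual code.

#include <rdma/ib_verbs.h>

/* Hypothetical sketch of the "post a marker WR" teardown option quoted
 * above; not ipoib's actual implementation. */
static int example_teardown_srq_qp(struct ib_qp *qp, struct ib_qp *drain_qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_recv_wr drain_wr = { .wr_id = ~0ULL };	/* marker, no SGEs */
	struct ib_recv_wr *bad_wr;
	int ret;

	/* 1. Put the QP in the Error state. */
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret)
		return ret;

	/*
	 * 2. Wait for the Affiliated Asynchronous Last WQE Reached Event,
	 *    delivered to the QP's event handler as
	 *    IB_EVENT_QP_LAST_WQE_REACHED.
	 */

	/*
	 * 3. Post a marker WR that completes on the same CQ; once its
	 *    completion has been polled, all earlier WRs have drained.
	 */
	ret = ib_post_recv(drain_qp, &drain_wr, &bad_wr);
	if (ret)
		return ret;

	/* 4. After the marker completion has been seen, destroying the QP
	 *    cannot leak SRQ WRs. */
	return ib_destroy_qp(qp);
}
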
@@ -165,10 +199,16 @@ struct ipoib_cm_dev_priv {
 	struct ib_srq *srq;
 	struct ipoib_cm_rx_buf *srq_ring;
 	struct ib_cm_id *id;
-	struct list_head passive_ids;
+	struct ib_qp *rx_drain_qp;   /* generates WR described in 10.3.1 */
+	struct list_head passive_ids;   /* state: LIVE */
+	struct list_head rx_error_list; /* state: ERROR */
+	struct list_head rx_flush_list; /* state: FLUSH, drain not started */
+	struct list_head rx_drain_list; /* state: FLUSH, drain started */
+	struct list_head rx_reap_list;  /* state: FLUSH, drain done */
 	struct work_struct start_task;
 	struct work_struct reap_task;
 	struct work_struct skb_task;
+	struct work_struct rx_reap_task;
 	struct delayed_work stale_task;
 	struct sk_buff_head skb_queue;
 	struct list_head start_list;
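
Each of the new lists above corresponds to one value (or sub-phase) of the ipoib_cm_state machine introduced in the previous hunk. Purely as a hedged illustration of the intent behind the per-state lists, a receive context could be moved between them roughly as sketched below; the helper name and the locking shown are assumptions for illustration, not code from this patch.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical helper showing how a receive context might migrate between
 * the per-state lists; names and locking are illustrative assumptions. */
static void example_cm_rx_set_state(struct ipoib_dev_priv *priv,
				    struct ipoib_cm_rx *p,
				    enum ipoib_cm_state new_state)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	p->state = new_state;
	switch (new_state) {
	case IPOIB_CM_RX_ERROR:
		/* QP moved to the Error state; the stale task skips it now */
		list_move(&p->list, &priv->cm.rx_error_list);
		break;
	case IPOIB_CM_RX_FLUSH:
		/* Last WQE Reached observed; queue it for the drain WR */
		list_move(&p->list, &priv->cm.rx_flush_list);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
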
@@ -201,15 +241,17 @@ struct ipoib_dev_priv {
 	struct list_head multicast_list;
 	struct rb_root multicast_tree;
 
-	struct delayed_work pkey_task;
+	struct delayed_work pkey_poll_task;
 	struct delayed_work mcast_task;
 	struct work_struct flush_task;
 	struct work_struct restart_task;
 	struct delayed_work ah_reap_task;
+	struct work_struct pkey_event_task;
 
 	struct ib_device *ca;
 	u8 port;
 	u16 pkey;
+	u16 pkey_index;
 	struct ib_pd *pd;
 	struct ib_mr *mr;
 	struct ib_cq *cq;
@@ -333,12 +375,13 @@ struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
 
 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_ib_dev_flush(struct work_struct *work);
+void ipoib_pkey_event(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
 int ipoib_ib_dev_open(struct net_device *dev);
 int ipoib_ib_dev_up(struct net_device *dev);
 int ipoib_ib_dev_down(struct net_device *dev, int flush);
-int ipoib_ib_dev_stop(struct net_device *dev);
+int ipoib_ib_dev_stop(struct net_device *dev, int flush);
 
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
@@ -386,6 +429,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
 void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
+void ipoib_drain_cq(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
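
The last hunk adds a prototype for ipoib_drain_cq(). For context, the first teardown option quoted in 10.3.1 (polling the CQ until it is empty) generally looks like the loop below; this is a generic sketch of the ib_poll_cq() usage pattern, not the body of ipoib_drain_cq() itself.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Generic sketch of a CQ drain loop; not ipoib_drain_cq() itself. */
static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int n, i;

	do {
		n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
		for (i = 0; i < n; ++i) {
			/* Hand each completion to the normal handler, or
			 * simply discard completions flushed in error. */
		}
	} while (n > 0);
}
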