Diffstat (limited to 'drivers/infiniband/ulp/ipoib')
-rw-r--r--   drivers/infiniband/ulp/ipoib/ipoib.h            |   2
-rw-r--r--   drivers/infiniband/ulp/ipoib/ipoib_ib.c         | 194
-rw-r--r--   drivers/infiniband/ulp/ipoib/ipoib_main.c       |  37
-rw-r--r--   drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |  34
4 files changed, 154 insertions(+), 113 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 474aa214ab57..0b8a79d53a00 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -336,6 +336,8 @@ static inline void ipoib_unregister_debugfs(void) { }
 extern int ipoib_sendq_size;
 extern int ipoib_recvq_size;
 
+extern struct ib_sa_client ipoib_sa_client;
+
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 extern int ipoib_debug_level;
 
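
The extern added above is the hook for the ib_sa_client plumbing this patch threads through every SA query. For readers unfamiliar with the API, here is a minimal sketch of the registration lifecycle the driver adopts in ipoib_main.c further down; the module and client names are hypothetical stand-ins, and only the ib_sa_register_client()/ib_sa_unregister_client() calls visible in this patch are assumed to exist.

#include <linux/module.h>
#include <rdma/ib_sa.h>

static struct ib_sa_client my_sa_client;	/* plays the role of ipoib_sa_client */

static int __init my_ulp_init(void)
{
	/* Register before issuing any SA queries; the call returns void. */
	ib_sa_register_client(&my_sa_client);
	/* ... register the IB client etc.; on failure, unregister again ... */
	return 0;
}

static void __exit my_ulp_exit(void)
{
	/* Drops the reference taken at init time. */
	ib_sa_unregister_client(&my_sa_client);
}

module_init(my_ulp_init);
module_exit(my_ulp_exit);
MODULE_LICENSE("GPL");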
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5033666b1481..f426a69d9a43 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -169,117 +169,129 @@ static int ipoib_ib_post_receives(struct net_device *dev)
 	return 0;
 }
 
-static void ipoib_ib_handle_wc(struct net_device *dev,
-			       struct ib_wc *wc)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned int wr_id = wc->wr_id;
-
-	ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
-		       wr_id, wc->opcode, wc->status);
-
-	if (wr_id & IPOIB_OP_RECV) {
-		wr_id &= ~IPOIB_OP_RECV;
-
-		if (wr_id < ipoib_recvq_size) {
-			struct sk_buff *skb  = priv->rx_ring[wr_id].skb;
-			dma_addr_t      addr = priv->rx_ring[wr_id].mapping;
-
-			if (unlikely(wc->status != IB_WC_SUCCESS)) {
-				if (wc->status != IB_WC_WR_FLUSH_ERR)
-					ipoib_warn(priv, "failed recv event "
-						   "(status=%d, wrid=%d vend_err %x)\n",
-						   wc->status, wr_id, wc->vendor_err);
-				dma_unmap_single(priv->ca->dma_device, addr,
-						 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-				dev_kfree_skb_any(skb);
-				priv->rx_ring[wr_id].skb = NULL;
-				return;
-			}
-
-			/*
-			 * If we can't allocate a new RX buffer, dump
-			 * this packet and reuse the old buffer.
-			 */
-			if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-				++priv->stats.rx_dropped;
-				goto repost;
-			}
-
-			ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
-				       wc->byte_len, wc->slid);
-
-			dma_unmap_single(priv->ca->dma_device, addr,
-					 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-
-			skb_put(skb, wc->byte_len);
-			skb_pull(skb, IB_GRH_BYTES);
-
-			if (wc->slid != priv->local_lid ||
-			    wc->src_qp != priv->qp->qp_num) {
-				skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-				skb->mac.raw = skb->data;
-				skb_pull(skb, IPOIB_ENCAP_LEN);
-
-				dev->last_rx = jiffies;
-				++priv->stats.rx_packets;
-				priv->stats.rx_bytes += skb->len;
-
-				skb->dev = dev;
-				/* XXX get correct PACKET_ type here */
-				skb->pkt_type = PACKET_HOST;
-				netif_rx_ni(skb);
-			} else {
-				ipoib_dbg_data(priv, "dropping loopback packet\n");
-				dev_kfree_skb_any(skb);
-			}
-
-		repost:
-			if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
-				ipoib_warn(priv, "ipoib_ib_post_receive failed "
-					   "for buf %d\n", wr_id);
-		} else
-			ipoib_warn(priv, "completion event with wrid %d\n",
-				   wr_id);
-
-	} else {
-		struct ipoib_tx_buf *tx_req;
-		unsigned long flags;
-
-		if (wr_id >= ipoib_sendq_size) {
-			ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
-				   wr_id, ipoib_sendq_size);
-			return;
-		}
-
-		ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);
-
-		tx_req = &priv->tx_ring[wr_id];
-
-		dma_unmap_single(priv->ca->dma_device,
-				 pci_unmap_addr(tx_req, mapping),
-				 tx_req->skb->len,
-				 DMA_TO_DEVICE);
-
-		++priv->stats.tx_packets;
-		priv->stats.tx_bytes += tx_req->skb->len;
-
-		dev_kfree_skb_any(tx_req->skb);
-
-		spin_lock_irqsave(&priv->tx_lock, flags);
-		++priv->tx_tail;
-		if (netif_queue_stopped(dev) &&
-		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
-		    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
-			netif_wake_queue(dev);
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
-
-		if (wc->status != IB_WC_SUCCESS &&
-		    wc->status != IB_WC_WR_FLUSH_ERR)
-			ipoib_warn(priv, "failed send event "
-				   "(status=%d, wrid=%d vend_err %x)\n",
-				   wc->status, wr_id, wc->vendor_err);
-	}
+static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
+	struct sk_buff *skb;
+	dma_addr_t addr;
+
+	ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
+		       wr_id, wc->opcode, wc->status);
+
+	if (unlikely(wr_id >= ipoib_recvq_size)) {
+		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
+			   wr_id, ipoib_recvq_size);
+		return;
+	}
+
+	skb  = priv->rx_ring[wr_id].skb;
+	addr = priv->rx_ring[wr_id].mapping;
+
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			ipoib_warn(priv, "failed recv event "
+				   "(status=%d, wrid=%d vend_err %x)\n",
+				   wc->status, wr_id, wc->vendor_err);
+		dma_unmap_single(priv->ca->dma_device, addr,
+				 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		priv->rx_ring[wr_id].skb = NULL;
+		return;
+	}
+
+	/*
+	 * If we can't allocate a new RX buffer, dump
+	 * this packet and reuse the old buffer.
+	 */
+	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+		++priv->stats.rx_dropped;
+		goto repost;
+	}
+
+	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+		       wc->byte_len, wc->slid);
+
+	dma_unmap_single(priv->ca->dma_device, addr,
+			 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+
+	skb_put(skb, wc->byte_len);
+	skb_pull(skb, IB_GRH_BYTES);
+
+	if (wc->slid != priv->local_lid ||
+	    wc->src_qp != priv->qp->qp_num) {
+		skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+		skb->mac.raw = skb->data;
+		skb_pull(skb, IPOIB_ENCAP_LEN);
+
+		dev->last_rx = jiffies;
+		++priv->stats.rx_packets;
+		priv->stats.rx_bytes += skb->len;
+
+		skb->dev = dev;
+		/* XXX get correct PACKET_ type here */
+		skb->pkt_type = PACKET_HOST;
+		netif_rx_ni(skb);
+	} else {
+		ipoib_dbg_data(priv, "dropping loopback packet\n");
+		dev_kfree_skb_any(skb);
+	}
+
+repost:
+	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
+		ipoib_warn(priv, "ipoib_ib_post_receive failed "
+			   "for buf %d\n", wr_id);
+}
+
+static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	unsigned int wr_id = wc->wr_id;
+	struct ipoib_tx_buf *tx_req;
+	unsigned long flags;
+
+	ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n",
+		       wr_id, wc->opcode, wc->status);
+
+	if (unlikely(wr_id >= ipoib_sendq_size)) {
+		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
+			   wr_id, ipoib_sendq_size);
+		return;
+	}
+
+	tx_req = &priv->tx_ring[wr_id];
+
+	dma_unmap_single(priv->ca->dma_device,
+			 pci_unmap_addr(tx_req, mapping),
+			 tx_req->skb->len,
+			 DMA_TO_DEVICE);
+
+	++priv->stats.tx_packets;
+	priv->stats.tx_bytes += tx_req->skb->len;
+
+	dev_kfree_skb_any(tx_req->skb);
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
+	++priv->tx_tail;
+	if (netif_queue_stopped(dev) &&
+	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
+	    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
+		netif_wake_queue(dev);
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+	if (wc->status != IB_WC_SUCCESS &&
+	    wc->status != IB_WC_WR_FLUSH_ERR)
+		ipoib_warn(priv, "failed send event "
+			   "(status=%d, wrid=%d vend_err %x)\n",
+			   wc->status, wr_id, wc->vendor_err);
+}
+
+static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc)
+{
+	if (wc->wr_id & IPOIB_OP_RECV)
+		ipoib_ib_handle_rx_wc(dev, wc);
+	else
+		ipoib_ib_handle_tx_wc(dev, wc);
 }
 
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
@@ -320,7 +332,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	struct ipoib_tx_buf *tx_req;
 	dma_addr_t addr;
 
-	if (skb->len > dev->mtu + INFINIBAND_ALEN) {
+	if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
 			   skb->len, dev->mtu + INFINIBAND_ALEN);
 		++priv->stats.tx_dropped;
@@ -619,8 +631,10 @@ void ipoib_ib_dev_flush(void *_dev)
 	 * The device could have been brought down between the start and when
 	 * we get here, don't bring it back up if it's not configured up
 	 */
-	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
 		ipoib_ib_dev_up(dev);
+		ipoib_mcast_restart_task(dev);
+	}
 
 	mutex_lock(&priv->vlan_mutex);
 
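
The rx/tx split above works because IPoIB tags receive work requests by OR-ing a flag bit into the wr_id it hands to the HCA, while send wr_ids are bare ring indices, so one completion handler can dispatch on that bit. The standalone toy below shows the idiom; the flag value matches the driver's IPOIB_OP_RECV definition of this era (1ul << 31, to the best of my reading of ipoib.h), but everything else is mock code, not driver code.

#include <stdio.h>

#define IPOIB_OP_RECV (1ul << 31)

static void handle_rx_wc(unsigned long wr_id)
{
	/* Strip the tag to recover the receive-ring slot. */
	printf("rx completion for ring slot %lu\n", wr_id & ~IPOIB_OP_RECV);
}

static void handle_tx_wc(unsigned long wr_id)
{
	printf("tx completion for ring slot %lu\n", wr_id);
}

/* Mirrors the shape of the new ipoib_ib_handle_wc() dispatcher. */
static void handle_wc(unsigned long wr_id)
{
	if (wr_id & IPOIB_OP_RECV)
		handle_rx_wc(wr_id);
	else
		handle_tx_wc(wr_id);
}

int main(void)
{
	handle_wc(5);			/* a send: bare ring index */
	handle_wc(3 | IPOIB_OP_RECV);	/* a receive: index tagged with the flag */
	return 0;
}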
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cf71d2a5515c..1eaf00e9862c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -40,7 +40,6 @@
 
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/kernel.h>
 
 #include <linux/if_arp.h>	/* For ARPHRD_xxx */
@@ -82,6 +81,8 @@ static const u8 ipv4_bcast_addr[] = {
 
 struct workqueue_struct *ipoib_workqueue;
 
+struct ib_sa_client ipoib_sa_client;
+
 static void ipoib_add_one(struct ib_device *device);
 static void ipoib_remove_one(struct ib_device *device);
 
@@ -336,7 +337,8 @@ void ipoib_flush_paths(struct net_device *dev)
 	struct ipoib_path *path, *tp;
 	LIST_HEAD(remove_list);
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irq(&priv->tx_lock);
+	spin_lock(&priv->lock);
 
 	list_splice(&priv->path_list, &remove_list);
 	INIT_LIST_HEAD(&priv->path_list);
@@ -347,12 +349,15 @@ void ipoib_flush_paths(struct net_device *dev)
 	list_for_each_entry_safe(path, tp, &remove_list, list) {
 		if (path->query)
 			ib_sa_cancel_query(path->query_id, path->query);
-		spin_unlock_irq(&priv->lock);
+		spin_unlock(&priv->lock);
+		spin_unlock_irq(&priv->tx_lock);
 		wait_for_completion(&path->done);
 		path_free(dev, path);
-		spin_lock_irq(&priv->lock);
+		spin_lock_irq(&priv->tx_lock);
+		spin_lock(&priv->lock);
 	}
-	spin_unlock_irq(&priv->lock);
+	spin_unlock(&priv->lock);
+	spin_unlock_irq(&priv->tx_lock);
 }
 
 static void path_rec_completion(int status,
@@ -459,7 +464,7 @@ static int path_rec_start(struct net_device *dev,
 	init_completion(&path->done);
 
 	path->query_id =
-		ib_sa_path_rec_get(priv->ca, priv->port,
+		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
 				   &path->pathrec,
 				   IB_SA_PATH_REC_DGID |
 				   IB_SA_PATH_REC_SGID |
@@ -615,7 +620,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_neigh *neigh;
 	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&priv->tx_lock, flags))
+	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
 		return NETDEV_TX_LOCKED;
 
 	/*
@@ -628,7 +633,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	if (skb->dst && skb->dst->neighbour) {
+	if (likely(skb->dst && skb->dst->neighbour)) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
 			ipoib_path_lookup(skb, dev);
 			goto out;
@@ -1107,13 +1112,16 @@ static void ipoib_add_one(struct ib_device *device)
 	struct ipoib_dev_priv *priv;
 	int s, e, p;
 
+	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+		return;
+
 	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
 	if (!dev_list)
 		return;
 
 	INIT_LIST_HEAD(dev_list);
 
-	if (device->node_type == IB_NODE_SWITCH) {
+	if (device->node_type == RDMA_NODE_IB_SWITCH) {
 		s = 0;
 		e = 0;
 	} else {
@@ -1137,6 +1145,9 @@ static void ipoib_remove_one(struct ib_device *device)
 	struct ipoib_dev_priv *priv, *tmp;
 	struct list_head *dev_list;
 
+	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+		return;
+
 	dev_list = ib_get_client_data(device, &ipoib_client);
 
 	list_for_each_entry_safe(priv, tmp, dev_list, list) {
@@ -1181,13 +1192,16 @@ static int __init ipoib_init_module(void)
 		goto err_fs;
 	}
 
+	ib_sa_register_client(&ipoib_sa_client);
+
 	ret = ib_register_client(&ipoib_client);
 	if (ret)
-		goto err_wq;
+		goto err_sa;
 
 	return 0;
 
-err_wq:
+err_sa:
+	ib_sa_unregister_client(&ipoib_sa_client);
 	destroy_workqueue(ipoib_workqueue);
 
 err_fs:
@@ -1199,6 +1213,7 @@ err_fs:
 static void __exit ipoib_cleanup_module(void)
 {
 	ib_unregister_client(&ipoib_client);
+	ib_sa_unregister_client(&ipoib_sa_client);
 	ipoib_unregister_debugfs();
 	destroy_workqueue(ipoib_workqueue);
 }
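
The ipoib_flush_paths() hunks above establish a nesting order between the two per-device locks: priv->tx_lock is taken first and disables IRQs, priv->lock nests inside it, and both are dropped, inner lock first, around the sleeping wait_for_completion(). A compressed sketch of that discipline, with stand-in lock names rather than the driver's:

#include <linux/spinlock.h>
#include <linux/completion.h>

static DEFINE_SPINLOCK(outer_lock);	/* stands in for priv->tx_lock */
static DEFINE_SPINLOCK(inner_lock);	/* stands in for priv->lock    */

static void flush_example(struct completion *done)
{
	spin_lock_irq(&outer_lock);	/* outermost lock disables IRQs */
	spin_lock(&inner_lock);		/* nested lock taken plain: IRQs already off */

	/* ... unlink entries while holding both locks ... */

	spin_unlock(&inner_lock);	/* release in reverse order of acquisition */
	spin_unlock_irq(&outer_lock);

	wait_for_completion(done);	/* may sleep, so no spinlock can be held here */
}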
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ec356ce7cdcd..3faa1820f0e9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -361,7 +361,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 
 	init_completion(&mcast->done);
 
-	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
+	ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port, &rec,
 				     IB_SA_MCMEMBER_REC_MGID |
 				     IB_SA_MCMEMBER_REC_PORT_GID |
 				     IB_SA_MCMEMBER_REC_PKEY |
@@ -472,22 +472,32 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 
 	if (create) {
 		comp_mask |=
 			IB_SA_MCMEMBER_REC_QKEY |
-			IB_SA_MCMEMBER_REC_SL |
-			IB_SA_MCMEMBER_REC_FLOW_LABEL |
-			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
+			IB_SA_MCMEMBER_REC_MTU_SELECTOR |
+			IB_SA_MCMEMBER_REC_MTU |
+			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
+			IB_SA_MCMEMBER_REC_RATE_SELECTOR |
+			IB_SA_MCMEMBER_REC_RATE |
+			IB_SA_MCMEMBER_REC_SL |
+			IB_SA_MCMEMBER_REC_FLOW_LABEL |
+			IB_SA_MCMEMBER_REC_HOP_LIMIT;
 
 		rec.qkey	  = priv->broadcast->mcmember.qkey;
+		rec.mtu_selector  = IB_SA_EQ;
+		rec.mtu		  = priv->broadcast->mcmember.mtu;
+		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+		rec.rate_selector = IB_SA_EQ;
+		rec.rate	  = priv->broadcast->mcmember.rate;
 		rec.sl		  = priv->broadcast->mcmember.sl;
 		rec.flow_label	  = priv->broadcast->mcmember.flow_label;
-		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
 	}
 
 	init_completion(&mcast->done);
 
-	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask,
-				     mcast->backoff * 1000, GFP_ATOMIC,
-				     ipoib_mcast_join_complete,
+	ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port,
+				     &rec, comp_mask, mcast->backoff * 1000,
+				     GFP_ATOMIC, ipoib_mcast_join_complete,
 				     mcast, &mcast->query);
 
 	if (ret < 0) {
@@ -528,7 +538,7 @@ void ipoib_mcast_join_task(void *dev_ptr)
 		priv->local_rate = attr.active_speed *
 			ib_width_enum_to_int(attr.active_width);
 	} else
-                ipoib_warn(priv, "ib_query_port failed\n");
+		ipoib_warn(priv, "ib_query_port failed\n");
 	}
 
 	if (!priv->broadcast) {
@@ -681,7 +691,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 	 * Just make one shot at leaving and don't wait for a reply;
 	 * if we fail, too bad.
 	 */
-	ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec,
+	ret = ib_sa_mcmember_rec_delete(&ipoib_sa_client, priv->ca, priv->port, &rec,
 					IB_SA_MCMEMBER_REC_MGID |
 					IB_SA_MCMEMBER_REC_PORT_GID |
 					IB_SA_MCMEMBER_REC_PKEY |
@@ -795,7 +805,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 	}
 
 	if (priv->broadcast) {
-                rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
 		list_add_tail(&priv->broadcast->list, &remove_list);
 		priv->broadcast = NULL;
 	}
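
The enlarged comp_mask in ipoib_mcast_join() is the SA protocol's way of declaring which fields of the member record are being supplied, and the IB_SA_EQ selectors pin a newly created group's MTU and rate to exact equality with the broadcast group's values. The toy program below illustrates only the component-mask idiom itself; the flag values and struct layout here are made up for the example (the real definitions live in <rdma/ib_sa.h>).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the IB_SA_MCMEMBER_REC_* bits. */
enum {
	REC_QKEY         = 1u << 0,
	REC_MTU_SELECTOR = 1u << 1,
	REC_MTU          = 1u << 2,
};

enum { SA_GT, SA_LT, SA_EQ };	/* selector meanings, cf. IB_SA_EQ */

struct member_rec {
	uint32_t qkey;
	int	 mtu_selector;
	int	 mtu;
};

int main(void)
{
	struct member_rec rec = { .qkey = 0x0b1b, .mtu_selector = SA_EQ, .mtu = 4 };
	uint32_t comp_mask = REC_QKEY | REC_MTU_SELECTOR | REC_MTU;

	/* Only fields whose bit is set in comp_mask are meaningful to the SA. */
	printf("comp_mask=0x%x mtu_selector=%d mtu=%d\n",
	       comp_mask, rec.mtu_selector, rec.mtu);
	return 0;
}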