path: root/drivers/infiniband
author     Linus Torvalds <torvalds@linux-foundation.org>    2008-10-23 11:16:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-10-23 11:16:03 -0400
commit     724bdd097e4d47b6ad963db5d92258ab5c485e05 (patch)
tree       f9d2de5f826780aa04532a89c3b67a01ae413f7d /drivers/infiniband
parent     dc8dcad83b53e3aebc52889e81772c00a1490408 (diff)
parent     56f2fdaade2a6b91ccd785de938b91172d5c94f2 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ehca: Reject dynamic memory add/remove when ehca adapter is present
  IB/ehca: Fix reported max number of QPs and CQs in systems with >1 adapter
  IPoIB: Set netdev offload features properly for child (VLAN) interfaces
  IPoIB: Clean up ethtool support
  mlx4_core: Add Ethernet PCI device IDs
  mlx4_en: Add driver for Mellanox ConnectX 10GbE NIC
  mlx4_core: Multiple port type support
  mlx4_core: Ethernet MAC/VLAN management
  mlx4_core: Get ethernet MTU and default address from firmware
  mlx4_core: Support multiple pre-reserved QP regions
  Update NetEffect maintainer emails to Intel emails
  RDMA/cxgb3: Remove cmid reference on tid allocation failures
  IB/mad: Use krealloc() to resize snoop table
  IPoIB: Always initialize poll_timer to avoid crash on unload
  IB/ehca: Don't allow creating UC QP with SRQ
  mlx4_core: Add QP range reservation support
  RDMA/ucma: Test ucma_alloc_multicast() return against NULL, not with IS_ERR()
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/mad.c                   14
-rw-r--r--  drivers/infiniband/core/ucma.c                   4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c            1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h        2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c             4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c          83
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c            10
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c                 6
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c               11
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h             1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                 21
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h             1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c     9
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c          7
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       67
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c        4
16 files changed, 175 insertions, 70 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 49c45feccd5b..5c54fc2350be 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -406,19 +406,15 @@ static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
 
 	if (i == qp_info->snoop_table_size) {
 		/* Grow table. */
-		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
-					  qp_info->snoop_table_size + 1,
-					  GFP_ATOMIC);
+		new_snoop_table = krealloc(qp_info->snoop_table,
+					   sizeof mad_snoop_priv *
+					   (qp_info->snoop_table_size + 1),
+					   GFP_ATOMIC);
 		if (!new_snoop_table) {
 			i = -ENOMEM;
 			goto out;
 		}
-		if (qp_info->snoop_table) {
-			memcpy(new_snoop_table, qp_info->snoop_table,
-			       sizeof mad_snoop_priv *
-			       qp_info->snoop_table_size);
-			kfree(qp_info->snoop_table);
-		}
+
 		qp_info->snoop_table = new_snoop_table;
 		qp_info->snoop_table_size++;
 	}
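The change above replaces the open-coded allocate-copy-free sequence with krealloc(), which copies the old contents and frees the old buffer on success while leaving the original allocation untouched on failure. A minimal standalone sketch of the same growth pattern (hypothetical grow_table() helper, not part of the patch):

	#include <linux/slab.h>

	/* Grow a table of pointers by one slot, preserving existing entries.
	 * On failure the original table is left intact, mirroring the mad.c fix. */
	static void **grow_table(void **table, int *size, gfp_t gfp)
	{
		void **bigger = krealloc(table, sizeof(*table) * (*size + 1), gfp);

		if (!bigger)
			return NULL;	/* old 'table' is still valid and unfreed */

		(*size)++;
		return bigger;
	}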
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 3ddacf39b7ba..4346a24568fb 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -904,8 +904,8 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 
 	mutex_lock(&file->mut);
 	mc = ucma_alloc_multicast(ctx);
-	if (IS_ERR(mc)) {
-		ret = PTR_ERR(mc);
+	if (!mc) {
+		ret = -ENOMEM;
 		goto err1;
 	}
 
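This fix matters because ucma_alloc_multicast() reports failure by returning NULL rather than an ERR_PTR(), and IS_ERR(NULL) evaluates to false, so the old test let a NULL pointer through. A self-contained sketch of the NULL-returning convention (hypothetical struct thing and alloc_thing(), for illustration only):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct thing { int dummy; };

	/* Hypothetical allocator that reports failure with NULL,
	 * like ucma_alloc_multicast(). */
	static struct thing *alloc_thing(void)
	{
		return kzalloc(sizeof(struct thing), GFP_KERNEL);
	}

	static int use_thing(void)
	{
		struct thing *t = alloc_thing();

		/* IS_ERR(NULL) is false, so an IS_ERR() check would let NULL
		 * slip through; a NULL-returning allocator must be tested with !t. */
		if (!t)
			return -ENOMEM;

		kfree(t);
		return 0;
	}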
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index c325c44807e8..44e936e48a31 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1942,6 +1942,7 @@ fail4:
 fail3:
 	cxgb3_free_atid(ep->com.tdev, ep->atid);
 fail2:
+	cm_id->rem_ref(cm_id);
 	put_ep(&ep->com);
 out:
 	return err;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 5d7b7855afb9..4df887af66a5 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -128,6 +128,8 @@ struct ehca_shca {
 	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
 	u32 hca_cap_mr_pgsize;
 	int max_mtu;
+	int max_num_qps;
+	int max_num_cqs;
 	atomic_t num_cqs;
 	atomic_t num_qps;
 };
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 33647a95eb9a..2f4c28a30271 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -132,9 +132,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
 
-	if (!atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq)) {
+	if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
 		ehca_err(device, "Unable to create CQ, max number of %i "
-			 "CQs reached.", ehca_max_cq);
+			 "CQs reached.", shca->max_num_cqs);
 		ehca_err(device, "To increase the maximum number of CQs "
 			 "use the number_of_cqs module parameter.\n");
 		return ERR_PTR(-ENOSPC);
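atomic_add_unless(&v, 1, limit) increments the counter only while it differs from the limit and returns 0 once the limit is reached, which is how ehca enforces the per-adapter CQ/QP budget without taking a lock. A minimal sketch of that admission pattern (illustrative names, not driver code):

	#include <linux/atomic.h>
	#include <linux/errno.h>

	static atomic_t live_objects = ATOMIC_INIT(0);

	/* Admit a new object only if fewer than 'max' already exist. */
	static int try_admit(int max)
	{
		/* Increments and returns non-zero iff live_objects != max. */
		if (!atomic_add_unless(&live_objects, 1, max))
			return -ENOSPC;
		return 0;
	}

	/* On any later failure or teardown, the slot must be given back. */
	static void release_slot(void)
	{
		atomic_dec(&live_objects);
	}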
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 598844d2edc9..bb02a86aa526 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -44,6 +44,8 @@
 #include <linux/slab.h>
 #endif
 
+#include <linux/notifier.h>
+#include <linux/memory.h>
 #include "ehca_classes.h"
 #include "ehca_iverbs.h"
 #include "ehca_mrmw.h"
@@ -366,22 +368,23 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 		shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
 
 	/* Set maximum number of CQs and QPs to calculate EQ size */
-	if (ehca_max_qp == -1)
-		ehca_max_qp = min_t(int, rblock->max_qp, EHCA_MAX_NUM_QUEUES);
-	else if (ehca_max_qp < 1 || ehca_max_qp > rblock->max_qp) {
-		ehca_gen_err("Requested number of QPs is out of range (1 - %i) "
-			     "specified by HW", rblock->max_qp);
-		ret = -EINVAL;
-		goto sense_attributes1;
+	if (shca->max_num_qps == -1)
+		shca->max_num_qps = min_t(int, rblock->max_qp,
+					  EHCA_MAX_NUM_QUEUES);
+	else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
+		ehca_gen_warn("The requested number of QPs is out of range "
+			      "(1 - %i) specified by HW. Value is set to %i",
+			      rblock->max_qp, rblock->max_qp);
+		shca->max_num_qps = rblock->max_qp;
 	}
 
-	if (ehca_max_cq == -1)
-		ehca_max_cq = min_t(int, rblock->max_cq, EHCA_MAX_NUM_QUEUES);
-	else if (ehca_max_cq < 1 || ehca_max_cq > rblock->max_cq) {
-		ehca_gen_err("Requested number of CQs is out of range (1 - %i) "
-			     "specified by HW", rblock->max_cq);
-		ret = -EINVAL;
-		goto sense_attributes1;
+	if (shca->max_num_cqs == -1)
+		shca->max_num_cqs = min_t(int, rblock->max_cq,
+					  EHCA_MAX_NUM_QUEUES);
+	else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
+		ehca_gen_warn("The requested number of CQs is out of range "
+			      "(1 - %i) specified by HW. Value is set to %i",
+			      rblock->max_cq, rblock->max_cq);
 	}
 
 	/* query max MTU from first port -- it's the same for all ports */
@@ -733,9 +736,13 @@ static int __devinit ehca_probe(struct of_device *dev,
 		ehca_gen_err("Cannot allocate shca memory.");
 		return -ENOMEM;
 	}
+
 	mutex_init(&shca->modify_mutex);
 	atomic_set(&shca->num_cqs, 0);
 	atomic_set(&shca->num_qps, 0);
+	shca->max_num_qps = ehca_max_qp;
+	shca->max_num_cqs = ehca_max_cq;
+
 	for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
 		spin_lock_init(&shca->sport[i].mod_sqp_lock);
 
@@ -755,7 +762,7 @@ static int __devinit ehca_probe(struct of_device *dev,
 		goto probe1;
 	}
 
-	eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp;
+	eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
 	/* create event queues */
 	ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
 	if (ret) {
@@ -964,6 +971,41 @@ void ehca_poll_eqs(unsigned long data)
 	spin_unlock(&shca_list_lock);
 }
 
+static int ehca_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	static unsigned long ehca_dmem_warn_time;
+
+	switch (action) {
+	case MEM_CANCEL_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_ONLINE:
+	case MEM_OFFLINE:
+		return NOTIFY_OK;
+	case MEM_GOING_ONLINE:
+	case MEM_GOING_OFFLINE:
+		/* only ok if no hca is attached to the lpar */
+		spin_lock(&shca_list_lock);
+		if (list_empty(&shca_list)) {
+			spin_unlock(&shca_list_lock);
+			return NOTIFY_OK;
+		} else {
+			spin_unlock(&shca_list_lock);
+			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
+						   30 * 1000))
+				ehca_gen_err("DMEM operations are not allowed"
+					     "as long as an ehca adapter is"
+					     "attached to the LPAR");
+			return NOTIFY_BAD;
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ehca_mem_nb = {
+	.notifier_call = ehca_mem_notifier,
+};
+
 static int __init ehca_module_init(void)
 {
 	int ret;
@@ -991,6 +1033,12 @@ static int __init ehca_module_init(void)
 		goto module_init2;
 	}
 
+	ret = register_memory_notifier(&ehca_mem_nb);
+	if (ret) {
+		ehca_gen_err("Failed registering memory add/remove notifier");
+		goto module_init3;
+	}
+
 	if (ehca_poll_all_eqs != 1) {
 		ehca_gen_err("WARNING!!!");
 		ehca_gen_err("It is possible to lose interrupts.");
@@ -1003,6 +1051,9 @@ static int __init ehca_module_init(void)
 
 	return 0;
 
+module_init3:
+	ibmebus_unregister_driver(&ehca_driver);
+
 module_init2:
 	ehca_destroy_slab_caches();
 
@@ -1018,6 +1069,8 @@ static void __exit ehca_module_exit(void)
 
 	ibmebus_unregister_driver(&ehca_driver);
 
+	unregister_memory_notifier(&ehca_mem_nb);
+
 	ehca_destroy_slab_caches();
 
 	ehca_destroy_comp_pool();
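For context, the memory-hotplug notifier added above follows the usual kernel pattern: fill in a struct notifier_block, register it with register_memory_notifier(), return NOTIFY_BAD from MEM_GOING_ONLINE/MEM_GOING_OFFLINE to veto the operation, and unregister on module exit. A stripped-down module skeleton of that pattern (not the ehca code):

	#include <linux/module.h>
	#include <linux/memory.h>
	#include <linux/notifier.h>

	static int veto_mem_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
	{
		switch (action) {
		case MEM_GOING_ONLINE:
		case MEM_GOING_OFFLINE:
			/* NOTIFY_BAD aborts the pending memory add/remove. */
			return NOTIFY_BAD;
		default:
			return NOTIFY_OK;
		}
	}

	static struct notifier_block veto_mem_nb = {
		.notifier_call = veto_mem_notifier,
	};

	static int __init veto_init(void)
	{
		return register_memory_notifier(&veto_mem_nb);
	}

	static void __exit veto_exit(void)
	{
		unregister_memory_notifier(&veto_mem_nb);
	}

	module_init(veto_init);
	module_exit(veto_exit);
	MODULE_LICENSE("GPL");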
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4dbe2870e014..4d54b9f64567 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -465,9 +465,9 @@ static struct ehca_qp *internal_create_qp(
 	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
 	unsigned long flags;
 
-	if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) {
+	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
 		ehca_err(pd->device, "Unable to create QP, max number of %i "
-			 "QPs reached.", ehca_max_qp);
+			 "QPs reached.", shca->max_num_qps);
 		ehca_err(pd->device, "To increase the maximum number of QPs "
 			 "use the number_of_qps module parameter.\n");
 		return ERR_PTR(-ENOSPC);
@@ -502,6 +502,12 @@ static struct ehca_qp *internal_create_qp(
 	if (init_attr->srq) {
 		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
 
+		if (qp_type == IB_QPT_UC) {
+			ehca_err(pd->device, "UC with SRQ not supported");
+			atomic_dec(&shca->num_qps);
+			return ERR_PTR(-EINVAL);
+		}
+
 		has_srq = 1;
 		parms.ext_type = EQPT_SRQBASE;
 		parms.srq_qpn = my_srq->real_qp_num;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index cdca3a511e1c..606f1e2ef284 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -298,7 +298,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
 	int p, q;
 	int ret;
 
-	for (p = 0; p < dev->dev->caps.num_ports; ++p)
+	for (p = 0; p < dev->num_ports; ++p)
 		for (q = 0; q <= 1; ++q) {
 			agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
 						      q ? IB_QPT_GSI : IB_QPT_SMI,
@@ -314,7 +314,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
 	return 0;
 
 err:
-	for (p = 0; p < dev->dev->caps.num_ports; ++p)
+	for (p = 0; p < dev->num_ports; ++p)
 		for (q = 0; q <= 1; ++q)
 			if (dev->send_agent[p][q])
 				ib_unregister_mad_agent(dev->send_agent[p][q]);
@@ -327,7 +327,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
 	struct ib_mad_agent *agent;
 	int p, q;
 
-	for (p = 0; p < dev->dev->caps.num_ports; ++p) {
+	for (p = 0; p < dev->num_ports; ++p) {
 		for (q = 0; q <= 1; ++q) {
 			agent = dev->send_agent[p][q];
 			dev->send_agent[p][q] = NULL;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index a3c2851c0545..2e80f8f47b02 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -574,7 +574,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.owner = THIS_MODULE;
 	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
-	ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports;
+	ibdev->num_ports = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ibdev->num_ports++;
+	ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors = 1;
 	ibdev->ib_dev.dma_device = &dev->pdev->dev;
 
@@ -691,7 +694,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
-	for (p = 1; p <= dev->caps.num_ports; ++p)
+	for (p = 1; p <= ibdev->num_ports; ++p)
 		mlx4_CLOSE_PORT(dev, p);
 
 	mlx4_ib_mad_cleanup(ibdev);
@@ -706,6 +709,10 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 			  enum mlx4_dev_event event, int port)
 {
 	struct ib_event ibev;
+	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+
+	if (port > ibdev->num_ports)
+		return;
 
 	switch (event) {
 	case MLX4_DEV_EVENT_PORT_UP:
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6e2b0dc21b61..9974e886b8de 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -162,6 +162,7 @@ struct mlx4_ib_ah {
 struct mlx4_ib_dev {
 	struct ib_device	ib_dev;
 	struct mlx4_dev		*dev;
+	int			num_ports;
 	void __iomem		*uar_map;
 
 	struct mlx4_uar		priv_uar;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index baa01deb2436..39167a797f99 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -451,6 +451,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
 {
+	int qpn;
 	int err;
 
 	mutex_init(&qp->mutex);
@@ -545,9 +546,17 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 	}
 
-	err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
+	if (sqpn) {
+		qpn = sqpn;
+	} else {
+		err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+		if (err)
+			goto err_wrid;
+	}
+
+	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
 	if (err)
-		goto err_wrid;
+		goto err_qpn;
 
 	/*
 	 * Hardware wants QPN written in big-endian order (after
@@ -560,6 +569,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 
 	return 0;
 
+err_qpn:
+	if (!sqpn)
+		mlx4_qp_release_range(dev->dev, qpn, 1);
+
 err_wrid:
 	if (pd->uobject) {
 		if (!init_attr->srq)
@@ -655,6 +668,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	mlx4_ib_unlock_cqs(send_cq, recv_cq);
 
 	mlx4_qp_free(dev->dev, &qp->mqp);
+
+	if (!is_sqp(dev, qp))
+		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 	if (is_user) {
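The create_qp_common() change inserts a new resource acquisition (a reserved QPN) into an existing goto-unwind chain, adding an err_qpn label that releases the range only when it was actually reserved (sqpn == 0). A generic sketch of that unwind idiom, with hypothetical helpers standing in for mlx4_qp_reserve_range()/mlx4_qp_alloc() and their release counterparts:

	/* Hypothetical stand-ins, for illustration only. */
	static int reserve_qpn(int *qpn)	{ *qpn = 42; return 0; }
	static void release_qpn(int qpn)	{ }
	static int alloc_qp(int qpn)		{ return 0; }

	static int create_qp_sketch(int sqpn)
	{
		int qpn, err;

		if (sqpn) {
			qpn = sqpn;		/* special QPs come pre-assigned */
		} else {
			err = reserve_qpn(&qpn);
			if (err)
				goto err_wrid;	/* nothing new to unwind yet */
		}

		err = alloc_qp(qpn);
		if (err)
			goto err_qpn;		/* undo only the reservation we made */

		return 0;

	err_qpn:
		if (!sqpn)
			release_qpn(qpn);
	err_wrid:
		return err;
	}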
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 68ba5c3482e4..e0c7dfabf2b4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -507,6 +507,7 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev);
 void ipoib_drain_cq(struct net_device *dev);
 
 void ipoib_set_ethtool_ops(struct net_device *dev);
+int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 66af5c1a76e5..e9795f60e5d6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -42,6 +42,13 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
 	strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
 }
 
+static u32 ipoib_get_rx_csum(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	return test_bit(IPOIB_FLAG_CSUM, &priv->flags) &&
+		!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+}
+
 static int ipoib_get_coalesce(struct net_device *dev,
 			      struct ethtool_coalesce *coal)
 {
@@ -129,7 +136,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
 
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo = ipoib_get_drvinfo,
-	.get_tso = ethtool_op_get_tso,
+	.get_rx_csum = ipoib_get_rx_csum,
 	.get_coalesce = ipoib_get_coalesce,
 	.set_coalesce = ipoib_set_coalesce,
 	.get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0e748aeeae99..28eb6f03c588 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -685,10 +685,6 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
 			   round_jiffies_relative(HZ));
 
-	init_timer(&priv->poll_timer);
-	priv->poll_timer.function = ipoib_ib_tx_timer_func;
-	priv->poll_timer.data = (unsigned long)dev;
-
 	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 
 	return 0;
@@ -906,6 +902,9 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 		return -ENODEV;
 	}
 
+	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
+		    (unsigned long) dev);
+
 	if (dev->flags & IFF_UP) {
 		if (ipoib_ib_dev_open(dev)) {
 			ipoib_transport_dev_cleanup(dev);
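setup_timer(timer, fn, data) is shorthand for init_timer() followed by assigning .function and .data, and moving it into ipoib_ib_dev_init() means the timer is initialized even if the interface is never brought up, so the del_timer_sync() on unload no longer touches an uninitialized timer. A small sketch of the two equivalent forms in this kernel generation (the timer API was later reworked around timer_setup()):

	#include <linux/timer.h>

	static struct timer_list demo_timer;

	static void demo_timer_fn(unsigned long data)
	{
		/* 'data' carries whatever value was passed at setup time. */
	}

	static void demo_init(void *ctx)
	{
		/* Open-coded form (what ipoib_ib_dev_open() used to do): */
		init_timer(&demo_timer);
		demo_timer.function = demo_timer_fn;
		demo_timer.data = (unsigned long)ctx;

		/* Equivalent one-liner used by the fix: */
		setup_timer(&demo_timer, demo_timer_fn, (unsigned long)ctx);
	}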
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c0ee514396df..fddded7900d1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1173,11 +1173,48 @@ int ipoib_add_pkey_attr(struct net_device *dev)
 	return device_create_file(&dev->dev, &dev_attr_pkey);
 }
 
+int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
+{
+	struct ib_device_attr *device_attr;
+	int result = -ENOMEM;
+
+	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
+	if (!device_attr) {
+		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
+		       hca->name, sizeof *device_attr);
+		return result;
+	}
+
+	result = ib_query_device(hca, device_attr);
+	if (result) {
+		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
+		       hca->name, result);
+		kfree(device_attr);
+		return result;
+	}
+	priv->hca_caps = device_attr->device_cap_flags;
+
+	kfree(device_attr);
+
+	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
+		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+	}
+
+	if (lro)
+		priv->dev->features |= NETIF_F_LRO;
+
+	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
+		priv->dev->features |= NETIF_F_TSO;
+
+	return 0;
+}
+
+
 static struct net_device *ipoib_add_port(const char *format,
 					 struct ib_device *hca, u8 port)
 {
 	struct ipoib_dev_priv *priv;
-	struct ib_device_attr *device_attr;
 	struct ib_port_attr attr;
 	int result = -ENOMEM;
 
@@ -1206,31 +1243,8 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto device_init_failed;
 	}
 
-	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
-	if (!device_attr) {
-		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
-		       hca->name, sizeof *device_attr);
+	if (ipoib_set_dev_features(priv, hca))
 		goto device_init_failed;
-	}
-
-	result = ib_query_device(hca, device_attr);
-	if (result) {
-		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
-		       hca->name, result);
-		kfree(device_attr);
-		goto device_init_failed;
-	}
-	priv->hca_caps = device_attr->device_cap_flags;
-
-	kfree(device_attr);
-
-	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
-		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
-		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-	}
-
-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
 
 	/*
 	 * Set the full membership bit, so that we join the right
@@ -1266,9 +1280,6 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto event_failed;
 	}
 
-	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
-		priv->dev->features |= NETIF_F_TSO;
-
 	result = register_netdev(priv->dev);
 	if (result) {
 		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index b08eb56196d3..2cf1a4088718 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -93,6 +93,10 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
 	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
 
+	result = ipoib_set_dev_features(priv, ppriv->ca);
+	if (result)
+		goto device_init_failed;
+
 	priv->pkey = pkey;
 
 	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);