commit 724bdd097e4d47b6ad963db5d92258ab5c485e05
tree f9d2de5f826780aa04532a89c3b67a01ae413f7d
parent dc8dcad83b53e3aebc52889e81772c00a1490408
parent 56f2fdaade2a6b91ccd785de938b91172d5c94f2
author Linus Torvalds <torvalds@linux-foundation.org> 2008-10-23 11:16:03 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2008-10-23 11:16:03 -0400
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IB/ehca: Reject dynamic memory add/remove when ehca adapter is present
IB/ehca: Fix reported max number of QPs and CQs in systems with >1 adapter
IPoIB: Set netdev offload features properly for child (VLAN) interfaces
IPoIB: Clean up ethtool support
mlx4_core: Add Ethernet PCI device IDs
mlx4_en: Add driver for Mellanox ConnectX 10GbE NIC
mlx4_core: Multiple port type support
mlx4_core: Ethernet MAC/VLAN management
mlx4_core: Get ethernet MTU and default address from firmware
mlx4_core: Support multiple pre-reserved QP regions
Update NetEffect maintainer emails to Intel emails
RDMA/cxgb3: Remove cmid reference on tid allocation failures
IB/mad: Use krealloc() to resize snoop table
IPoIB: Always initialize poll_timer to avoid crash on unload
IB/ehca: Don't allow creating UC QP with SRQ
mlx4_core: Add QP range reservation support
RDMA/ucma: Test ucma_alloc_multicast() return against NULL, not with IS_ERR()
44 files changed, 6394 insertions(+), 125 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 5c3f79c26384..1d11b56794d8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2928,9 +2928,9 @@ S:	Maintained
 
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 P:	Faisal Latif
-M:	flatif@neteffect.com
+M:	faisal.latif@intel.com
 P:	Chien Tung
-M:	ctung@neteffect.com
+M:	chien.tin.tung@intel.com
 L:	general@lists.openfabrics.org
 W:	http://www.neteffect.com
 S:	Supported
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 49c45feccd5b..5c54fc2350be 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -406,19 +406,15 @@ static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
 
 	if (i == qp_info->snoop_table_size) {
 		/* Grow table. */
-		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
-					  qp_info->snoop_table_size + 1,
-					  GFP_ATOMIC);
+		new_snoop_table = krealloc(qp_info->snoop_table,
+					   sizeof mad_snoop_priv *
+					   (qp_info->snoop_table_size + 1),
+					   GFP_ATOMIC);
 		if (!new_snoop_table) {
 			i = -ENOMEM;
 			goto out;
 		}
-		if (qp_info->snoop_table) {
-			memcpy(new_snoop_table, qp_info->snoop_table,
-			       sizeof mad_snoop_priv *
-			       qp_info->snoop_table_size);
-			kfree(qp_info->snoop_table);
-		}
+
 		qp_info->snoop_table = new_snoop_table;
 		qp_info->snoop_table_size++;
 	}
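The hunk above swaps an open-coded kmalloc()/memcpy()/kfree() sequence for krealloc(), which copies the old contents and frees the old buffer itself, and which leaves the original allocation untouched on failure. A minimal sketch of the same pattern with hypothetical names (grow_table is illustrative, not from this patch):

#include <linux/slab.h>

/* Grow a heap-allocated array by one element. On success krealloc()
 * has copied the old contents and freed the old buffer; on failure it
 * returns NULL and *table remains valid and unchanged. */
static int grow_table(int **table, size_t *nelems, gfp_t gfp)
{
	int *new_table;

	new_table = krealloc(*table, sizeof(**table) * (*nelems + 1), gfp);
	if (!new_table)
		return -ENOMEM;

	*table = new_table;
	(*nelems)++;
	return 0;
}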
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 3ddacf39b7ba..4346a24568fb 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -904,8 +904,8 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 
 	mutex_lock(&file->mut);
 	mc = ucma_alloc_multicast(ctx);
-	if (IS_ERR(mc)) {
-		ret = PTR_ERR(mc);
+	if (!mc) {
+		ret = -ENOMEM;
 		goto err1;
 	}
 
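This fix matters because ucma_alloc_multicast() is a kzalloc()-backed allocator that returns NULL on failure, and IS_ERR(NULL) is false, so the old test let an allocation failure slip through to a later NULL dereference. A hedged sketch of the NULL-on-failure convention (the widget names are hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

struct widget { int id; };

/* NULL-on-failure convention, like ucma_alloc_multicast() */
static struct widget *widget_alloc(void)
{
	return kzalloc(sizeof(struct widget), GFP_KERNEL);
}

static int widget_create(void)
{
	struct widget *w = widget_alloc();

	/* Correct test for this convention; IS_ERR(w) would be false
	 * for w == NULL, silently masking the failure. */
	if (!w)
		return -ENOMEM;

	kfree(w);
	return 0;
}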
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index c325c44807e8..44e936e48a31 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1942,6 +1942,7 @@ fail4:
 fail3:
 	cxgb3_free_atid(ep->com.tdev, ep->atid);
 fail2:
+	cm_id->rem_ref(cm_id);
 	put_ep(&ep->com);
 out:
 	return err;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 5d7b7855afb9..4df887af66a5 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -128,6 +128,8 @@ struct ehca_shca {
 	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
 	u32 hca_cap_mr_pgsize;
 	int max_mtu;
+	int max_num_qps;
+	int max_num_cqs;
 	atomic_t num_cqs;
 	atomic_t num_qps;
 };
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 33647a95eb9a..2f4c28a30271 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -132,9 +132,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
 
-	if (!atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq)) {
+	if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
 		ehca_err(device, "Unable to create CQ, max number of %i "
-			 "CQs reached.", ehca_max_cq);
+			 "CQs reached.", shca->max_num_cqs);
 		ehca_err(device, "To increase the maximum number of CQs "
 			 "use the number_of_cqs module parameter.\n");
 		return ERR_PTR(-ENOSPC);
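atomic_add_unless(&v, 1, limit) is what keeps this now per-adapter cap race-free: it increments only if the counter is not already at the limit, and returns non-zero exactly when the increment happened. The same gatekeeping pattern in isolation (names are illustrative, not from ehca):

#include <linux/atomic.h>	/* <asm/atomic.h> on kernels of this era */
#include <linux/errno.h>

static atomic_t nr_objs = ATOMIC_INIT(0);

/* Atomically take one slot out of max_objs; concurrent callers can
 * never push the count past the limit. */
static int obj_reserve(int max_objs)
{
	if (!atomic_add_unless(&nr_objs, 1, max_objs))
		return -ENOSPC;
	return 0;
}

static void obj_release(void)
{
	atomic_dec(&nr_objs);
}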
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 598844d2edc9..bb02a86aa526 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -44,6 +44,8 @@
 #include <linux/slab.h>
 #endif
 
+#include <linux/notifier.h>
+#include <linux/memory.h>
 #include "ehca_classes.h"
 #include "ehca_iverbs.h"
 #include "ehca_mrmw.h"
@@ -366,22 +368,23 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 		shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
 
 	/* Set maximum number of CQs and QPs to calculate EQ size */
-	if (ehca_max_qp == -1)
-		ehca_max_qp = min_t(int, rblock->max_qp, EHCA_MAX_NUM_QUEUES);
-	else if (ehca_max_qp < 1 || ehca_max_qp > rblock->max_qp) {
-		ehca_gen_err("Requested number of QPs is out of range (1 - %i) "
-			     "specified by HW", rblock->max_qp);
-		ret = -EINVAL;
-		goto sense_attributes1;
+	if (shca->max_num_qps == -1)
+		shca->max_num_qps = min_t(int, rblock->max_qp,
+					  EHCA_MAX_NUM_QUEUES);
+	else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
+		ehca_gen_warn("The requested number of QPs is out of range "
+			      "(1 - %i) specified by HW. Value is set to %i",
+			      rblock->max_qp, rblock->max_qp);
+		shca->max_num_qps = rblock->max_qp;
 	}
 
-	if (ehca_max_cq == -1)
-		ehca_max_cq = min_t(int, rblock->max_cq, EHCA_MAX_NUM_QUEUES);
-	else if (ehca_max_cq < 1 || ehca_max_cq > rblock->max_cq) {
-		ehca_gen_err("Requested number of CQs is out of range (1 - %i) "
-			     "specified by HW", rblock->max_cq);
-		ret = -EINVAL;
-		goto sense_attributes1;
+	if (shca->max_num_cqs == -1)
+		shca->max_num_cqs = min_t(int, rblock->max_cq,
+					  EHCA_MAX_NUM_QUEUES);
+	else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
+		ehca_gen_warn("The requested number of CQs is out of range "
+			      "(1 - %i) specified by HW. Value is set to %i",
+			      rblock->max_cq, rblock->max_cq);
 	}
 
 	/* query max MTU from first port -- it's the same for all ports */
@@ -733,9 +736,13 @@ static int __devinit ehca_probe(struct of_device *dev,
 		ehca_gen_err("Cannot allocate shca memory.");
 		return -ENOMEM;
 	}
+
 	mutex_init(&shca->modify_mutex);
 	atomic_set(&shca->num_cqs, 0);
 	atomic_set(&shca->num_qps, 0);
+	shca->max_num_qps = ehca_max_qp;
+	shca->max_num_cqs = ehca_max_cq;
+
 	for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
 		spin_lock_init(&shca->sport[i].mod_sqp_lock);
 
@@ -755,7 +762,7 @@ static int __devinit ehca_probe(struct of_device *dev,
 		goto probe1;
 	}
 
-	eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp;
+	eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
 	/* create event queues */
 	ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
 	if (ret) {
@@ -964,6 +971,41 @@ void ehca_poll_eqs(unsigned long data)
 	spin_unlock(&shca_list_lock);
 }
 
+static int ehca_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	static unsigned long ehca_dmem_warn_time;
+
+	switch (action) {
+	case MEM_CANCEL_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_ONLINE:
+	case MEM_OFFLINE:
+		return NOTIFY_OK;
+	case MEM_GOING_ONLINE:
+	case MEM_GOING_OFFLINE:
+		/* only ok if no hca is attached to the lpar */
+		spin_lock(&shca_list_lock);
+		if (list_empty(&shca_list)) {
+			spin_unlock(&shca_list_lock);
+			return NOTIFY_OK;
+		} else {
+			spin_unlock(&shca_list_lock);
+			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
+						   30 * 1000))
+				ehca_gen_err("DMEM operations are not allowed"
+					     "as long as an ehca adapter is"
+					     "attached to the LPAR");
+			return NOTIFY_BAD;
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ehca_mem_nb = {
+	.notifier_call = ehca_mem_notifier,
+};
+
 static int __init ehca_module_init(void)
 {
 	int ret;
@@ -991,6 +1033,12 @@ static int __init ehca_module_init(void)
 		goto module_init2;
 	}
 
+	ret = register_memory_notifier(&ehca_mem_nb);
+	if (ret) {
+		ehca_gen_err("Failed registering memory add/remove notifier");
+		goto module_init3;
+	}
+
 	if (ehca_poll_all_eqs != 1) {
 		ehca_gen_err("WARNING!!!");
 		ehca_gen_err("It is possible to lose interrupts.");
@@ -1003,6 +1051,9 @@ static int __init ehca_module_init(void)
 
 	return 0;
 
+module_init3:
+	ibmebus_unregister_driver(&ehca_driver);
+
 module_init2:
 	ehca_destroy_slab_caches();
 
@@ -1018,6 +1069,8 @@ static void __exit ehca_module_exit(void)
 
 	ibmebus_unregister_driver(&ehca_driver);
 
+	unregister_memory_notifier(&ehca_mem_nb);
+
 	ehca_destroy_slab_caches();
 
 	ehca_destroy_comp_pool();
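The notifier above follows the standard memory-hotplug notification protocol: the callback can veto MEM_GOING_ONLINE/MEM_GOING_OFFLINE by returning NOTIFY_BAD, while MEM_ONLINE, MEM_OFFLINE, and the MEM_CANCEL_* events are informational. A stripped-down sketch of the registration pattern (module and function names are hypothetical, not from the patch):

#include <linux/module.h>
#include <linux/memory.h>
#include <linux/notifier.h>

static int demo_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	/* Veto hotplug transitions while our hardware depends on the
	 * current memory layout; everything else passes through. */
	if (action == MEM_GOING_ONLINE || action == MEM_GOING_OFFLINE)
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

static struct notifier_block demo_mem_nb = {
	.notifier_call = demo_mem_notifier,
};

static int __init demo_init(void)
{
	return register_memory_notifier(&demo_mem_nb);
}

static void __exit demo_exit(void)
{
	unregister_memory_notifier(&demo_mem_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");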
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4dbe2870e014..4d54b9f64567 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -465,9 +465,9 @@ static struct ehca_qp *internal_create_qp(
 	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
 	unsigned long flags;
 
-	if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) {
+	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
 		ehca_err(pd->device, "Unable to create QP, max number of %i "
-			 "QPs reached.", ehca_max_qp);
+			 "QPs reached.", shca->max_num_qps);
 		ehca_err(pd->device, "To increase the maximum number of QPs "
 			 "use the number_of_qps module parameter.\n");
 		return ERR_PTR(-ENOSPC);
@@ -502,6 +502,12 @@ static struct ehca_qp *internal_create_qp(
 	if (init_attr->srq) {
 		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
 
+		if (qp_type == IB_QPT_UC) {
+			ehca_err(pd->device, "UC with SRQ not supported");
+			atomic_dec(&shca->num_qps);
+			return ERR_PTR(-EINVAL);
+		}
+
 		has_srq = 1;
 		parms.ext_type = EQPT_SRQBASE;
 		parms.srq_qpn = my_srq->real_qp_num;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index cdca3a511e1c..606f1e2ef284 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -298,7 +298,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
 	int p, q;
 	int ret;
 
-	for (p = 0; p < dev->dev->caps.num_ports; ++p)
+	for (p = 0; p < dev->num_ports; ++p)
 		for (q = 0; q <= 1; ++q) {
 			agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
 						      q ? IB_QPT_GSI : IB_QPT_SMI,
@@ -314,7 +314,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
 	return 0;
 
 err:
-	for (p = 0; p < dev->dev->caps.num_ports; ++p)
+	for (p = 0; p < dev->num_ports; ++p)
 		for (q = 0; q <= 1; ++q)
 			if (dev->send_agent[p][q])
 				ib_unregister_mad_agent(dev->send_agent[p][q]);
@@ -327,7 +327,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
 	struct ib_mad_agent *agent;
 	int p, q;
 
-	for (p = 0; p < dev->dev->caps.num_ports; ++p) {
+	for (p = 0; p < dev->num_ports; ++p) {
 		for (q = 0; q <= 1; ++q) {
 			agent = dev->send_agent[p][q];
 			dev->send_agent[p][q] = NULL;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index a3c2851c0545..2e80f8f47b02 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -574,7 +574,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.owner = THIS_MODULE;
 	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
-	ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports;
+	ibdev->num_ports = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ibdev->num_ports++;
+	ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors = 1;
 	ibdev->ib_dev.dma_device = &dev->pdev->dev;
 
@@ -691,7 +694,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
-	for (p = 1; p <= dev->caps.num_ports; ++p)
+	for (p = 1; p <= ibdev->num_ports; ++p)
 		mlx4_CLOSE_PORT(dev, p);
 
 	mlx4_ib_mad_cleanup(ibdev);
@@ -706,6 +709,10 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 			  enum mlx4_dev_event event, int port)
 {
 	struct ib_event ibev;
+	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+
+	if (port > ibdev->num_ports)
+		return;
 
 	switch (event) {
 	case MLX4_DEV_EVENT_PORT_UP:
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6e2b0dc21b61..9974e886b8de 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -162,6 +162,7 @@ struct mlx4_ib_ah {
 struct mlx4_ib_dev {
 	struct ib_device ib_dev;
 	struct mlx4_dev *dev;
+	int num_ports;
 	void __iomem *uar_map;
 
 	struct mlx4_uar priv_uar;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index baa01deb2436..39167a797f99 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -451,6 +451,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
 {
+	int qpn;
 	int err;
 
 	mutex_init(&qp->mutex);
@@ -545,9 +546,17 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 	}
 
-	err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
+	if (sqpn) {
+		qpn = sqpn;
+	} else {
+		err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+		if (err)
+			goto err_wrid;
+	}
+
+	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
 	if (err)
-		goto err_wrid;
+		goto err_qpn;
 
 	/*
 	 * Hardware wants QPN written in big-endian order (after
@@ -560,6 +569,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 
 	return 0;
 
+err_qpn:
+	if (!sqpn)
+		mlx4_qp_release_range(dev->dev, qpn, 1);
+
 err_wrid:
 	if (pd->uobject) {
 		if (!init_attr->srq)
@@ -655,6 +668,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	mlx4_ib_unlock_cqs(send_cq, recv_cq);
 
 	mlx4_qp_free(dev->dev, &qp->mqp);
+
+	if (!is_sqp(dev, qp))
+		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 	if (is_user) {
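The new calls split QP-number management into reserve/alloc and free/release stages: mlx4_qp_reserve_range() hands out a QPN (or an aligned block of them), mlx4_qp_alloc() binds a QP context to it, and teardown releases in reverse order. A hedged sketch of the lifecycle for one QP (the wrapper function is made up; the mlx4 calls are the ones this patch uses):

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>

static int demo_qp_lifecycle(struct mlx4_dev *dev, struct mlx4_qp *mqp)
{
	int qpn, err;

	err = mlx4_qp_reserve_range(dev, 1, 1, &qpn);	/* cnt = 1, align = 1 */
	if (err)
		return err;

	err = mlx4_qp_alloc(dev, qpn, mqp);
	if (err) {
		mlx4_qp_release_range(dev, qpn, 1);
		return err;
	}

	/* ... post work requests, modify QP state, etc. ... */

	mlx4_qp_free(dev, mqp);
	mlx4_qp_release_range(dev, qpn, 1);
	return 0;
}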
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 68ba5c3482e4..e0c7dfabf2b4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -507,6 +507,7 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev);
 void ipoib_drain_cq(struct net_device *dev);
 
 void ipoib_set_ethtool_ops(struct net_device *dev);
+int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 66af5c1a76e5..e9795f60e5d6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -42,6 +42,13 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
 	strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
 }
 
+static u32 ipoib_get_rx_csum(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	return test_bit(IPOIB_FLAG_CSUM, &priv->flags) &&
+		!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+}
+
 static int ipoib_get_coalesce(struct net_device *dev,
 			      struct ethtool_coalesce *coal)
 {
@@ -129,7 +136,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
 
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo = ipoib_get_drvinfo,
-	.get_tso = ethtool_op_get_tso,
+	.get_rx_csum = ipoib_get_rx_csum,
 	.get_coalesce = ipoib_get_coalesce,
 	.set_coalesce = ipoib_set_coalesce,
 	.get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0e748aeeae99..28eb6f03c588 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -685,10 +685,6 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
 			   round_jiffies_relative(HZ));
 
-	init_timer(&priv->poll_timer);
-	priv->poll_timer.function = ipoib_ib_tx_timer_func;
-	priv->poll_timer.data = (unsigned long)dev;
-
 	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 
 	return 0;
@@ -906,6 +902,9 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 		return -ENODEV;
 	}
 
+	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
+		    (unsigned long) dev);
+
 	if (dev->flags & IFF_UP) {
 		if (ipoib_ib_dev_open(dev)) {
 			ipoib_transport_dev_cleanup(dev);
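The unload crash happened because the timer was only initialized in ipoib_ib_dev_open(): if the interface was never brought up, the stop path later operated on an uninitialized timer. Moving initialization into ipoib_ib_dev_init() makes it unconditional. For reference, setup_timer() in the timer API of this era is shorthand for init_timer() plus the two field assignments; a small sketch with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_timer_func(unsigned long data)
{
	/* Runs in softirq context; 'data' was supplied at setup time. */
}

static void demo_start(void)
{
	/* Equivalent to: init_timer(&demo_timer);
	 *		  demo_timer.function = demo_timer_func;
	 *		  demo_timer.data = 0UL; */
	setup_timer(&demo_timer, demo_timer_func, 0UL);
	mod_timer(&demo_timer, jiffies + HZ);	/* fire in ~1 second */
}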
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c0ee514396df..fddded7900d1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1173,11 +1173,48 @@ int ipoib_add_pkey_attr(struct net_device *dev)
 	return device_create_file(&dev->dev, &dev_attr_pkey);
 }
 
+int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
+{
+	struct ib_device_attr *device_attr;
+	int result = -ENOMEM;
+
+	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
+	if (!device_attr) {
+		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
+		       hca->name, sizeof *device_attr);
+		return result;
+	}
+
+	result = ib_query_device(hca, device_attr);
+	if (result) {
+		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
+		       hca->name, result);
+		kfree(device_attr);
+		return result;
+	}
+	priv->hca_caps = device_attr->device_cap_flags;
+
+	kfree(device_attr);
+
+	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
+		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+	}
+
+	if (lro)
+		priv->dev->features |= NETIF_F_LRO;
+
+	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
+		priv->dev->features |= NETIF_F_TSO;
+
+	return 0;
+}
+
+
 static struct net_device *ipoib_add_port(const char *format,
 					 struct ib_device *hca, u8 port)
 {
 	struct ipoib_dev_priv *priv;
-	struct ib_device_attr *device_attr;
 	struct ib_port_attr attr;
 	int result = -ENOMEM;
 
@@ -1206,31 +1243,8 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto device_init_failed;
 	}
 
-	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
-	if (!device_attr) {
-		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
-		       hca->name, sizeof *device_attr);
-		goto device_init_failed;
-	}
-
-	result = ib_query_device(hca, device_attr);
-	if (result) {
-		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
-		       hca->name, result);
-		kfree(device_attr);
-		goto device_init_failed;
-	}
-	priv->hca_caps = device_attr->device_cap_flags;
-
-	kfree(device_attr);
-
-	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
-		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
-		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-	}
-
-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
+	if (ipoib_set_dev_features(priv, hca))
+		goto device_init_failed;
 
 	/*
 	 * Set the full membership bit, so that we join the right
@@ -1266,9 +1280,6 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto event_failed;
 	}
 
-	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
-		priv->dev->features |= NETIF_F_TSO;
-
 	result = register_netdev(priv->dev);
 	if (result) {
 		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index b08eb56196d3..2cf1a4088718 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -93,6 +93,10 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
 	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
 
+	result = ipoib_set_dev_features(priv, ppriv->ca);
+	if (result)
+		goto device_init_failed;
+
 	priv->pkey = pkey;
 
 	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ad301ace6085..a5a9c6da5134 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2504,6 +2504,15 @@ config PASEMI_MAC
 	  This driver supports the on-chip 1/10Gbit Ethernet controller on
 	  PA Semi's PWRficient line of chips.
 
+config MLX4_EN
+	tristate "Mellanox Technologies 10Gbit Ethernet support"
+	depends on PCI && INET
+	select MLX4_CORE
+	select INET_LRO
+	help
+	  This driver supports Mellanox Technologies ConnectX Ethernet
+	  devices.
+
 config MLX4_CORE
 	tristate
 	depends on PCI
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 0952a6528f58..a7a97bf998f8 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,4 +1,9 @@
 obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
 
 mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-		mr.o pd.o profile.o qp.o reset.o srq.o
+		mr.o pd.o port.o profile.o qp.o reset.o srq.o
+
+obj-$(CONFIG_MLX4_EN) += mlx4_en.o
+
+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
+		en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b411b79d72ad..ad95d5f7b630 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -48,13 +48,16 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
 
 	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
 	if (obj >= bitmap->max) {
-		bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+				& bitmap->mask;
 		obj = find_first_zero_bit(bitmap->table, bitmap->max);
 	}
 
 	if (obj < bitmap->max) {
 		set_bit(obj, bitmap->table);
-		bitmap->last = (obj + 1) & (bitmap->max - 1);
+		bitmap->last = (obj + 1);
+		if (bitmap->last == bitmap->max)
+			bitmap->last = 0;
 		obj |= bitmap->top;
 	} else
 		obj = -1;
@@ -66,16 +69,90 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
 
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
 {
-	obj &= bitmap->max - 1;
+	mlx4_bitmap_free_range(bitmap, obj, 1);
+}
+
+static unsigned long find_aligned_range(unsigned long *bitmap,
+					u32 start, u32 nbits,
+					int len, int align)
+{
+	unsigned long end, i;
+
+again:
+	start = ALIGN(start, align);
+
+	while ((start < nbits) && test_bit(start, bitmap))
+		start += align;
+
+	if (start >= nbits)
+		return -1;
+
+	end = start+len;
+	if (end > nbits)
+		return -1;
+
+	for (i = start + 1; i < end; i++) {
+		if (test_bit(i, bitmap)) {
+			start = i + 1;
+			goto again;
+		}
+	}
+
+	return start;
+}
+
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
+{
+	u32 obj, i;
+
+	if (likely(cnt == 1 && align == 1))
+		return mlx4_bitmap_alloc(bitmap);
+
+	spin_lock(&bitmap->lock);
+
+	obj = find_aligned_range(bitmap->table, bitmap->last,
+				 bitmap->max, cnt, align);
+	if (obj >= bitmap->max) {
+		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+				& bitmap->mask;
+		obj = find_aligned_range(bitmap->table, 0, bitmap->max,
+					 cnt, align);
+	}
+
+	if (obj < bitmap->max) {
+		for (i = 0; i < cnt; i++)
+			set_bit(obj + i, bitmap->table);
+		if (obj == bitmap->last) {
+			bitmap->last = (obj + cnt);
+			if (bitmap->last >= bitmap->max)
+				bitmap->last = 0;
+		}
+		obj |= bitmap->top;
+	} else
+		obj = -1;
+
+	spin_unlock(&bitmap->lock);
+
+	return obj;
+}
+
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+{
+	u32 i;
+
+	obj &= bitmap->max + bitmap->reserved_top - 1;
 
 	spin_lock(&bitmap->lock);
-	clear_bit(obj, bitmap->table);
+	for (i = 0; i < cnt; i++)
+		clear_bit(obj + i, bitmap->table);
 	bitmap->last = min(bitmap->last, obj);
-	bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+			& bitmap->mask;
 	spin_unlock(&bitmap->lock);
 }
 
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+		     u32 reserved_bot, u32 reserved_top)
 {
 	int i;
 
@@ -85,14 +162,16 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved
 
 	bitmap->last = 0;
 	bitmap->top  = 0;
-	bitmap->max  = num;
+	bitmap->max  = num - reserved_top;
 	bitmap->mask = mask;
+	bitmap->reserved_top = reserved_top;
 	spin_lock_init(&bitmap->lock);
-	bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
+	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+				sizeof (long), GFP_KERNEL);
 	if (!bitmap->table)
 		return -ENOMEM;
 
-	for (i = 0; i < reserved; ++i)
+	for (i = 0; i < reserved_bot; ++i)
 		set_bit(i, bitmap->table);
 
 	return 0;
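find_aligned_range() above is a first-fit scan: advance to the next aligned bit, skip aligned positions whose first bit is taken, then verify the remaining len-1 bits and restart just past any collision. The same control flow as a self-contained userspace sketch (a plain int array stands in for the kernel bitmap helpers; values are illustrative):

#include <stdio.h>

#define NBITS 16

/* First-fit search for 'len' consecutive free slots starting at an
 * 'align'-aligned offset; mirrors the kernel helper's control flow. */
static int find_aligned_range(const int *used, int nbits, int len, int align)
{
	int start = 0, i;

again:
	start = (start + align - 1) / align * align;	/* ALIGN(start, align) */

	while (start < nbits && used[start])
		start += align;
	if (start >= nbits || start + len > nbits)
		return -1;

	for (i = start + 1; i < start + len; i++) {
		if (used[i]) {
			start = i + 1;
			goto again;
		}
	}
	return start;
}

int main(void)
{
	int used[NBITS] = { [0] = 1, [1] = 1, [5] = 1 };

	/* Expect 8: the aligned candidate 4..7 fails on busy slot 5. */
	printf("%d\n", find_aligned_range(used, NBITS, 4, 4));
	return 0;
}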
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 9bb50e3f8974..b7ad2829d67e 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -300,7 +300,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
 
 	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
-			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs);
+			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
new file mode 100644
index 000000000000..1368a8010af4
--- /dev/null
+++ b/drivers/net/mlx4/en_cq.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
+{
+	return;
+}
+
+
+int mlx4_en_create_cq(struct mlx4_en_priv *priv,
+		      struct mlx4_en_cq *cq,
+		      int entries, int ring, enum cq_type mode)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	cq->size = entries;
+	if (mode == RX)
+		cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
+	else
+		cq->buf_size = sizeof(struct mlx4_cqe);
+
+	cq->ring = ring;
+	cq->is_tx = mode;
+	spin_lock_init(&cq->lock);
+
+	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
+				 cq->buf_size, 2 * PAGE_SIZE);
+	if (err)
+		return err;
+
+	err = mlx4_en_map_buffer(&cq->wqres.buf);
+	if (err)
+		mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+
+	return err;
+}
+
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	cq->dev = mdev->pndev[priv->port];
+	cq->mcq.set_ci_db = cq->wqres.db.db;
+	cq->mcq.arm_db = cq->wqres.db.db + 1;
+	*cq->mcq.set_ci_db = 0;
+	*cq->mcq.arm_db = 0;
+	cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+	memset(cq->buf, 0, cq->buf_size);
+
+	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
+			    cq->wqres.db.dma, &cq->mcq, cq->is_tx);
+	if (err)
+		return err;
+
+	cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
+	cq->mcq.event = mlx4_en_cq_event;
+
+	if (cq->is_tx) {
+		init_timer(&cq->timer);
+		cq->timer.function = mlx4_en_poll_tx_cq;
+		cq->timer.data = (unsigned long) cq;
+	} else {
+		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+		napi_enable(&cq->napi);
+	}
+
+	return 0;
+}
+
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	mlx4_en_unmap_buffer(&cq->wqres.buf);
+	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+	cq->buf_size = 0;
+	cq->buf = NULL;
+}
+
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	if (cq->is_tx)
+		del_timer(&cq->timer);
+	else
+		napi_disable(&cq->napi);
+
+	mlx4_cq_free(mdev->dev, &cq->mcq);
+}
+
+/* Set rx cq moderation parameters */
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+	return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
+			      cq->moder_cnt, cq->moder_time);
+}
+
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+	cq->armed = 1;
+	mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
+		    &priv->mdev->uar_lock);
+
+	return 0;
+}
+
+
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
new file mode 100644
index 000000000000..1b0eebf84f76
--- /dev/null
+++ b/drivers/net/mlx4/en_main.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/cpumask.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
+MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
+
+static const char mlx4_en_version[] =
+	DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
+	DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
+			  enum mlx4_dev_event event, int port)
+{
+	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
+	struct mlx4_en_priv *priv;
+
+	if (!mdev->pndev[port])
+		return;
+
+	priv = netdev_priv(mdev->pndev[port]);
+	switch (event) {
+	case MLX4_DEV_EVENT_PORT_UP:
+	case MLX4_DEV_EVENT_PORT_DOWN:
+		/* To prevent races, we poll the link state in a separate
+		   task rather than changing it here */
+		priv->link_state = event;
+		queue_work(mdev->workqueue, &priv->linkstate_task);
+		break;
+
+	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+		mlx4_err(mdev, "Internal error detected, restarting device\n");
+		break;
+
+	default:
+		mlx4_warn(mdev, "Unhandled event: %d\n", event);
+	}
+}
+
+static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
+{
+	struct mlx4_en_dev *mdev = endev_ptr;
+	int i;
+
+	mutex_lock(&mdev->state_lock);
+	mdev->device_up = false;
+	mutex_unlock(&mdev->state_lock);
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+		if (mdev->pndev[i])
+			mlx4_en_destroy_netdev(mdev->pndev[i]);
+
+	flush_workqueue(mdev->workqueue);
+	destroy_workqueue(mdev->workqueue);
+	mlx4_mr_free(dev, &mdev->mr);
+	mlx4_uar_free(dev, &mdev->priv_uar);
+	mlx4_pd_free(dev, mdev->priv_pdn);
+	kfree(mdev);
+}
+
+static void *mlx4_en_add(struct mlx4_dev *dev)
+{
+	static int mlx4_en_version_printed;
+	struct mlx4_en_dev *mdev;
+	int i;
+	int err;
+
+	if (!mlx4_en_version_printed) {
+		printk(KERN_INFO "%s", mlx4_en_version);
+		mlx4_en_version_printed++;
+	}
+
+	mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+	if (!mdev) {
+		dev_err(&dev->pdev->dev, "Device struct alloc failed, "
+			"aborting.\n");
+		err = -ENOMEM;
+		goto err_free_res;
+	}
+
+	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
+		goto err_free_dev;
+
+	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
+		goto err_pd;
+
+	mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	if (!mdev->uar_map)
+		goto err_uar;
+	spin_lock_init(&mdev->uar_lock);
+
+	mdev->dev = dev;
+	mdev->dma_device = &(dev->pdev->dev);
+	mdev->pdev = dev->pdev;
+	mdev->device_up = false;
+
+	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
+	if (!mdev->LSO_support)
+		mlx4_warn(mdev, "LSO not supported, please upgrade to later "
+			  "FW version to enable LSO\n");
+
+	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
+			  MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
+			  0, 0, &mdev->mr)) {
+		mlx4_err(mdev, "Failed allocating memory region\n");
+		goto err_uar;
+	}
+	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
+		mlx4_err(mdev, "Failed enabling memory region\n");
+		goto err_mr;
+	}
+
+	/* Build device profile according to supplied module parameters */
+	err = mlx4_en_get_profile(mdev);
+	if (err) {
+		mlx4_err(mdev, "Bad module parameters, aborting.\n");
+		goto err_mr;
+	}
+
+	/* Configure wich ports to start according to module parameters */
+	mdev->port_cnt = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+		mdev->port_cnt++;
+
+	/* If we did not receive an explicit number of Rx rings, default to
+	 * the number of completion vectors populated by the mlx4_core */
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
+			  mdev->profile.prof[i].tx_ring_num, i);
+		if (!mdev->profile.prof[i].rx_ring_num) {
+			mdev->profile.prof[i].rx_ring_num = 1;
+			mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
+				  1, i);
+		} else
+			mlx4_info(mdev, "Using %d rx rings for port:%d\n",
+				  mdev->profile.prof[i].rx_ring_num, i);
+	}
+
+	/* Create our own workqueue for reset/multicast tasks
+	 * Note: we cannot use the shared workqueue because of deadlocks caused
+	 *       by the rtnl lock */
+	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
+	if (!mdev->workqueue) {
+		err = -ENOMEM;
+		goto err_close_nic;
+	}
+
+	/* At this stage all non-port specific tasks are complete:
+	 * mark the card state as up */
+	mutex_init(&mdev->state_lock);
+	mdev->device_up = true;
+
+	/* Setup ports */
+
+	/* Create a netdev for each port */
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+		mlx4_info(mdev, "Activating port:%d\n", i);
+		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
+			mdev->pndev[i] = NULL;
+			goto err_free_netdev;
+		}
+	}
+	return mdev;
+
+
+err_free_netdev:
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+		if (mdev->pndev[i])
+			mlx4_en_destroy_netdev(mdev->pndev[i]);
+	}
+
+	mutex_lock(&mdev->state_lock);
+	mdev->device_up = false;
+	mutex_unlock(&mdev->state_lock);
+	flush_workqueue(mdev->workqueue);
+
+	/* Stop event queue before we drop down to release shared SW state */
+
+err_close_nic:
+	destroy_workqueue(mdev->workqueue);
+err_mr:
+	mlx4_mr_free(dev, &mdev->mr);
+err_uar:
+	mlx4_uar_free(dev, &mdev->priv_uar);
+err_pd:
+	mlx4_pd_free(dev, mdev->priv_pdn);
+err_free_dev:
+	kfree(mdev);
+err_free_res:
+	return NULL;
+}
+
+static struct mlx4_interface mlx4_en_interface = {
+	.add = mlx4_en_add,
+	.remove = mlx4_en_remove,
+	.event = mlx4_en_event,
+};
+
+static int __init mlx4_en_init(void)
+{
+	return mlx4_register_interface(&mlx4_en_interface);
+}
+
+static void __exit mlx4_en_cleanup(void)
+{
+	mlx4_unregister_interface(&mlx4_en_interface);
+}
+
+module_init(mlx4_en_init);
+module_exit(mlx4_en_cleanup);
+
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
new file mode 100644
index 000000000000..a339afbeed38
--- /dev/null
+++ b/drivers/net/mlx4/en_netdev.c
@@ -0,0 +1,1088 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/cq.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+
+static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
+	priv->vlgrp = grp;
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->device_up && priv->port_up) {
+		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
+		if (err)
+			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	if (!priv->vlgrp)
+		return;
+
+	mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
+		 vid, vlan_group_get_device(priv->vlgrp, vid));
+
+	/* Add VID to port VLAN filter */
+	mutex_lock(&mdev->state_lock);
+	if (mdev->device_up && priv->port_up) {
+		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+		if (err)
+			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	if (!priv->vlgrp)
+		return;
+
+	mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
+		 "entry:%p)\n", vid, priv->vlgrp,
+		 vlan_group_get_device(priv->vlgrp, vid));
+	vlan_group_set_device(priv->vlgrp, vid, NULL);
+
+	/* Remove VID from port VLAN filter */
+	mutex_lock(&mdev->state_lock);
+	if (mdev->device_up && priv->port_up) {
+		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+		if (err)
+			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
+static u64 mlx4_en_mac_to_u64(u8 *addr)
+{
+	u64 mac = 0;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		mac <<= 8;
+		mac |= addr[i];
+	}
+	return mac;
+}
+
+static int mlx4_en_set_mac(struct net_device *dev, void *addr)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct sockaddr *saddr = addr;
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
+	queue_work(mdev->workqueue, &priv->mac_task);
+	return 0;
+}
+
+static void mlx4_en_do_set_mac(struct work_struct *work)
+{
+	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+						 mac_task);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err = 0;
+
+	mutex_lock(&mdev->state_lock);
+	if (priv->port_up) {
+		/* Remove old MAC and insert the new one */
+		mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+		err = mlx4_register_mac(mdev->dev, priv->port,
+					priv->mac, &priv->mac_index);
+		if (err)
+			mlx4_err(mdev, "Failed changing HW MAC address\n");
+	} else
+		mlx4_dbg(HW, priv, "Port is down, exiting...\n");
+
+	mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_clear_list(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct dev_mc_list *plist = priv->mc_list;
+	struct dev_mc_list *next;
123 | |||
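mlx4_en_mac_to_u64() packs the six MAC bytes into the low 48 bits of a u64, with addr[0] ending up as the most significant of the six used bytes. A minimal standalone sketch of the same transform (illustrative only, not part of the patch):

	#include <stdint.h>

	/* Same byte order as mlx4_en_mac_to_u64(): for the address
	 * 00:25:8b:12:34:56 this returns 0x00258b123456. */
	static uint64_t mac_to_u64(const uint8_t addr[6])
	{
		uint64_t mac = 0;
		int i;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | addr[i];
		return mac;
	}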
124 | static int mlx4_en_set_mac(struct net_device *dev, void *addr) | ||
125 | { | ||
126 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
127 | struct mlx4_en_dev *mdev = priv->mdev; | ||
128 | struct sockaddr *saddr = addr; | ||
129 | |||
130 | if (!is_valid_ether_addr(saddr->sa_data)) | ||
131 | return -EADDRNOTAVAIL; | ||
132 | |||
133 | memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); | ||
134 | priv->mac = mlx4_en_mac_to_u64(dev->dev_addr); | ||
135 | queue_work(mdev->workqueue, &priv->mac_task); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static void mlx4_en_do_set_mac(struct work_struct *work) | ||
140 | { | ||
141 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
142 | mac_task); | ||
143 | struct mlx4_en_dev *mdev = priv->mdev; | ||
144 | int err = 0; | ||
145 | |||
146 | mutex_lock(&mdev->state_lock); | ||
147 | if (priv->port_up) { | ||
148 | /* Remove old MAC and insert the new one */ | ||
149 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | ||
150 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
151 | priv->mac, &priv->mac_index); | ||
152 | if (err) | ||
153 | mlx4_err(mdev, "Failed changing HW MAC address\n"); | ||
154 | } else | ||
155 | mlx4_dbg(HW, priv, "Port is down, exiting...\n"); | ||
156 | |||
157 | mutex_unlock(&mdev->state_lock); | ||
158 | } | ||
159 | |||
160 | static void mlx4_en_clear_list(struct net_device *dev) | ||
161 | { | ||
162 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
163 | struct dev_mc_list *plist = priv->mc_list; | ||
164 | struct dev_mc_list *next; | ||
165 | |||
166 | while (plist) { | ||
167 | next = plist->next; | ||
168 | kfree(plist); | ||
169 | plist = next; | ||
170 | } | ||
171 | priv->mc_list = NULL; | ||
172 | } | ||
173 | |||
174 | static void mlx4_en_cache_mclist(struct net_device *dev) | ||
175 | { | ||
176 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
177 | struct mlx4_en_dev *mdev = priv->mdev; | ||
178 | struct dev_mc_list *mclist; | ||
179 | struct dev_mc_list *tmp; | ||
180 | struct dev_mc_list *plist = NULL; | ||
181 | |||
182 | for (mclist = dev->mc_list; mclist; mclist = mclist->next) { | ||
183 | tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC); | ||
184 | if (!tmp) { | ||
185 | mlx4_err(mdev, "failed to allocate multicast list\n"); | ||
186 | mlx4_en_clear_list(dev); | ||
187 | return; | ||
188 | } | ||
189 | memcpy(tmp, mclist, sizeof(struct dev_mc_list)); | ||
190 | tmp->next = NULL; | ||
191 | if (plist) | ||
192 | plist->next = tmp; | ||
193 | else | ||
194 | priv->mc_list = tmp; | ||
195 | plist = tmp; | ||
196 | } | ||
197 | } | ||
198 | |||
199 | |||
200 | static void mlx4_en_set_multicast(struct net_device *dev) | ||
201 | { | ||
202 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
203 | |||
204 | if (!priv->port_up) | ||
205 | return; | ||
206 | |||
207 | queue_work(priv->mdev->workqueue, &priv->mcast_task); | ||
208 | } | ||
209 | |||
210 | static void mlx4_en_do_set_multicast(struct work_struct *work) | ||
211 | { | ||
212 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
213 | mcast_task); | ||
214 | struct mlx4_en_dev *mdev = priv->mdev; | ||
215 | struct net_device *dev = priv->dev; | ||
216 | struct dev_mc_list *mclist; | ||
217 | u64 mcast_addr = 0; | ||
218 | int err; | ||
219 | |||
220 | mutex_lock(&mdev->state_lock); | ||
221 | if (!mdev->device_up) { | ||
222 | mlx4_dbg(HW, priv, "Card is not up, ignoring " | ||
223 | "multicast change.\n"); | ||
224 | goto out; | ||
225 | } | ||
226 | if (!priv->port_up) { | ||
227 | mlx4_dbg(HW, priv, "Port is down, ignoring " | ||
228 | "multicast change.\n"); | ||
229 | goto out; | ||
230 | } | ||
231 | |||
232 | /* | ||
233 | * Promiscuous mode: disable all filters | ||
234 | */ | ||
235 | |||
236 | if (dev->flags & IFF_PROMISC) { | ||
237 | if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { | ||
238 | if (netif_msg_rx_status(priv)) | ||
239 | mlx4_warn(mdev, "Port:%d entering promiscuous mode\n", | ||
240 | priv->port); | ||
241 | priv->flags |= MLX4_EN_FLAG_PROMISC; | ||
242 | |||
243 | /* Enable promiscuous mode */ | ||
244 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, | ||
245 | priv->base_qpn, 1); | ||
246 | if (err) | ||
247 | mlx4_err(mdev, "Failed enabling " | ||
248 | "promiscuous mode\n"); | ||
249 | |||
250 | /* Disable port multicast filter (unconditionally) */ | ||
251 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | ||
252 | 0, MLX4_MCAST_DISABLE); | ||
253 | if (err) | ||
254 | mlx4_err(mdev, "Failed disabling " | ||
255 | "multicast filter\n"); | ||
256 | |||
257 | /* Disable port VLAN filter */ | ||
258 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); | ||
259 | if (err) | ||
260 | mlx4_err(mdev, "Failed disabling " | ||
261 | "VLAN filter\n"); | ||
262 | } | ||
263 | goto out; | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * Not in promiscuous mode | ||
268 | */ | ||
269 | |||
270 | if (priv->flags & MLX4_EN_FLAG_PROMISC) { | ||
271 | if (netif_msg_rx_status(priv)) | ||
272 | mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n", | ||
273 | priv->port); | ||
274 | priv->flags &= ~MLX4_EN_FLAG_PROMISC; | ||
275 | |||
276 | /* Disable promiscuous mode */ | ||
277 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, | ||
278 | priv->base_qpn, 0); | ||
279 | if (err) | ||
280 | mlx4_err(mdev, "Failed disabling promiscuous mode\n"); | ||
281 | |||
282 | /* Enable port VLAN filter */ | ||
283 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | ||
284 | if (err) | ||
285 | mlx4_err(mdev, "Failed enabling VLAN filter\n"); | ||
286 | } | ||
287 | |||
288 | /* Enable/disable the multicast filter according to IFF_ALLMULTI */ | ||
289 | if (dev->flags & IFF_ALLMULTI) { | ||
290 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | ||
291 | 0, MLX4_MCAST_DISABLE); | ||
292 | if (err) | ||
293 | mlx4_err(mdev, "Failed disabling multicast filter\n"); | ||
294 | } else { | ||
295 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | ||
296 | 0, MLX4_MCAST_DISABLE); | ||
297 | if (err) | ||
298 | mlx4_err(mdev, "Failed disabling multicast filter\n"); | ||
299 | |||
300 | /* Flush mcast filter and init it with broadcast address */ | ||
301 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, | ||
302 | 1, MLX4_MCAST_CONFIG); | ||
303 | |||
304 | /* Update multicast list - we cache all addresses so they won't | ||
305 | * change while HW is updated holding the command semaphore */ | ||
306 | netif_tx_lock_bh(dev); | ||
307 | mlx4_en_cache_mclist(dev); | ||
308 | netif_tx_unlock_bh(dev); | ||
309 | for (mclist = priv->mc_list; mclist; mclist = mclist->next) { | ||
310 | mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr); | ||
311 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, | ||
312 | mcast_addr, 0, MLX4_MCAST_CONFIG); | ||
313 | } | ||
314 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | ||
315 | 0, MLX4_MCAST_ENABLE); | ||
316 | if (err) | ||
317 | mlx4_err(mdev, "Failed enabling multicast filter\n"); | ||
318 | |||
319 | mlx4_en_clear_list(dev); | ||
320 | } | ||
321 | out: | ||
322 | mutex_unlock(&mdev->state_lock); | ||
323 | } | ||
324 | |||
325 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
326 | static void mlx4_en_netpoll(struct net_device *dev) | ||
327 | { | ||
328 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
329 | struct mlx4_en_cq *cq; | ||
330 | unsigned long flags; | ||
331 | int i; | ||
332 | |||
333 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
334 | cq = &priv->rx_cq[i]; | ||
335 | spin_lock_irqsave(&cq->lock, flags); | ||
336 | napi_synchronize(&cq->napi); | ||
337 | mlx4_en_process_rx_cq(dev, cq, 0); | ||
338 | spin_unlock_irqrestore(&cq->lock, flags); | ||
339 | } | ||
340 | } | ||
341 | #endif | ||
342 | |||
343 | static void mlx4_en_tx_timeout(struct net_device *dev) | ||
344 | { | ||
345 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
346 | struct mlx4_en_dev *mdev = priv->mdev; | ||
347 | |||
348 | if (netif_msg_timer(priv)) | ||
349 | mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port); | ||
350 | |||
351 | if (netif_carrier_ok(dev)) { | ||
352 | priv->port_stats.tx_timeout++; | ||
353 | mlx4_dbg(DRV, priv, "Scheduling watchdog\n"); | ||
354 | queue_work(mdev->workqueue, &priv->watchdog_task); | ||
355 | } | ||
356 | } | ||
357 | |||
358 | |||
359 | static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) | ||
360 | { | ||
361 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
362 | |||
363 | spin_lock_bh(&priv->stats_lock); | ||
364 | memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); | ||
365 | spin_unlock_bh(&priv->stats_lock); | ||
366 | |||
367 | return &priv->ret_stats; | ||
368 | } | ||
369 | |||
370 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) | ||
371 | { | ||
372 | struct mlx4_en_dev *mdev = priv->mdev; | ||
373 | struct mlx4_en_cq *cq; | ||
374 | int i; | ||
375 | |||
376 | /* If we haven't received a specific coalescing setting | ||
377 | * (module param), we set the moderation parameters as follows: | ||
378 | * - moder_cnt is set to the number of MTU-sized packets to | ||
379 | * satisfy our coalescing target. | ||
380 | * - moder_time is set to a fixed value. | ||
381 | */ | ||
382 | priv->rx_frames = (mdev->profile.rx_moder_cnt == | ||
383 | MLX4_EN_AUTO_CONF) ? | ||
384 | MLX4_EN_RX_COAL_TARGET / | ||
385 | priv->dev->mtu + 1 : | ||
386 | mdev->profile.rx_moder_cnt; | ||
387 | priv->rx_usecs = (mdev->profile.rx_moder_time == | ||
388 | MLX4_EN_AUTO_CONF) ? | ||
389 | MLX4_EN_RX_COAL_TIME : | ||
390 | mdev->profile.rx_moder_time; | ||
391 | mlx4_dbg(INTR, priv, "Default coalescing params for mtu:%d - " | ||
392 | "rx_frames:%d rx_usecs:%d\n", | ||
393 | priv->dev->mtu, priv->rx_frames, priv->rx_usecs); | ||
394 | |||
395 | /* Setup cq moderation params */ | ||
396 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
397 | cq = &priv->rx_cq[i]; | ||
398 | cq->moder_cnt = priv->rx_frames; | ||
399 | cq->moder_time = priv->rx_usecs; | ||
400 | } | ||
401 | |||
402 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
403 | cq = &priv->tx_cq[i]; | ||
404 | cq->moder_cnt = MLX4_EN_TX_COAL_PKTS; | ||
405 | cq->moder_time = MLX4_EN_TX_COAL_TIME; | ||
406 | } | ||
407 | |||
408 | /* Reset auto-moderation params */ | ||
409 | priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; | ||
410 | priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; | ||
411 | priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; | ||
412 | priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; | ||
413 | priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; | ||
414 | priv->adaptive_rx_coal = mdev->profile.auto_moder; | ||
415 | priv->last_moder_time = MLX4_EN_AUTO_CONF; | ||
416 | priv->last_moder_jiffies = 0; | ||
417 | priv->last_moder_packets = 0; | ||
418 | priv->last_moder_tx_packets = 0; | ||
419 | priv->last_moder_bytes = 0; | ||
420 | } | ||
421 | |||
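A worked example of the rx_frames arithmetic above; the target constant is defined in mlx4_en.h and its value here is an assumption for illustration:

	/* Assuming MLX4_EN_RX_COAL_TARGET were 0x10000 (64 KB) and
	 * mtu = 1500:
	 *
	 *   rx_frames = 0x10000 / 1500 + 1 = 43 + 1 = 44
	 *
	 * i.e. the Rx CQ is moderated to fire roughly once per
	 * coalescing-target's worth of MTU-sized packets, unless the
	 * rx_usecs timeout expires first. */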
422 | static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | ||
423 | { | ||
424 | unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); | ||
425 | struct mlx4_en_dev *mdev = priv->mdev; | ||
426 | struct mlx4_en_cq *cq; | ||
427 | unsigned long packets; | ||
428 | unsigned long rate; | ||
429 | unsigned long avg_pkt_size; | ||
430 | unsigned long rx_packets; | ||
431 | unsigned long rx_bytes; | ||
432 | unsigned long tx_packets; | ||
433 | unsigned long tx_pkt_diff; | ||
434 | unsigned long rx_pkt_diff; | ||
435 | int moder_time; | ||
436 | int i, err; | ||
437 | |||
438 | if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) | ||
439 | return; | ||
440 | |||
441 | spin_lock_bh(&priv->stats_lock); | ||
442 | rx_packets = priv->stats.rx_packets; | ||
443 | rx_bytes = priv->stats.rx_bytes; | ||
444 | tx_packets = priv->stats.tx_packets; | ||
445 | spin_unlock_bh(&priv->stats_lock); | ||
446 | |||
447 | if (!priv->last_moder_jiffies || !period) | ||
448 | goto out; | ||
449 | |||
450 | tx_pkt_diff = ((unsigned long) (tx_packets - | ||
451 | priv->last_moder_tx_packets)); | ||
452 | rx_pkt_diff = ((unsigned long) (rx_packets - | ||
453 | priv->last_moder_packets)); | ||
454 | packets = max(tx_pkt_diff, rx_pkt_diff); | ||
455 | rate = packets * HZ / period; | ||
456 | avg_pkt_size = packets ? ((unsigned long) (rx_bytes - | ||
457 | priv->last_moder_bytes)) / packets : 0; | ||
458 | |||
459 | /* Apply auto-moderation only when packet rate exceeds a rate | ||
460 | * at which it matters */ | ||
461 | if (rate > MLX4_EN_RX_RATE_THRESH) { | ||
462 | /* If tx and rx packet rates are not balanced, assume that | ||
463 | * traffic is mainly BW bound and apply maximum moderation. | ||
464 | * Otherwise, moderate according to packet rate */ | ||
465 | if (2 * tx_pkt_diff > 3 * rx_pkt_diff || | ||
466 | 2 * rx_pkt_diff > 3 * tx_pkt_diff) { | ||
467 | moder_time = priv->rx_usecs_high; | ||
468 | } else { | ||
469 | if (rate < priv->pkt_rate_low) | ||
470 | moder_time = priv->rx_usecs_low; | ||
471 | else if (rate > priv->pkt_rate_high) | ||
472 | moder_time = priv->rx_usecs_high; | ||
473 | else | ||
474 | moder_time = (rate - priv->pkt_rate_low) * | ||
475 | (priv->rx_usecs_high - priv->rx_usecs_low) / | ||
476 | (priv->pkt_rate_high - priv->pkt_rate_low) + | ||
477 | priv->rx_usecs_low; | ||
478 | } | ||
479 | } else { | ||
480 | /* When packet rate is low, use default moderation rather than | ||
481 | * 0 to prevent interrupt storms if traffic suddenly increases */ | ||
482 | moder_time = priv->rx_usecs; | ||
483 | } | ||
484 | |||
485 | mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", | ||
486 | tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period); | ||
487 | |||
488 | mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu " | ||
489 | "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s]\n", | ||
490 | priv->last_moder_time, moder_time, period, packets, | ||
491 | avg_pkt_size, rate); | ||
492 | |||
493 | if (moder_time != priv->last_moder_time) { | ||
494 | priv->last_moder_time = moder_time; | ||
495 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
496 | cq = &priv->rx_cq[i]; | ||
497 | cq->moder_time = moder_time; | ||
498 | err = mlx4_en_set_cq_moder(priv, cq); | ||
499 | if (err) { | ||
500 | mlx4_err(mdev, "Failed modifying moderation for cq:%d " | ||
501 | "on port:%d\n", i, priv->port); | ||
502 | break; | ||
503 | } | ||
504 | } | ||
505 | } | ||
506 | |||
507 | out: | ||
508 | priv->last_moder_packets = rx_packets; | ||
509 | priv->last_moder_tx_packets = tx_packets; | ||
510 | priv->last_moder_bytes = rx_bytes; | ||
511 | priv->last_moder_jiffies = jiffies; | ||
512 | } | ||
513 | |||
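The balanced-traffic branch above linearly interpolates the measured packet rate onto the [rx_usecs_low, rx_usecs_high] interval. A worked example with hypothetical thresholds (the real defaults come from the MLX4_EN_RX_RATE_* and MLX4_EN_RX_COAL_TIME_* constants):

	/* pkt_rate_low = 10000 p/s, pkt_rate_high = 100000 p/s,
	 * rx_usecs_low = 16, rx_usecs_high = 128, measured rate = 55000 p/s:
	 *
	 *   moder_time = (55000 - 10000) * (128 - 16) / (100000 - 10000) + 16
	 *              = 45000 * 112 / 90000 + 16
	 *              = 56 + 16
	 *              = 72 usec
	 */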
514 | static void mlx4_en_do_get_stats(struct work_struct *work) | ||
515 | { | ||
516 | struct delayed_work *delay = container_of(work, struct delayed_work, work); | ||
517 | struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, | ||
518 | stats_task); | ||
519 | struct mlx4_en_dev *mdev = priv->mdev; | ||
520 | int err; | ||
521 | |||
522 | err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); | ||
523 | if (err) | ||
524 | mlx4_dbg(HW, priv, "Could not update stats for " | ||
525 | "port:%d\n", priv->port); | ||
526 | |||
527 | mutex_lock(&mdev->state_lock); | ||
528 | if (mdev->device_up) { | ||
529 | if (priv->port_up) | ||
530 | mlx4_en_auto_moderation(priv); | ||
531 | |||
532 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | ||
533 | } | ||
534 | mutex_unlock(&mdev->state_lock); | ||
535 | } | ||
536 | |||
537 | static void mlx4_en_linkstate(struct work_struct *work) | ||
538 | { | ||
539 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
540 | linkstate_task); | ||
541 | struct mlx4_en_dev *mdev = priv->mdev; | ||
542 | int linkstate = priv->link_state; | ||
543 | |||
544 | mutex_lock(&mdev->state_lock); | ||
545 | /* If observable port state changed set carrier state and | ||
546 | * report to system log */ | ||
547 | if (priv->last_link_state != linkstate) { | ||
548 | if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { | ||
549 | if (netif_msg_link(priv)) | ||
550 | mlx4_info(mdev, "Port %d - link down\n", priv->port); | ||
551 | netif_carrier_off(priv->dev); | ||
552 | } else { | ||
553 | if (netif_msg_link(priv)) | ||
554 | mlx4_info(mdev, "Port %d - link up\n", priv->port); | ||
555 | netif_carrier_on(priv->dev); | ||
556 | } | ||
557 | } | ||
558 | priv->last_link_state = linkstate; | ||
559 | mutex_unlock(&mdev->state_lock); | ||
560 | } | ||
561 | |||
562 | |||
563 | static int mlx4_en_start_port(struct net_device *dev) | ||
564 | { | ||
565 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
566 | struct mlx4_en_dev *mdev = priv->mdev; | ||
567 | struct mlx4_en_cq *cq; | ||
568 | struct mlx4_en_tx_ring *tx_ring; | ||
569 | struct mlx4_en_rx_ring *rx_ring; | ||
570 | int rx_index = 0; | ||
571 | int tx_index = 0; | ||
572 | u16 stride; | ||
573 | int err = 0; | ||
574 | int i; | ||
575 | int j; | ||
576 | |||
577 | if (priv->port_up) { | ||
578 | mlx4_dbg(DRV, priv, "start port called while port already up\n"); | ||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | /* Calculate Rx buf size */ | ||
583 | dev->mtu = min(dev->mtu, priv->max_mtu); | ||
584 | mlx4_en_calc_rx_buf(dev); | ||
585 | mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); | ||
586 | stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + | ||
587 | DS_SIZE * priv->num_frags); | ||
588 | /* Configure rx cq's and rings */ | ||
589 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
590 | cq = &priv->rx_cq[i]; | ||
591 | rx_ring = &priv->rx_ring[i]; | ||
592 | |||
593 | err = mlx4_en_activate_cq(priv, cq); | ||
594 | if (err) { | ||
595 | mlx4_err(mdev, "Failed activating Rx CQ\n"); | ||
596 | goto rx_err; | ||
597 | } | ||
598 | for (j = 0; j < cq->size; j++) | ||
599 | cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; | ||
600 | err = mlx4_en_set_cq_moder(priv, cq); | ||
601 | if (err) { | ||
602 | mlx4_err(mdev, "Failed setting cq moderation parameters"); | ||
603 | mlx4_en_deactivate_cq(priv, cq); | ||
604 | goto cq_err; | ||
605 | } | ||
606 | mlx4_en_arm_cq(priv, cq); | ||
607 | |||
608 | ++rx_index; | ||
609 | } | ||
610 | |||
611 | err = mlx4_en_activate_rx_rings(priv); | ||
612 | if (err) { | ||
613 | mlx4_err(mdev, "Failed to activate RX rings\n"); | ||
614 | goto cq_err; | ||
615 | } | ||
616 | |||
617 | err = mlx4_en_config_rss_steer(priv); | ||
618 | if (err) { | ||
619 | mlx4_err(mdev, "Failed configuring rss steering\n"); | ||
620 | goto rx_err; | ||
621 | } | ||
622 | |||
623 | /* Configure tx cq's and rings */ | ||
624 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
625 | /* Configure cq */ | ||
626 | cq = &priv->tx_cq[i]; | ||
627 | err = mlx4_en_activate_cq(priv, cq); | ||
628 | if (err) { | ||
629 | mlx4_err(mdev, "Failed allocating Tx CQ\n"); | ||
630 | goto tx_err; | ||
631 | } | ||
632 | err = mlx4_en_set_cq_moder(priv, cq); | ||
633 | if (err) { | ||
634 | mlx4_err(mdev, "Failed setting cq moderation parameters\n"); | ||
635 | mlx4_en_deactivate_cq(priv, cq); | ||
636 | goto tx_err; | ||
637 | } | ||
638 | mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); | ||
639 | cq->buf->wqe_index = cpu_to_be16(0xffff); | ||
640 | |||
641 | /* Configure ring */ | ||
642 | tx_ring = &priv->tx_ring[i]; | ||
643 | err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, | ||
644 | priv->rx_ring[0].srq.srqn); | ||
645 | if (err) { | ||
646 | mlx4_err(mdev, "Failed allocating Tx ring\n"); | ||
647 | mlx4_en_deactivate_cq(priv, cq); | ||
648 | goto tx_err; | ||
649 | } | ||
650 | /* Set initial ownership of all Tx TXBBs to SW (1) */ | ||
651 | for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) | ||
652 | *((u32 *) (tx_ring->buf + j)) = 0xffffffff; | ||
653 | ++tx_index; | ||
654 | } | ||
655 | |||
656 | /* Configure port */ | ||
657 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
658 | priv->rx_skb_size + ETH_FCS_LEN, | ||
659 | mdev->profile.tx_pause, | ||
660 | mdev->profile.tx_ppp, | ||
661 | mdev->profile.rx_pause, | ||
662 | mdev->profile.rx_ppp); | ||
663 | if (err) { | ||
664 | mlx4_err(mdev, "Failed setting port general configurations" | ||
665 | " for port %d, with error %d\n", priv->port, err); | ||
666 | goto tx_err; | ||
667 | } | ||
668 | /* Set default qp number */ | ||
669 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); | ||
670 | if (err) { | ||
671 | mlx4_err(mdev, "Failed setting default qp numbers\n"); | ||
672 | goto tx_err; | ||
673 | } | ||
674 | /* Set port mac number */ | ||
675 | mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); | ||
676 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
677 | priv->mac, &priv->mac_index); | ||
678 | if (err) { | ||
679 | mlx4_err(mdev, "Failed setting port mac\n"); | ||
680 | goto tx_err; | ||
681 | } | ||
682 | |||
683 | /* Init port */ | ||
684 | mlx4_dbg(HW, priv, "Initializing port\n"); | ||
685 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | ||
686 | if (err) { | ||
687 | mlx4_err(mdev, "Failed Initializing port\n"); | ||
688 | goto mac_err; | ||
689 | } | ||
690 | |||
691 | /* Schedule multicast task to populate multicast list */ | ||
692 | queue_work(mdev->workqueue, &priv->mcast_task); | ||
693 | |||
694 | priv->port_up = true; | ||
695 | netif_start_queue(dev); | ||
696 | return 0; | ||
697 | |||
698 | mac_err: | ||
699 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | ||
700 | tx_err: | ||
701 | while (tx_index--) { | ||
702 | mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); | ||
703 | mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]); | ||
704 | } | ||
705 | |||
706 | mlx4_en_release_rss_steer(priv); | ||
707 | rx_err: | ||
708 | for (i = 0; i < priv->rx_ring_num; i++) | ||
709 | mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); | ||
710 | cq_err: | ||
711 | while (rx_index--) | ||
712 | mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); | ||
713 | |||
714 | return err; /* need to close devices */ | ||
715 | } | ||
716 | |||
717 | |||
718 | static void mlx4_en_stop_port(struct net_device *dev) | ||
719 | { | ||
720 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
721 | struct mlx4_en_dev *mdev = priv->mdev; | ||
722 | int i; | ||
723 | |||
724 | if (!priv->port_up) { | ||
725 | mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n", | ||
726 | priv->port); | ||
727 | return; | ||
728 | } | ||
729 | netif_stop_queue(dev); | ||
730 | |||
731 | /* Synchronize with tx routine */ | ||
732 | netif_tx_lock_bh(dev); | ||
733 | priv->port_up = false; | ||
734 | netif_tx_unlock_bh(dev); | ||
735 | |||
736 | /* Close port */ | ||
737 | mlx4_CLOSE_PORT(mdev->dev, priv->port); | ||
738 | |||
739 | /* Unregister MAC address for the port */ | ||
740 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | ||
741 | |||
742 | /* Free TX Rings */ | ||
743 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
744 | mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]); | ||
745 | mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]); | ||
746 | } | ||
747 | msleep(10); | ||
748 | |||
749 | for (i = 0; i < priv->tx_ring_num; i++) | ||
750 | mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]); | ||
751 | |||
752 | /* Free RSS qps */ | ||
753 | mlx4_en_release_rss_steer(priv); | ||
754 | |||
755 | /* Free RX Rings */ | ||
756 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
757 | mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); | ||
758 | while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state)) | ||
759 | msleep(1); | ||
760 | mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]); | ||
761 | } | ||
762 | } | ||
763 | |||
764 | static void mlx4_en_restart(struct work_struct *work) | ||
765 | { | ||
766 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
767 | watchdog_task); | ||
768 | struct mlx4_en_dev *mdev = priv->mdev; | ||
769 | struct net_device *dev = priv->dev; | ||
770 | |||
771 | mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); | ||
772 | mlx4_en_stop_port(dev); | ||
773 | if (mlx4_en_start_port(dev)) | ||
774 | mlx4_err(mdev, "Failed restarting port %d\n", priv->port); | ||
775 | } | ||
776 | |||
777 | |||
778 | static int mlx4_en_open(struct net_device *dev) | ||
779 | { | ||
780 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
781 | struct mlx4_en_dev *mdev = priv->mdev; | ||
782 | int i; | ||
783 | int err = 0; | ||
784 | |||
785 | mutex_lock(&mdev->state_lock); | ||
786 | |||
787 | if (!mdev->device_up) { | ||
788 | mlx4_err(mdev, "Cannot open - device down/disabled\n"); | ||
789 | err = -EBUSY; | ||
790 | goto out; | ||
791 | } | ||
792 | |||
793 | /* Reset HW statistics and performance counters */ | ||
794 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) | ||
795 | mlx4_dbg(HW, priv, "Failed dumping statistics\n"); | ||
796 | |||
797 | memset(&priv->stats, 0, sizeof(priv->stats)); | ||
798 | memset(&priv->pstats, 0, sizeof(priv->pstats)); | ||
799 | |||
800 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
801 | priv->tx_ring[i].bytes = 0; | ||
802 | priv->tx_ring[i].packets = 0; | ||
803 | } | ||
804 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
805 | priv->rx_ring[i].bytes = 0; | ||
806 | priv->rx_ring[i].packets = 0; | ||
807 | } | ||
808 | |||
809 | mlx4_en_set_default_moderation(priv); | ||
810 | err = mlx4_en_start_port(dev); | ||
811 | if (err) | ||
812 | mlx4_err(mdev, "Failed starting port:%d\n", priv->port); | ||
813 | |||
814 | out: | ||
815 | mutex_unlock(&mdev->state_lock); | ||
816 | return err; | ||
817 | } | ||
818 | |||
819 | |||
820 | static int mlx4_en_close(struct net_device *dev) | ||
821 | { | ||
822 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
823 | struct mlx4_en_dev *mdev = priv->mdev; | ||
824 | |||
825 | if (netif_msg_ifdown(priv)) | ||
826 | mlx4_info(mdev, "Close called for port:%d\n", priv->port); | ||
827 | |||
828 | mutex_lock(&mdev->state_lock); | ||
829 | |||
830 | mlx4_en_stop_port(dev); | ||
831 | netif_carrier_off(dev); | ||
832 | |||
833 | mutex_unlock(&mdev->state_lock); | ||
834 | return 0; | ||
835 | } | ||
836 | |||
837 | static void mlx4_en_free_resources(struct mlx4_en_priv *priv) | ||
838 | { | ||
839 | int i; | ||
840 | |||
841 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
842 | if (priv->tx_ring[i].tx_info) | ||
843 | mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); | ||
844 | if (priv->tx_cq[i].buf) | ||
845 | mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); | ||
846 | } | ||
847 | |||
848 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
849 | if (priv->rx_ring[i].rx_info) | ||
850 | mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); | ||
851 | if (priv->rx_cq[i].buf) | ||
852 | mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); | ||
853 | } | ||
854 | } | ||
855 | |||
856 | static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | ||
857 | { | ||
858 | struct mlx4_en_dev *mdev = priv->mdev; | ||
859 | struct mlx4_en_port_profile *prof = priv->prof; | ||
860 | int i; | ||
861 | |||
862 | /* Create tx Rings */ | ||
863 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
864 | if (mlx4_en_create_cq(priv, &priv->tx_cq[i], | ||
865 | prof->tx_ring_size, i, TX)) | ||
866 | goto err; | ||
867 | |||
868 | if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], | ||
869 | prof->tx_ring_size, TXBB_SIZE)) | ||
870 | goto err; | ||
871 | } | ||
872 | |||
873 | /* Create rx Rings */ | ||
874 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
875 | if (mlx4_en_create_cq(priv, &priv->rx_cq[i], | ||
876 | prof->rx_ring_size, i, RX)) | ||
877 | goto err; | ||
878 | |||
879 | if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], | ||
880 | prof->rx_ring_size, priv->stride)) | ||
881 | goto err; | ||
882 | } | ||
883 | |||
884 | return 0; | ||
885 | |||
886 | err: | ||
887 | mlx4_err(mdev, "Failed to allocate NIC resources\n"); | ||
888 | return -ENOMEM; | ||
889 | } | ||
890 | |||
891 | |||
892 | void mlx4_en_destroy_netdev(struct net_device *dev) | ||
893 | { | ||
894 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
895 | struct mlx4_en_dev *mdev = priv->mdev; | ||
896 | |||
897 | mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); | ||
898 | |||
899 | /* Unregister device - this will close the port if it was up */ | ||
900 | if (priv->registered) | ||
901 | unregister_netdev(dev); | ||
902 | |||
903 | if (priv->allocated) | ||
904 | mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); | ||
905 | |||
906 | cancel_delayed_work(&priv->stats_task); | ||
907 | cancel_delayed_work(&priv->refill_task); | ||
908 | /* flush any pending task for this netdev */ | ||
909 | flush_workqueue(mdev->workqueue); | ||
910 | |||
911 | /* Detach the netdev so tasks would not attempt to access it */ | ||
912 | mutex_lock(&mdev->state_lock); | ||
913 | mdev->pndev[priv->port] = NULL; | ||
914 | mutex_unlock(&mdev->state_lock); | ||
915 | |||
916 | mlx4_en_free_resources(priv); | ||
917 | free_netdev(dev); | ||
918 | } | ||
919 | |||
920 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | ||
921 | { | ||
922 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
923 | struct mlx4_en_dev *mdev = priv->mdev; | ||
924 | int err = 0; | ||
925 | |||
926 | mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", | ||
927 | dev->mtu, new_mtu); | ||
928 | |||
929 | if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { | ||
930 | mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu); | ||
931 | return -EPERM; | ||
932 | } | ||
933 | dev->mtu = new_mtu; | ||
934 | |||
935 | if (netif_running(dev)) { | ||
936 | mutex_lock(&mdev->state_lock); | ||
937 | if (!mdev->device_up) { | ||
938 | /* NIC is probably restarting - let watchdog task reset | ||
939 | * the port */ | ||
940 | mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n"); | ||
941 | } else { | ||
942 | mlx4_en_stop_port(dev); | ||
943 | mlx4_en_set_default_moderation(priv); | ||
944 | err = mlx4_en_start_port(dev); | ||
945 | if (err) { | ||
946 | mlx4_err(mdev, "Failed restarting port:%d\n", | ||
947 | priv->port); | ||
948 | queue_work(mdev->workqueue, &priv->watchdog_task); | ||
949 | } | ||
950 | } | ||
951 | mutex_unlock(&mdev->state_lock); | ||
952 | } | ||
953 | return 0; | ||
954 | } | ||
955 | |||
956 | int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | ||
957 | struct mlx4_en_port_profile *prof) | ||
958 | { | ||
959 | struct net_device *dev; | ||
960 | struct mlx4_en_priv *priv; | ||
961 | int i; | ||
962 | int err; | ||
963 | |||
964 | dev = alloc_etherdev(sizeof(struct mlx4_en_priv)); | ||
965 | if (dev == NULL) { | ||
966 | mlx4_err(mdev, "Net device allocation failed\n"); | ||
967 | return -ENOMEM; | ||
968 | } | ||
969 | |||
970 | SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); | ||
971 | |||
972 | /* | ||
973 | * Initialize driver private data | ||
974 | */ | ||
975 | |||
976 | priv = netdev_priv(dev); | ||
977 | memset(priv, 0, sizeof(struct mlx4_en_priv)); | ||
978 | priv->dev = dev; | ||
979 | priv->mdev = mdev; | ||
980 | priv->prof = prof; | ||
981 | priv->port = port; | ||
982 | priv->port_up = false; | ||
983 | priv->rx_csum = 1; | ||
984 | priv->flags = prof->flags; | ||
985 | priv->tx_ring_num = prof->tx_ring_num; | ||
986 | priv->rx_ring_num = prof->rx_ring_num; | ||
987 | priv->mc_list = NULL; | ||
988 | priv->mac_index = -1; | ||
989 | priv->msg_enable = MLX4_EN_MSG_LEVEL; | ||
990 | spin_lock_init(&priv->stats_lock); | ||
991 | INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast); | ||
992 | INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); | ||
993 | INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill); | ||
994 | INIT_WORK(&priv->watchdog_task, mlx4_en_restart); | ||
995 | INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); | ||
996 | INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); | ||
997 | |||
998 | /* Query for default mac and max mtu */ | ||
999 | priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; | ||
1000 | priv->mac = mdev->dev->caps.def_mac[priv->port]; | ||
1001 | if (ILLEGAL_MAC(priv->mac)) { | ||
1002 | mlx4_err(mdev, "Port: %d, invalid MAC burned: 0x%llx, quitting\n", | ||
1003 | priv->port, priv->mac); | ||
1004 | err = -EINVAL; | ||
1005 | goto out; | ||
1006 | } | ||
1007 | |||
1008 | priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + | ||
1009 | DS_SIZE * MLX4_EN_MAX_RX_FRAGS); | ||
1010 | err = mlx4_en_alloc_resources(priv); | ||
1011 | if (err) | ||
1012 | goto out; | ||
1013 | |||
1014 | /* Populate Rx default RSS mappings */ | ||
1015 | mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num * | ||
1016 | RSS_FACTOR, priv->rx_ring_num); | ||
1017 | /* Allocate page for receive rings */ | ||
1018 | err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, | ||
1019 | MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); | ||
1020 | if (err) { | ||
1021 | mlx4_err(mdev, "Failed to allocate page for rx qps\n"); | ||
1022 | goto out; | ||
1023 | } | ||
1024 | priv->allocated = 1; | ||
1025 | |||
1026 | /* Populate Tx priority mappings */ | ||
1027 | mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num); | ||
1028 | |||
1029 | /* | ||
1030 | * Initialize netdev entry points | ||
1031 | */ | ||
1032 | |||
1033 | dev->open = &mlx4_en_open; | ||
1034 | dev->stop = &mlx4_en_close; | ||
1035 | dev->hard_start_xmit = &mlx4_en_xmit; | ||
1036 | dev->get_stats = &mlx4_en_get_stats; | ||
1037 | dev->set_multicast_list = &mlx4_en_set_multicast; | ||
1038 | dev->set_mac_address = &mlx4_en_set_mac; | ||
1039 | dev->change_mtu = &mlx4_en_change_mtu; | ||
1040 | dev->tx_timeout = &mlx4_en_tx_timeout; | ||
1041 | dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; | ||
1042 | dev->vlan_rx_register = mlx4_en_vlan_rx_register; | ||
1043 | dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid; | ||
1044 | dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid; | ||
1045 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1046 | dev->poll_controller = mlx4_en_netpoll; | ||
1047 | #endif | ||
1048 | SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); | ||
1049 | |||
1050 | /* Set default MAC */ | ||
1051 | dev->addr_len = ETH_ALEN; | ||
1052 | for (i = 0; i < ETH_ALEN; i++) | ||
1053 | dev->dev_addr[ETH_ALEN - 1 - i] = | ||
1054 | (u8) (priv->mac >> (8 * i)); | ||
1055 | |||
1056 | /* | ||
1057 | * Set driver features | ||
1058 | */ | ||
1059 | dev->features |= NETIF_F_SG; | ||
1060 | dev->features |= NETIF_F_HW_CSUM; | ||
1061 | dev->features |= NETIF_F_HIGHDMA; | ||
1062 | dev->features |= NETIF_F_HW_VLAN_TX | | ||
1063 | NETIF_F_HW_VLAN_RX | | ||
1064 | NETIF_F_HW_VLAN_FILTER; | ||
1065 | if (mdev->profile.num_lro) | ||
1066 | dev->features |= NETIF_F_LRO; | ||
1067 | if (mdev->LSO_support) { | ||
1068 | dev->features |= NETIF_F_TSO; | ||
1069 | dev->features |= NETIF_F_TSO6; | ||
1070 | } | ||
1071 | |||
1072 | mdev->pndev[port] = dev; | ||
1073 | |||
1074 | netif_carrier_off(dev); | ||
1075 | err = register_netdev(dev); | ||
1076 | if (err) { | ||
1077 | mlx4_err(mdev, "Netdev registration failed\n"); | ||
1078 | goto out; | ||
1079 | } | ||
1080 | priv->registered = 1; | ||
1081 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | ||
1082 | return 0; | ||
1083 | |||
1084 | out: | ||
1085 | mlx4_en_destroy_netdev(dev); | ||
1086 | return err; | ||
1087 | } | ||
1088 | |||
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c new file mode 100644 index 000000000000..c2e69b1bcd0a --- /dev/null +++ b/drivers/net/mlx4/en_params.c | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/ethtool.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | |||
38 | #include "mlx4_en.h" | ||
39 | #include "en_port.h" | ||
40 | |||
41 | #define MLX4_EN_PARM_INT(X, def_val, desc) \ | ||
42 | static unsigned int X = def_val;\ | ||
43 | module_param(X, uint, 0444); \ | ||
44 | MODULE_PARM_DESC(X, desc); | ||
45 | |||
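For reference, one expansion of this helper (whitespace normalized); the 0444 permission makes each parameter visible but read-only in sysfs:

	/* MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS")
	 * expands to:
	 *
	 *   static unsigned int rss_xor = 0;
	 *   module_param(rss_xor, uint, 0444);
	 *   MODULE_PARM_DESC(rss_xor, "Use XOR hash function for RSS");
	 *
	 * readable at /sys/module/mlx4_en/parameters/rss_xor but not
	 * writable at runtime. */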
46 | |||
47 | /* | ||
48 | * Device scope module parameters | ||
49 | */ | ||
50 | |||
51 | |||
52 | /* Use an XOR rather than a Toeplitz hash function for RSS */ | ||
53 | MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS"); | ||
54 | |||
55 | /* RSS hash type mask - default to <saddr, daddr, sport, dport> */ | ||
56 | MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask"); | ||
57 | |||
58 | /* Number of LRO sessions per Rx ring (rounded up to a power of two) */ | ||
59 | MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS, | ||
60 | "Number of LRO sessions per ring or disabled (0)"); | ||
61 | |||
62 | /* Priority pausing */ | ||
63 | MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE, | ||
64 | "Pause policy on TX: 0 never generate pause frames " | ||
65 | "1 generate pause frames according to RX buffer threshold"); | ||
66 | MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE, | ||
67 | "Pause policy on RX: 0 ignore received pause frames " | ||
68 | "1 respect received pause frames"); | ||
69 | MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." | ||
70 | " Per priority bit mask"); | ||
71 | MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." | ||
72 | " Per priority bit mask"); | ||
73 | |||
74 | /* Interrupt moderation tuning */ | ||
75 | MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF, | ||
76 | "Max coalesced descriptors for Rx interrupt moderation"); | ||
77 | MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF, | ||
78 | "Timeout following last packet for Rx interrupt moderation"); | ||
79 | MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation"); | ||
80 | |||
81 | MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number of Rx rings for port 1 (0 = #cores)"); | ||
82 | MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number of Rx rings for port 2 (0 = #cores)"); | ||
83 | |||
84 | MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1"); | ||
85 | MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2"); | ||
86 | MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1"); | ||
87 | MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2"); | ||
88 | |||
89 | |||
90 | int mlx4_en_get_profile(struct mlx4_en_dev *mdev) | ||
91 | { | ||
92 | struct mlx4_en_profile *params = &mdev->profile; | ||
93 | |||
94 | params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF); | ||
95 | params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF); | ||
96 | params->auto_moder = auto_moder; | ||
97 | params->rss_xor = (rss_xor != 0); | ||
98 | params->rss_mask = rss_mask & 0x1f; | ||
99 | params->num_lro = min_t(int, num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS); | ||
100 | params->rx_pause = pprx; | ||
101 | params->rx_ppp = pfcrx; | ||
102 | params->tx_pause = pptx; | ||
103 | params->tx_ppp = pfctx; | ||
104 | if (params->rx_ppp || params->tx_ppp) { | ||
105 | params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM; | ||
106 | params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM; | ||
107 | } else { | ||
108 | params->prof[1].tx_ring_num = 1; | ||
109 | params->prof[2].tx_ring_num = 1; | ||
110 | } | ||
111 | params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS); | ||
112 | params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS); | ||
113 | |||
114 | if (tx_ring_size1 == MLX4_EN_AUTO_CONF) | ||
115 | tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE; | ||
116 | params->prof[1].tx_ring_size = | ||
117 | (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ? | ||
118 | MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1); | ||
119 | |||
120 | if (tx_ring_size2 == MLX4_EN_AUTO_CONF) | ||
121 | tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE; | ||
122 | params->prof[2].tx_ring_size = | ||
123 | (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ? | ||
124 | MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2); | ||
125 | |||
126 | if (rx_ring_size1 == MLX4_EN_AUTO_CONF) | ||
127 | rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE; | ||
128 | params->prof[1].rx_ring_size = | ||
129 | (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ? | ||
130 | MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1); | ||
131 | |||
132 | if (rx_ring_size2 == MLX4_EN_AUTO_CONF) | ||
133 | rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE; | ||
134 | params->prof[2].rx_ring_size = | ||
135 | (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ? | ||
136 | MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2); | ||
137 | return 0; | ||
138 | } | ||
139 | |||
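Each ring size above is normalized the same way: MLX4_EN_AUTO_CONF selects the default, anything below the minimum is clamped to the minimum, and everything else is rounded up to a power of two. For example (input sizes hypothetical):

	/*   tx_ring_size1 = MLX4_EN_AUTO_CONF   -> MLX4_EN_DEF_TX_RING_SIZE,
	 *                                          then rounded as below
	 *   tx_ring_size1 = 1000                -> roundup_pow_of_two(1000) = 1024
	 *   tx_ring_size1 < MLX4_EN_MIN_TX_SIZE -> exactly MLX4_EN_MIN_TX_SIZE */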
140 | |||
141 | /* | ||
142 | * Ethtool support | ||
143 | */ | ||
144 | |||
145 | static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv) | ||
146 | { | ||
147 | int i; | ||
148 | |||
149 | priv->port_stats.lro_aggregated = 0; | ||
150 | priv->port_stats.lro_flushed = 0; | ||
151 | priv->port_stats.lro_no_desc = 0; | ||
152 | |||
153 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
154 | priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated; | ||
155 | priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed; | ||
156 | priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc; | ||
157 | } | ||
158 | } | ||
159 | |||
160 | static void | ||
161 | mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) | ||
162 | { | ||
163 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
164 | struct mlx4_en_dev *mdev = priv->mdev; | ||
165 | |||
166 | sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id); | ||
167 | strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); | ||
168 | sprintf(drvinfo->fw_version, "%d.%d.%d", | ||
169 | (u16) (mdev->dev->caps.fw_ver >> 32), | ||
170 | (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), | ||
171 | (u16) (mdev->dev->caps.fw_ver & 0xffff)); | ||
172 | strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32); | ||
173 | drvinfo->n_stats = 0; | ||
174 | drvinfo->regdump_len = 0; | ||
175 | drvinfo->eedump_len = 0; | ||
176 | } | ||
177 | |||
178 | static u32 mlx4_en_get_tso(struct net_device *dev) | ||
179 | { | ||
180 | return (dev->features & NETIF_F_TSO) != 0; | ||
181 | } | ||
182 | |||
183 | static int mlx4_en_set_tso(struct net_device *dev, u32 data) | ||
184 | { | ||
185 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
186 | |||
187 | if (data) { | ||
188 | if (!priv->mdev->LSO_support) | ||
189 | return -EPERM; | ||
190 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); | ||
191 | } else | ||
192 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | static u32 mlx4_en_get_rx_csum(struct net_device *dev) | ||
197 | { | ||
198 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
199 | return priv->rx_csum; | ||
200 | } | ||
201 | |||
202 | static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data) | ||
203 | { | ||
204 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
205 | priv->rx_csum = (data != 0); | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static const char main_strings[][ETH_GSTRING_LEN] = { | ||
210 | "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", | ||
211 | "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", | ||
212 | "rx_length_errors", "rx_over_errors", "rx_crc_errors", | ||
213 | "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", | ||
214 | "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", | ||
215 | "tx_heartbeat_errors", "tx_window_errors", | ||
216 | |||
217 | /* port statistics */ | ||
218 | "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets", | ||
219 | "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", | ||
220 | "rx_csum_good", "rx_csum_none", "tx_chksum_offload", | ||
221 | |||
222 | /* packet statistics */ | ||
223 | "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3", | ||
224 | "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0", | ||
225 | "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5", | ||
226 | "tx_prio_6", "tx_prio_7", | ||
227 | }; | ||
228 | #define NUM_MAIN_STATS 21 | ||
229 | #define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS) | ||
230 | |||
231 | static u32 mlx4_en_get_msglevel(struct net_device *dev) | ||
232 | { | ||
233 | return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable; | ||
234 | } | ||
235 | |||
236 | static void mlx4_en_set_msglevel(struct net_device *dev, u32 val) | ||
237 | { | ||
238 | ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val; | ||
239 | } | ||
240 | |||
241 | static void mlx4_en_get_wol(struct net_device *netdev, | ||
242 | struct ethtool_wolinfo *wol) | ||
243 | { | ||
244 | wol->supported = 0; | ||
245 | wol->wolopts = 0; | ||
246 | |||
247 | return; | ||
248 | } | ||
249 | |||
250 | static int mlx4_en_get_sset_count(struct net_device *dev, int sset) | ||
251 | { | ||
252 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
253 | |||
254 | if (sset != ETH_SS_STATS) | ||
255 | return -EOPNOTSUPP; | ||
256 | |||
257 | return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2; | ||
258 | } | ||
259 | |||
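The count returned here must match the strings emitted by mlx4_en_get_strings() and the values filled in by mlx4_en_get_ethtool_stats() below. For example:

	/* With 1 Tx ring and 4 Rx rings the driver reports
	 *
	 *   NUM_ALL_STATS + (1 + 4) * 2
	 *
	 * entries: the main/port/packet counters plus one packets and one
	 * bytes counter per ring ("tx0_packets", "tx0_bytes",
	 * "rx0_packets", ...). */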
260 | static void mlx4_en_get_ethtool_stats(struct net_device *dev, | ||
261 | struct ethtool_stats *stats, uint64_t *data) | ||
262 | { | ||
263 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
264 | int index = 0; | ||
265 | int i; | ||
266 | |||
267 | spin_lock_bh(&priv->stats_lock); | ||
268 | |||
269 | mlx4_en_update_lro_stats(priv); | ||
270 | |||
271 | for (i = 0; i < NUM_MAIN_STATS; i++) | ||
272 | data[index++] = ((unsigned long *) &priv->stats)[i]; | ||
273 | for (i = 0; i < NUM_PORT_STATS; i++) | ||
274 | data[index++] = ((unsigned long *) &priv->port_stats)[i]; | ||
275 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
276 | data[index++] = priv->tx_ring[i].packets; | ||
277 | data[index++] = priv->tx_ring[i].bytes; | ||
278 | } | ||
279 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
280 | data[index++] = priv->rx_ring[i].packets; | ||
281 | data[index++] = priv->rx_ring[i].bytes; | ||
282 | } | ||
283 | for (i = 0; i < NUM_PKT_STATS; i++) | ||
284 | data[index++] = ((unsigned long *) &priv->pkstats)[i]; | ||
285 | spin_unlock_bh(&priv->stats_lock); | ||
286 | |||
287 | } | ||
288 | |||
289 | static void mlx4_en_get_strings(struct net_device *dev, | ||
290 | uint32_t stringset, uint8_t *data) | ||
291 | { | ||
292 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
293 | int index = 0; | ||
294 | int i; | ||
295 | |||
296 | if (stringset != ETH_SS_STATS) | ||
297 | return; | ||
298 | |||
299 | /* Add main counters */ | ||
300 | for (i = 0; i < NUM_MAIN_STATS; i++) | ||
301 | strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]); | ||
302 | for (i = 0; i < NUM_PORT_STATS; i++) | ||
303 | strcpy(data + (index++) * ETH_GSTRING_LEN, | ||
304 | main_strings[i + NUM_MAIN_STATS]); | ||
305 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
306 | sprintf(data + (index++) * ETH_GSTRING_LEN, | ||
307 | "tx%d_packets", i); | ||
308 | sprintf(data + (index++) * ETH_GSTRING_LEN, | ||
309 | "tx%d_bytes", i); | ||
310 | } | ||
311 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
312 | sprintf(data + (index++) * ETH_GSTRING_LEN, | ||
313 | "rx%d_packets", i); | ||
314 | sprintf(data + (index++) * ETH_GSTRING_LEN, | ||
315 | "rx%d_bytes", i); | ||
316 | } | ||
317 | for (i = 0; i < NUM_PKT_STATS; i++) | ||
318 | strcpy(data + (index++) * ETH_GSTRING_LEN, | ||
319 | main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]); | ||
320 | } | ||
321 | |||
322 | static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
323 | { | ||
324 | cmd->autoneg = AUTONEG_DISABLE; | ||
325 | cmd->supported = SUPPORTED_10000baseT_Full; | ||
326 | cmd->advertising = ADVERTISED_10000baseT_Full; | ||
327 | if (netif_carrier_ok(dev)) { | ||
328 | cmd->speed = SPEED_10000; | ||
329 | cmd->duplex = DUPLEX_FULL; | ||
330 | } else { | ||
331 | cmd->speed = -1; | ||
332 | cmd->duplex = -1; | ||
333 | } | ||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
338 | { | ||
339 | if ((cmd->autoneg == AUTONEG_ENABLE) || | ||
340 | (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL)) | ||
341 | return -EINVAL; | ||
342 | |||
343 | /* Nothing to change */ | ||
344 | return 0; | ||
345 | } | ||
346 | |||
347 | static int mlx4_en_get_coalesce(struct net_device *dev, | ||
348 | struct ethtool_coalesce *coal) | ||
349 | { | ||
350 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
351 | |||
352 | coal->tx_coalesce_usecs = 0; | ||
353 | coal->tx_max_coalesced_frames = 0; | ||
354 | coal->rx_coalesce_usecs = priv->rx_usecs; | ||
355 | coal->rx_max_coalesced_frames = priv->rx_frames; | ||
356 | |||
357 | coal->pkt_rate_low = priv->pkt_rate_low; | ||
358 | coal->rx_coalesce_usecs_low = priv->rx_usecs_low; | ||
359 | coal->pkt_rate_high = priv->pkt_rate_high; | ||
360 | coal->rx_coalesce_usecs_high = priv->rx_usecs_high; | ||
361 | coal->rate_sample_interval = priv->sample_interval; | ||
362 | coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal; | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static int mlx4_en_set_coalesce(struct net_device *dev, | ||
367 | struct ethtool_coalesce *coal) | ||
368 | { | ||
369 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
370 | int err, i; | ||
371 | |||
372 | priv->rx_frames = (coal->rx_max_coalesced_frames == | ||
373 | MLX4_EN_AUTO_CONF) ? | ||
374 | MLX4_EN_RX_COAL_TARGET / | ||
375 | priv->dev->mtu + 1 : | ||
376 | coal->rx_max_coalesced_frames; | ||
377 | priv->rx_usecs = (coal->rx_coalesce_usecs == | ||
378 | MLX4_EN_AUTO_CONF) ? | ||
379 | MLX4_EN_RX_COAL_TIME : | ||
380 | coal->rx_coalesce_usecs; | ||
381 | |||
382 | /* Set adaptive coalescing params */ | ||
383 | priv->pkt_rate_low = coal->pkt_rate_low; | ||
384 | priv->rx_usecs_low = coal->rx_coalesce_usecs_low; | ||
385 | priv->pkt_rate_high = coal->pkt_rate_high; | ||
386 | priv->rx_usecs_high = coal->rx_coalesce_usecs_high; | ||
387 | priv->sample_interval = coal->rate_sample_interval; | ||
388 | priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; | ||
389 | priv->last_moder_time = MLX4_EN_AUTO_CONF; | ||
390 | if (priv->adaptive_rx_coal) | ||
391 | return 0; | ||
392 | |||
393 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
394 | priv->rx_cq[i].moder_cnt = priv->rx_frames; | ||
395 | priv->rx_cq[i].moder_time = priv->rx_usecs; | ||
396 | err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]); | ||
397 | if (err) | ||
398 | return err; | ||
399 | } | ||
400 | return 0; | ||
401 | } | ||
402 | |||
403 | static int mlx4_en_set_pauseparam(struct net_device *dev, | ||
404 | struct ethtool_pauseparam *pause) | ||
405 | { | ||
406 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
407 | struct mlx4_en_dev *mdev = priv->mdev; | ||
408 | int err; | ||
409 | |||
410 | mdev->profile.tx_pause = pause->tx_pause != 0; | ||
411 | mdev->profile.rx_pause = pause->rx_pause != 0; | ||
412 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
413 | priv->rx_skb_size + ETH_FCS_LEN, | ||
414 | mdev->profile.tx_pause, | ||
415 | mdev->profile.tx_ppp, | ||
416 | mdev->profile.rx_pause, | ||
417 | mdev->profile.rx_ppp); | ||
418 | if (err) | ||
419 | mlx4_err(mdev, "Failed setting pause params\n"); | ||
420 | |||
421 | return err; | ||
422 | } | ||
423 | |||
424 | static void mlx4_en_get_pauseparam(struct net_device *dev, | ||
425 | struct ethtool_pauseparam *pause) | ||
426 | { | ||
427 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
428 | struct mlx4_en_dev *mdev = priv->mdev; | ||
429 | |||
430 | pause->tx_pause = mdev->profile.tx_pause; | ||
431 | pause->rx_pause = mdev->profile.rx_pause; | ||
432 | } | ||
433 | |||
434 | static void mlx4_en_get_ringparam(struct net_device *dev, | ||
435 | struct ethtool_ringparam *param) | ||
436 | { | ||
437 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
438 | struct mlx4_en_dev *mdev = priv->mdev; | ||
439 | |||
440 | memset(param, 0, sizeof(*param)); | ||
441 | param->rx_max_pending = mdev->dev->caps.max_rq_sg; | ||
442 | param->tx_max_pending = mdev->dev->caps.max_sq_sg; | ||
443 | param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size; | ||
444 | param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size; | ||
445 | } | ||
446 | |||
447 | const struct ethtool_ops mlx4_en_ethtool_ops = { | ||
448 | .get_drvinfo = mlx4_en_get_drvinfo, | ||
449 | .get_settings = mlx4_en_get_settings, | ||
450 | .set_settings = mlx4_en_set_settings, | ||
451 | #ifdef NETIF_F_TSO | ||
452 | .get_tso = mlx4_en_get_tso, | ||
453 | .set_tso = mlx4_en_set_tso, | ||
454 | #endif | ||
455 | .get_sg = ethtool_op_get_sg, | ||
456 | .set_sg = ethtool_op_set_sg, | ||
457 | .get_link = ethtool_op_get_link, | ||
458 | .get_rx_csum = mlx4_en_get_rx_csum, | ||
459 | .set_rx_csum = mlx4_en_set_rx_csum, | ||
460 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
461 | .set_tx_csum = ethtool_op_set_tx_ipv6_csum, | ||
462 | .get_strings = mlx4_en_get_strings, | ||
463 | .get_sset_count = mlx4_en_get_sset_count, | ||
464 | .get_ethtool_stats = mlx4_en_get_ethtool_stats, | ||
465 | .get_wol = mlx4_en_get_wol, | ||
466 | .get_msglevel = mlx4_en_get_msglevel, | ||
467 | .set_msglevel = mlx4_en_set_msglevel, | ||
468 | .get_coalesce = mlx4_en_get_coalesce, | ||
469 | .set_coalesce = mlx4_en_set_coalesce, | ||
470 | .get_pauseparam = mlx4_en_get_pauseparam, | ||
471 | .set_pauseparam = mlx4_en_set_pauseparam, | ||
472 | .get_ringparam = mlx4_en_get_ringparam, | ||
473 | .get_flags = ethtool_op_get_flags, | ||
474 | .set_flags = ethtool_op_set_flags, | ||
475 | }; | ||
476 | |||
477 | |||
478 | |||
479 | |||
480 | |||
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c new file mode 100644 index 000000000000..c5a4c0389752 --- /dev/null +++ b/drivers/net/mlx4/en_port.c | |||
@@ -0,0 +1,261 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | |||
35 | #include <linux/if_vlan.h> | ||
36 | |||
37 | #include <linux/mlx4/device.h> | ||
38 | #include <linux/mlx4/cmd.h> | ||
39 | |||
40 | #include "en_port.h" | ||
41 | #include "mlx4_en.h" | ||
42 | |||
43 | |||
44 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, | ||
45 | u64 mac, u64 clear, u8 mode) | ||
46 | { | ||
47 | return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, | ||
48 | MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B); | ||
49 | } | ||
50 | |||
51 | int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp) | ||
52 | { | ||
53 | struct mlx4_cmd_mailbox *mailbox; | ||
54 | struct mlx4_set_vlan_fltr_mbox *filter; | ||
55 | int i; | ||
56 | int j; | ||
57 | int index = 0; | ||
58 | u32 entry; | ||
59 | int err = 0; | ||
60 | |||
61 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
62 | if (IS_ERR(mailbox)) | ||
63 | return PTR_ERR(mailbox); | ||
64 | |||
65 | filter = mailbox->buf; | ||
66 | if (grp) { | ||
67 | memset(filter, 0, sizeof *filter); | ||
68 | for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) { | ||
69 | entry = 0; | ||
70 | for (j = 0; j < 32; j++) | ||
71 | if (vlan_group_get_device(grp, index++)) | ||
72 | entry |= 1 << j; | ||
73 | filter->entry[i] = cpu_to_be32(entry); | ||
74 | } | ||
75 | } else { | ||
76 | /* When no VLANs are configured we block all VLANs */ | ||
77 | memset(filter, 0, sizeof(*filter)); | ||
78 | } | ||
79 | err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR, | ||
80 | MLX4_CMD_TIME_CLASS_B); | ||
81 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
82 | return err; | ||
83 | } | ||
84 | |||
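The loop above packs all 4096 possible VLAN IDs into 128 32-bit mailbox words, filling the table from the last entry backwards while the running index counts up. A minimal user-space sketch (not driver code; the VLAN ID is an arbitrary example) of where a given ID lands:

    #include <stdio.h>

    #define VLAN_FLTR_SIZE 128

    int main(void)
    {
            unsigned int vid = 100; /* example VLAN ID, 0..4095 */

            /* index counts up while i walks the entries downwards, so
             * VLAN ID v ends up in entry (VLAN_FLTR_SIZE - 1 - v / 32),
             * bit (v % 32) */
            unsigned int entry = VLAN_FLTR_SIZE - 1 - vid / 32;
            unsigned int bit = vid % 32;

            printf("VLAN %u -> filter->entry[%u], bit %u\n", vid, entry, bit);
            return 0;
    }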
85 | |||
86 | int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, | ||
87 | u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) | ||
88 | { | ||
89 | struct mlx4_cmd_mailbox *mailbox; | ||
90 | struct mlx4_set_port_general_context *context; | ||
91 | int err; | ||
92 | u32 in_mod; | ||
93 | |||
94 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
95 | if (IS_ERR(mailbox)) | ||
96 | return PTR_ERR(mailbox); | ||
97 | context = mailbox->buf; | ||
98 | memset(context, 0, sizeof *context); | ||
99 | |||
100 | context->flags = SET_PORT_GEN_ALL_VALID; | ||
101 | context->mtu = cpu_to_be16(mtu); | ||
102 | context->pptx = (pptx * (!pfctx)) << 7; | ||
103 | context->pfctx = pfctx; | ||
104 | context->pprx = (pprx * (!pfcrx)) << 7; | ||
105 | context->pfcrx = pfcrx; | ||
106 | |||
107 | in_mod = MLX4_SET_PORT_GENERAL << 8 | port; | ||
108 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
109 | MLX4_CMD_TIME_CLASS_B); | ||
110 | |||
111 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
112 | return err; | ||
113 | } | ||
114 | |||
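Note the pause encoding above: global pause (pptx/pprx) is written to bit 7 of its byte, but only when the corresponding per-priority flow control mask (pfctx/pfcrx) is zero, so PFC silently overrides global pause. A standalone sketch of that truth table:

    #include <stdio.h>

    /* same expression the driver uses: (pptx * (!pfctx)) << 7 */
    static unsigned char encode_pause(unsigned char pause, unsigned char pfc)
    {
            return (pause * (!pfc)) << 7;
    }

    int main(void)
    {
            printf("pause on,  pfc off: 0x%02x\n", encode_pause(1, 0)); /* 0x80 */
            printf("pause on,  pfc on:  0x%02x\n", encode_pause(1, 1)); /* 0x00 */
            printf("pause off, pfc off: 0x%02x\n", encode_pause(0, 0)); /* 0x00 */
            return 0;
    }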
115 | int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | ||
116 | u8 promisc) | ||
117 | { | ||
118 | struct mlx4_cmd_mailbox *mailbox; | ||
119 | struct mlx4_set_port_rqp_calc_context *context; | ||
120 | int err; | ||
121 | u32 in_mod; | ||
122 | |||
123 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
124 | if (IS_ERR(mailbox)) | ||
125 | return PTR_ERR(mailbox); | ||
126 | context = mailbox->buf; | ||
127 | memset(context, 0, sizeof *context); | ||
128 | |||
129 | context->base_qpn = cpu_to_be32(base_qpn); | ||
130 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn); | ||
131 | context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn); | ||
132 | context->intra_no_vlan = 0; | ||
133 | context->no_vlan = MLX4_NO_VLAN_IDX; | ||
134 | context->intra_vlan_miss = 0; | ||
135 | context->vlan_miss = MLX4_VLAN_MISS_IDX; | ||
136 | |||
137 | in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; | ||
138 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
139 | MLX4_CMD_TIME_CLASS_B); | ||
140 | |||
141 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
142 | return err; | ||
143 | } | ||
144 | |||
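The promisc and mcast words each combine an enable flag in the top bit with the base QP number in the low bits. A small sketch (the base_qpn value is an arbitrary example, not a real reservation):

    #include <stdio.h>
    #include <stdint.h>

    #define SET_PORT_PROMISC_SHIFT 31

    int main(void)
    {
            uint32_t base_qpn = 0x200; /* example base QP number */
            uint8_t promisc = 1;

            uint32_t promisc_word = (uint32_t)promisc << SET_PORT_PROMISC_SHIFT | base_qpn;
            uint32_t mcast_word = (uint32_t)1 << SET_PORT_PROMISC_SHIFT | base_qpn;

            printf("promisc = 0x%08x, mcast = 0x%08x\n",
                   (unsigned)promisc_word, (unsigned)mcast_word);
            return 0;
    }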
145 | |||
146 | int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | ||
147 | { | ||
148 | struct mlx4_en_stat_out_mbox *mlx4_en_stats; | ||
149 | struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); | ||
150 | struct net_device_stats *stats = &priv->stats; | ||
151 | struct mlx4_cmd_mailbox *mailbox; | ||
152 | u64 in_mod = reset << 8 | port; | ||
153 | int err; | ||
154 | |||
155 | mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); | ||
156 | if (IS_ERR(mailbox)) | ||
157 | return PTR_ERR(mailbox); | ||
158 | memset(mailbox->buf, 0, sizeof(*mlx4_en_stats)); | ||
159 | err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, | ||
160 | MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B); | ||
161 | if (err) | ||
162 | goto out; | ||
163 | |||
164 | mlx4_en_stats = mailbox->buf; | ||
165 | |||
166 | spin_lock_bh(&priv->stats_lock); | ||
167 | |||
168 | stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) - | ||
169 | be32_to_cpu(mlx4_en_stats->RDROP); | ||
170 | stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) + | ||
171 | be64_to_cpu(mlx4_en_stats->TTOT_prio_1) + | ||
172 | be64_to_cpu(mlx4_en_stats->TTOT_prio_2) + | ||
173 | be64_to_cpu(mlx4_en_stats->TTOT_prio_3) + | ||
174 | be64_to_cpu(mlx4_en_stats->TTOT_prio_4) + | ||
175 | be64_to_cpu(mlx4_en_stats->TTOT_prio_5) + | ||
176 | be64_to_cpu(mlx4_en_stats->TTOT_prio_6) + | ||
177 | be64_to_cpu(mlx4_en_stats->TTOT_prio_7) + | ||
178 | be64_to_cpu(mlx4_en_stats->TTOT_novlan) + | ||
179 | be64_to_cpu(mlx4_en_stats->TTOT_loopbk); | ||
180 | stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) + | ||
181 | be64_to_cpu(mlx4_en_stats->ROCT_prio_1) + | ||
182 | be64_to_cpu(mlx4_en_stats->ROCT_prio_2) + | ||
183 | be64_to_cpu(mlx4_en_stats->ROCT_prio_3) + | ||
184 | be64_to_cpu(mlx4_en_stats->ROCT_prio_4) + | ||
185 | be64_to_cpu(mlx4_en_stats->ROCT_prio_5) + | ||
186 | be64_to_cpu(mlx4_en_stats->ROCT_prio_6) + | ||
187 | be64_to_cpu(mlx4_en_stats->ROCT_prio_7) + | ||
188 | be64_to_cpu(mlx4_en_stats->ROCT_novlan); | ||
189 | |||
190 | stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) + | ||
191 | be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) + | ||
192 | be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) + | ||
193 | be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) + | ||
194 | be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) + | ||
195 | be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) + | ||
196 | be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) + | ||
197 | be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) + | ||
198 | be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) + | ||
199 | be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk); | ||
200 | |||
201 | stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + | ||
202 | be32_to_cpu(mlx4_en_stats->RdropLength) + | ||
203 | be32_to_cpu(mlx4_en_stats->RJBBR) + | ||
204 | be32_to_cpu(mlx4_en_stats->RCRC) + | ||
205 | be32_to_cpu(mlx4_en_stats->RRUNT); | ||
206 | stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP); | ||
207 | stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) + | ||
208 | be64_to_cpu(mlx4_en_stats->MCAST_prio_1) + | ||
209 | be64_to_cpu(mlx4_en_stats->MCAST_prio_2) + | ||
210 | be64_to_cpu(mlx4_en_stats->MCAST_prio_3) + | ||
211 | be64_to_cpu(mlx4_en_stats->MCAST_prio_4) + | ||
212 | be64_to_cpu(mlx4_en_stats->MCAST_prio_5) + | ||
213 | be64_to_cpu(mlx4_en_stats->MCAST_prio_6) + | ||
214 | be64_to_cpu(mlx4_en_stats->MCAST_prio_7) + | ||
215 | be64_to_cpu(mlx4_en_stats->MCAST_novlan); | ||
216 | stats->collisions = 0; | ||
217 | stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); | ||
218 | stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); | ||
219 | stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); | ||
220 | stats->rx_frame_errors = 0; | ||
221 | stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); | ||
222 | stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); | ||
223 | stats->tx_aborted_errors = 0; | ||
224 | stats->tx_carrier_errors = 0; | ||
225 | stats->tx_fifo_errors = 0; | ||
226 | stats->tx_heartbeat_errors = 0; | ||
227 | stats->tx_window_errors = 0; | ||
228 | |||
229 | priv->pkstats.broadcast = | ||
230 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) + | ||
231 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) + | ||
232 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) + | ||
233 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) + | ||
234 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) + | ||
235 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) + | ||
236 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) + | ||
237 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) + | ||
238 | be64_to_cpu(mlx4_en_stats->RBCAST_novlan); | ||
239 | priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0); | ||
240 | priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1); | ||
241 | priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2); | ||
242 | priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3); | ||
243 | priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4); | ||
244 | priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5); | ||
245 | priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6); | ||
246 | priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7); | ||
247 | priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0); | ||
248 | priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1); | ||
249 | priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2); | ||
250 | priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3); | ||
251 | priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4); | ||
252 | priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5); | ||
253 | priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6); | ||
254 | priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7); | ||
255 | spin_unlock_bh(&priv->stats_lock); | ||
256 | |||
257 | out: | ||
258 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); | ||
259 | return err; | ||
260 | } | ||
261 | |||
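Two details worth noting in DUMP_ETH_STATS: the input modifier packs the clear-on-read flag above the port number, and each netdev counter is simply the sum of the per-priority, no-VLAN, and loopback firmware counters. A hypothetical user-space sketch of both (the counter values are made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t port = 1, reset = 1;
            uint64_t in_mod = reset << 8 | port; /* bit 8 = clear counters */

            /* example TX totals: prio 0..7, then novlan and loopbk */
            uint64_t ttot[10] = { 10, 0, 0, 5, 0, 0, 0, 1, 3, 0 };
            uint64_t tx_packets = 0;
            int i;

            for (i = 0; i < 10; i++)
                    tx_packets += ttot[i];

            printf("in_mod = 0x%llx, tx_packets = %llu\n",
                   (unsigned long long)in_mod, (unsigned long long)tx_packets);
            return 0;
    }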
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h new file mode 100644 index 000000000000..e6477f12beb5 --- /dev/null +++ b/drivers/net/mlx4/en_port.h | |||
@@ -0,0 +1,570 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #ifndef _MLX4_EN_PORT_H_ | ||
35 | #define _MLX4_EN_PORT_H_ | ||
36 | |||
37 | |||
38 | #define SET_PORT_GEN_ALL_VALID 0x7 | ||
39 | #define SET_PORT_PROMISC_SHIFT 31 | ||
40 | |||
41 | enum { | ||
42 | MLX4_CMD_SET_VLAN_FLTR = 0x47, | ||
43 | MLX4_CMD_SET_MCAST_FLTR = 0x48, | ||
44 | MLX4_CMD_DUMP_ETH_STATS = 0x49, | ||
45 | }; | ||
46 | |||
47 | struct mlx4_set_port_general_context { | ||
48 | u8 reserved[3]; | ||
49 | u8 flags; | ||
50 | u16 reserved2; | ||
51 | __be16 mtu; | ||
52 | u8 pptx; | ||
53 | u8 pfctx; | ||
54 | u16 reserved3; | ||
55 | u8 pprx; | ||
56 | u8 pfcrx; | ||
57 | u16 reserved4; | ||
58 | }; | ||
59 | |||
60 | struct mlx4_set_port_rqp_calc_context { | ||
61 | __be32 base_qpn; | ||
62 | __be32 flags; | ||
63 | u8 reserved[3]; | ||
64 | u8 mac_miss; | ||
65 | u8 intra_no_vlan; | ||
66 | u8 no_vlan; | ||
67 | u8 intra_vlan_miss; | ||
68 | u8 vlan_miss; | ||
69 | u8 reserved2[3]; | ||
70 | u8 no_vlan_prio; | ||
71 | __be32 promisc; | ||
72 | __be32 mcast; | ||
73 | }; | ||
74 | |||
75 | #define VLAN_FLTR_SIZE 128 | ||
76 | struct mlx4_set_vlan_fltr_mbox { | ||
77 | __be32 entry[VLAN_FLTR_SIZE]; | ||
78 | }; | ||
79 | |||
80 | |||
81 | enum { | ||
82 | MLX4_MCAST_CONFIG = 0, | ||
83 | MLX4_MCAST_DISABLE = 1, | ||
84 | MLX4_MCAST_ENABLE = 2, | ||
85 | }; | ||
86 | |||
87 | |||
88 | struct mlx4_en_stat_out_mbox { | ||
89 | /* Received frames with a length of 64 octets */ | ||
90 | __be64 R64_prio_0; | ||
91 | __be64 R64_prio_1; | ||
92 | __be64 R64_prio_2; | ||
93 | __be64 R64_prio_3; | ||
94 | __be64 R64_prio_4; | ||
95 | __be64 R64_prio_5; | ||
96 | __be64 R64_prio_6; | ||
97 | __be64 R64_prio_7; | ||
98 | __be64 R64_novlan; | ||
99 | /* Received frames with a length of 127 octets */ | ||
100 | __be64 R127_prio_0; | ||
101 | __be64 R127_prio_1; | ||
102 | __be64 R127_prio_2; | ||
103 | __be64 R127_prio_3; | ||
104 | __be64 R127_prio_4; | ||
105 | __be64 R127_prio_5; | ||
106 | __be64 R127_prio_6; | ||
107 | __be64 R127_prio_7; | ||
108 | __be64 R127_novlan; | ||
109 | /* Received frames with a length of 255 octets */ | ||
110 | __be64 R255_prio_0; | ||
111 | __be64 R255_prio_1; | ||
112 | __be64 R255_prio_2; | ||
113 | __be64 R255_prio_3; | ||
114 | __be64 R255_prio_4; | ||
115 | __be64 R255_prio_5; | ||
116 | __be64 R255_prio_6; | ||
117 | __be64 R255_prio_7; | ||
118 | __be64 R255_novlan; | ||
119 | /* Received frames with a length of 511 octets */ | ||
120 | __be64 R511_prio_0; | ||
121 | __be64 R511_prio_1; | ||
122 | __be64 R511_prio_2; | ||
123 | __be64 R511_prio_3; | ||
124 | __be64 R511_prio_4; | ||
125 | __be64 R511_prio_5; | ||
126 | __be64 R511_prio_6; | ||
127 | __be64 R511_prio_7; | ||
128 | __be64 R511_novlan; | ||
129 | /* Received frames with a length of 1023 octets */ | ||
130 | __be64 R1023_prio_0; | ||
131 | __be64 R1023_prio_1; | ||
132 | __be64 R1023_prio_2; | ||
133 | __be64 R1023_prio_3; | ||
134 | __be64 R1023_prio_4; | ||
135 | __be64 R1023_prio_5; | ||
136 | __be64 R1023_prio_6; | ||
137 | __be64 R1023_prio_7; | ||
138 | __be64 R1023_novlan; | ||
139 | /* Received frames with a length of 1518 octets */ | ||
140 | __be64 R1518_prio_0; | ||
141 | __be64 R1518_prio_1; | ||
142 | __be64 R1518_prio_2; | ||
143 | __be64 R1518_prio_3; | ||
144 | __be64 R1518_prio_4; | ||
145 | __be64 R1518_prio_5; | ||
146 | __be64 R1518_prio_6; | ||
147 | __be64 R1518_prio_7; | ||
148 | __be64 R1518_novlan; | ||
149 | /* Received frames with a length of 1522 octets */ | ||
150 | __be64 R1522_prio_0; | ||
151 | __be64 R1522_prio_1; | ||
152 | __be64 R1522_prio_2; | ||
153 | __be64 R1522_prio_3; | ||
154 | __be64 R1522_prio_4; | ||
155 | __be64 R1522_prio_5; | ||
156 | __be64 R1522_prio_6; | ||
157 | __be64 R1522_prio_7; | ||
158 | __be64 R1522_novlan; | ||
159 | /* Received frames with a length of 1548 octets */ | ||
160 | __be64 R1548_prio_0; | ||
161 | __be64 R1548_prio_1; | ||
162 | __be64 R1548_prio_2; | ||
163 | __be64 R1548_prio_3; | ||
164 | __be64 R1548_prio_4; | ||
165 | __be64 R1548_prio_5; | ||
166 | __be64 R1548_prio_6; | ||
167 | __be64 R1548_prio_7; | ||
168 | __be64 R1548_novlan; | ||
169 | /* Received frames with a length of 1548 < octets < MTU */ | ||
170 | __be64 R2MTU_prio_0; | ||
171 | __be64 R2MTU_prio_1; | ||
172 | __be64 R2MTU_prio_2; | ||
173 | __be64 R2MTU_prio_3; | ||
174 | __be64 R2MTU_prio_4; | ||
175 | __be64 R2MTU_prio_5; | ||
176 | __be64 R2MTU_prio_6; | ||
177 | __be64 R2MTU_prio_7; | ||
178 | __be64 R2MTU_novlan; | ||
179 | /* Received frames with a length of MTU < octets and good CRC */ | ||
180 | __be64 RGIANT_prio_0; | ||
181 | __be64 RGIANT_prio_1; | ||
182 | __be64 RGIANT_prio_2; | ||
183 | __be64 RGIANT_prio_3; | ||
184 | __be64 RGIANT_prio_4; | ||
185 | __be64 RGIANT_prio_5; | ||
186 | __be64 RGIANT_prio_6; | ||
187 | __be64 RGIANT_prio_7; | ||
188 | __be64 RGIANT_novlan; | ||
189 | /* Received broadcast frames with good CRC */ | ||
190 | __be64 RBCAST_prio_0; | ||
191 | __be64 RBCAST_prio_1; | ||
192 | __be64 RBCAST_prio_2; | ||
193 | __be64 RBCAST_prio_3; | ||
194 | __be64 RBCAST_prio_4; | ||
195 | __be64 RBCAST_prio_5; | ||
196 | __be64 RBCAST_prio_6; | ||
197 | __be64 RBCAST_prio_7; | ||
198 | __be64 RBCAST_novlan; | ||
199 | /* Received multicast frames with good CRC */ | ||
200 | __be64 MCAST_prio_0; | ||
201 | __be64 MCAST_prio_1; | ||
202 | __be64 MCAST_prio_2; | ||
203 | __be64 MCAST_prio_3; | ||
204 | __be64 MCAST_prio_4; | ||
205 | __be64 MCAST_prio_5; | ||
206 | __be64 MCAST_prio_6; | ||
207 | __be64 MCAST_prio_7; | ||
208 | __be64 MCAST_novlan; | ||
209 | /* Received unicast frames that are not short or GIANT and have a good CRC */ | ||
210 | __be64 RTOTG_prio_0; | ||
211 | __be64 RTOTG_prio_1; | ||
212 | __be64 RTOTG_prio_2; | ||
213 | __be64 RTOTG_prio_3; | ||
214 | __be64 RTOTG_prio_4; | ||
215 | __be64 RTOTG_prio_5; | ||
216 | __be64 RTOTG_prio_6; | ||
217 | __be64 RTOTG_prio_7; | ||
218 | __be64 RTOTG_novlan; | ||
219 | |||
220 | /* Count of total octets of received frames, includes framing characters */ | ||
221 | __be64 RTTLOCT_prio_0; | ||
222 | /* Count of total octets of received frames, not including framing | ||
223 | characters */ | ||
224 | __be64 RTTLOCT_NOFRM_prio_0; | ||
225 | /* Count of Total number of octets received | ||
226 | (only for frames without errors) */ | ||
227 | __be64 ROCT_prio_0; | ||
228 | |||
229 | __be64 RTTLOCT_prio_1; | ||
230 | __be64 RTTLOCT_NOFRM_prio_1; | ||
231 | __be64 ROCT_prio_1; | ||
232 | |||
233 | __be64 RTTLOCT_prio_2; | ||
234 | __be64 RTTLOCT_NOFRM_prio_2; | ||
235 | __be64 ROCT_prio_2; | ||
236 | |||
237 | __be64 RTTLOCT_prio_3; | ||
238 | __be64 RTTLOCT_NOFRM_prio_3; | ||
239 | __be64 ROCT_prio_3; | ||
240 | |||
241 | __be64 RTTLOCT_prio_4; | ||
242 | __be64 RTTLOCT_NOFRM_prio_4; | ||
243 | __be64 ROCT_prio_4; | ||
244 | |||
245 | __be64 RTTLOCT_prio_5; | ||
246 | __be64 RTTLOCT_NOFRM_prio_5; | ||
247 | __be64 ROCT_prio_5; | ||
248 | |||
249 | __be64 RTTLOCT_prio_6; | ||
250 | __be64 RTTLOCT_NOFRM_prio_6; | ||
251 | __be64 ROCT_prio_6; | ||
252 | |||
253 | __be64 RTTLOCT_prio_7; | ||
254 | __be64 RTTLOCT_NOFRM_prio_7; | ||
255 | __be64 ROCT_prio_7; | ||
256 | |||
257 | __be64 RTTLOCT_novlan; | ||
258 | __be64 RTTLOCT_NOFRM_novlan; | ||
259 | __be64 ROCT_novlan; | ||
260 | |||
261 | /* Count of Total received frames including bad frames */ | ||
262 | __be64 RTOT_prio_0; | ||
263 | /* Count of Total number of received frames with 802.1Q encapsulation */ | ||
264 | __be64 R1Q_prio_0; | ||
265 | __be64 reserved1; | ||
266 | |||
267 | __be64 RTOT_prio_1; | ||
268 | __be64 R1Q_prio_1; | ||
269 | __be64 reserved2; | ||
270 | |||
271 | __be64 RTOT_prio_2; | ||
272 | __be64 R1Q_prio_2; | ||
273 | __be64 reserved3; | ||
274 | |||
275 | __be64 RTOT_prio_3; | ||
276 | __be64 R1Q_prio_3; | ||
277 | __be64 reserved4; | ||
278 | |||
279 | __be64 RTOT_prio_4; | ||
280 | __be64 R1Q_prio_4; | ||
281 | __be64 reserved5; | ||
282 | |||
283 | __be64 RTOT_prio_5; | ||
284 | __be64 R1Q_prio_5; | ||
285 | __be64 reserved6; | ||
286 | |||
287 | __be64 RTOT_prio_6; | ||
288 | __be64 R1Q_prio_6; | ||
289 | __be64 reserved7; | ||
290 | |||
291 | __be64 RTOT_prio_7; | ||
292 | __be64 R1Q_prio_7; | ||
293 | __be64 reserved8; | ||
294 | |||
295 | __be64 RTOT_novlan; | ||
296 | __be64 R1Q_novlan; | ||
297 | __be64 reserved9; | ||
298 | |||
299 | /* Total number of Successfully Received Control Frames */ | ||
300 | __be64 RCNTL; | ||
301 | __be64 reserved10; | ||
302 | __be64 reserved11; | ||
303 | __be64 reserved12; | ||
304 | /* Count of received frames with a length/type field value between 46 | ||
305 | (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged frames), | ||
306 | inclusive */ | ||
307 | __be64 RInRangeLengthErr; | ||
308 | /* Count of received frames with length/type field between 1501 and 1535 | ||
309 | decimal, inclusive */ | ||
310 | __be64 ROutRangeLengthErr; | ||
311 | /* Count of received frames that are longer than max allowed size for | ||
312 | 802.3 frames (1518/1522) */ | ||
313 | __be64 RFrmTooLong; | ||
314 | /* Count frames received with PCS error */ | ||
315 | __be64 PCS; | ||
316 | |||
317 | /* Transmit frames with a length of 64 octets */ | ||
318 | __be64 T64_prio_0; | ||
319 | __be64 T64_prio_1; | ||
320 | __be64 T64_prio_2; | ||
321 | __be64 T64_prio_3; | ||
322 | __be64 T64_prio_4; | ||
323 | __be64 T64_prio_5; | ||
324 | __be64 T64_prio_6; | ||
325 | __be64 T64_prio_7; | ||
326 | __be64 T64_novlan; | ||
327 | __be64 T64_loopbk; | ||
328 | /* Transmit frames with a length of 65 to 127 octets. */ | ||
329 | __be64 T127_prio_0; | ||
330 | __be64 T127_prio_1; | ||
331 | __be64 T127_prio_2; | ||
332 | __be64 T127_prio_3; | ||
333 | __be64 T127_prio_4; | ||
334 | __be64 T127_prio_5; | ||
335 | __be64 T127_prio_6; | ||
336 | __be64 T127_prio_7; | ||
337 | __be64 T127_novlan; | ||
338 | __be64 T127_loopbk; | ||
339 | /* Transmit frames with a length of 128 to 255 octets */ | ||
340 | __be64 T255_prio_0; | ||
341 | __be64 T255_prio_1; | ||
342 | __be64 T255_prio_2; | ||
343 | __be64 T255_prio_3; | ||
344 | __be64 T255_prio_4; | ||
345 | __be64 T255_prio_5; | ||
346 | __be64 T255_prio_6; | ||
347 | __be64 T255_prio_7; | ||
348 | __be64 T255_novlan; | ||
349 | __be64 T255_loopbk; | ||
350 | /* Transmit frames with a length of 256 to 511 octets */ | ||
351 | __be64 T511_prio_0; | ||
352 | __be64 T511_prio_1; | ||
353 | __be64 T511_prio_2; | ||
354 | __be64 T511_prio_3; | ||
355 | __be64 T511_prio_4; | ||
356 | __be64 T511_prio_5; | ||
357 | __be64 T511_prio_6; | ||
358 | __be64 T511_prio_7; | ||
359 | __be64 T511_novlan; | ||
360 | __be64 T511_loopbk; | ||
361 | /* Transmit frames with a length of 512 to 1023 octets */ | ||
362 | __be64 T1023_prio_0; | ||
363 | __be64 T1023_prio_1; | ||
364 | __be64 T1023_prio_2; | ||
365 | __be64 T1023_prio_3; | ||
366 | __be64 T1023_prio_4; | ||
367 | __be64 T1023_prio_5; | ||
368 | __be64 T1023_prio_6; | ||
369 | __be64 T1023_prio_7; | ||
370 | __be64 T1023_novlan; | ||
371 | __be64 T1023_loopbk; | ||
372 | /* Transmit frames with a length of 1024 to 1518 octets */ | ||
373 | __be64 T1518_prio_0; | ||
374 | __be64 T1518_prio_1; | ||
375 | __be64 T1518_prio_2; | ||
376 | __be64 T1518_prio_3; | ||
377 | __be64 T1518_prio_4; | ||
378 | __be64 T1518_prio_5; | ||
379 | __be64 T1518_prio_6; | ||
380 | __be64 T1518_prio_7; | ||
381 | __be64 T1518_novlan; | ||
382 | __be64 T1518_loopbk; | ||
383 | /* Counts transmit frames with a length of 1519 to 1522 bytes */ | ||
384 | __be64 T1522_prio_0; | ||
385 | __be64 T1522_prio_1; | ||
386 | __be64 T1522_prio_2; | ||
387 | __be64 T1522_prio_3; | ||
388 | __be64 T1522_prio_4; | ||
389 | __be64 T1522_prio_5; | ||
390 | __be64 T1522_prio_6; | ||
391 | __be64 T1522_prio_7; | ||
392 | __be64 T1522_novlan; | ||
393 | __be64 T1522_loopbk; | ||
394 | /* Transmit frames with a length of 1523 to 1548 octets */ | ||
395 | __be64 T1548_prio_0; | ||
396 | __be64 T1548_prio_1; | ||
397 | __be64 T1548_prio_2; | ||
398 | __be64 T1548_prio_3; | ||
399 | __be64 T1548_prio_4; | ||
400 | __be64 T1548_prio_5; | ||
401 | __be64 T1548_prio_6; | ||
402 | __be64 T1548_prio_7; | ||
403 | __be64 T1548_novlan; | ||
404 | __be64 T1548_loopbk; | ||
405 | /* Counts transmit frames with a length of 1549 to MTU bytes */ | ||
406 | __be64 T2MTU_prio_0; | ||
407 | __be64 T2MTU_prio_1; | ||
408 | __be64 T2MTU_prio_2; | ||
409 | __be64 T2MTU_prio_3; | ||
410 | __be64 T2MTU_prio_4; | ||
411 | __be64 T2MTU_prio_5; | ||
412 | __be64 T2MTU_prio_6; | ||
413 | __be64 T2MTU_prio_7; | ||
414 | __be64 T2MTU_novlan; | ||
415 | __be64 T2MTU_loopbk; | ||
416 | /* Transmit frames with a length greater than MTU octets and a good CRC. */ | ||
417 | __be64 TGIANT_prio_0; | ||
418 | __be64 TGIANT_prio_1; | ||
419 | __be64 TGIANT_prio_2; | ||
420 | __be64 TGIANT_prio_3; | ||
421 | __be64 TGIANT_prio_4; | ||
422 | __be64 TGIANT_prio_5; | ||
423 | __be64 TGIANT_prio_6; | ||
424 | __be64 TGIANT_prio_7; | ||
425 | __be64 TGIANT_novlan; | ||
426 | __be64 TGIANT_loopbk; | ||
427 | /* Transmit broadcast frames with a good CRC */ | ||
428 | __be64 TBCAST_prio_0; | ||
429 | __be64 TBCAST_prio_1; | ||
430 | __be64 TBCAST_prio_2; | ||
431 | __be64 TBCAST_prio_3; | ||
432 | __be64 TBCAST_prio_4; | ||
433 | __be64 TBCAST_prio_5; | ||
434 | __be64 TBCAST_prio_6; | ||
435 | __be64 TBCAST_prio_7; | ||
436 | __be64 TBCAST_novlan; | ||
437 | __be64 TBCAST_loopbk; | ||
438 | /* Transmit multicast frames with a good CRC */ | ||
439 | __be64 TMCAST_prio_0; | ||
440 | __be64 TMCAST_prio_1; | ||
441 | __be64 TMCAST_prio_2; | ||
442 | __be64 TMCAST_prio_3; | ||
443 | __be64 TMCAST_prio_4; | ||
444 | __be64 TMCAST_prio_5; | ||
445 | __be64 TMCAST_prio_6; | ||
446 | __be64 TMCAST_prio_7; | ||
447 | __be64 TMCAST_novlan; | ||
448 | __be64 TMCAST_loopbk; | ||
449 | /* Transmit good frames that are neither broadcast nor multicast */ | ||
450 | __be64 TTOTG_prio_0; | ||
451 | __be64 TTOTG_prio_1; | ||
452 | __be64 TTOTG_prio_2; | ||
453 | __be64 TTOTG_prio_3; | ||
454 | __be64 TTOTG_prio_4; | ||
455 | __be64 TTOTG_prio_5; | ||
456 | __be64 TTOTG_prio_6; | ||
457 | __be64 TTOTG_prio_7; | ||
458 | __be64 TTOTG_novlan; | ||
459 | __be64 TTOTG_loopbk; | ||
460 | |||
461 | /* total octets of transmitted frames, including framing characters */ | ||
462 | __be64 TTTLOCT_prio_0; | ||
463 | /* total octets of transmitted frames, not including framing characters */ | ||
464 | __be64 TTTLOCT_NOFRM_prio_0; | ||
465 | /* ifOutOctets */ | ||
466 | __be64 TOCT_prio_0; | ||
467 | |||
468 | __be64 TTTLOCT_prio_1; | ||
469 | __be64 TTTLOCT_NOFRM_prio_1; | ||
470 | __be64 TOCT_prio_1; | ||
471 | |||
472 | __be64 TTTLOCT_prio_2; | ||
473 | __be64 TTTLOCT_NOFRM_prio_2; | ||
474 | __be64 TOCT_prio_2; | ||
475 | |||
476 | __be64 TTTLOCT_prio_3; | ||
477 | __be64 TTTLOCT_NOFRM_prio_3; | ||
478 | __be64 TOCT_prio_3; | ||
479 | |||
480 | __be64 TTTLOCT_prio_4; | ||
481 | __be64 TTTLOCT_NOFRM_prio_4; | ||
482 | __be64 TOCT_prio_4; | ||
483 | |||
484 | __be64 TTTLOCT_prio_5; | ||
485 | __be64 TTTLOCT_NOFRM_prio_5; | ||
486 | __be64 TOCT_prio_5; | ||
487 | |||
488 | __be64 TTTLOCT_prio_6; | ||
489 | __be64 TTTLOCT_NOFRM_prio_6; | ||
490 | __be64 TOCT_prio_6; | ||
491 | |||
492 | __be64 TTTLOCT_prio_7; | ||
493 | __be64 TTTLOCT_NOFRM_prio_7; | ||
494 | __be64 TOCT_prio_7; | ||
495 | |||
496 | __be64 TTTLOCT_novlan; | ||
497 | __be64 TTTLOCT_NOFRM_novlan; | ||
498 | __be64 TOCT_novlan; | ||
499 | |||
500 | __be64 TTTLOCT_loopbk; | ||
501 | __be64 TTTLOCT_NOFRM_loopbk; | ||
502 | __be64 TOCT_loopbk; | ||
503 | |||
504 | /* Total frames transmitted with a good CRC that are not aborted */ | ||
505 | __be64 TTOT_prio_0; | ||
506 | /* Total number of frames transmitted with 802.1Q encapsulation */ | ||
507 | __be64 T1Q_prio_0; | ||
508 | __be64 reserved13; | ||
509 | |||
510 | __be64 TTOT_prio_1; | ||
511 | __be64 T1Q_prio_1; | ||
512 | __be64 reserved14; | ||
513 | |||
514 | __be64 TTOT_prio_2; | ||
515 | __be64 T1Q_prio_2; | ||
516 | __be64 reserved15; | ||
517 | |||
518 | __be64 TTOT_prio_3; | ||
519 | __be64 T1Q_prio_3; | ||
520 | __be64 reserved16; | ||
521 | |||
522 | __be64 TTOT_prio_4; | ||
523 | __be64 T1Q_prio_4; | ||
524 | __be64 reserved17; | ||
525 | |||
526 | __be64 TTOT_prio_5; | ||
527 | __be64 T1Q_prio_5; | ||
528 | __be64 reserved18; | ||
529 | |||
530 | __be64 TTOT_prio_6; | ||
531 | __be64 T1Q_prio_6; | ||
532 | __be64 reserved19; | ||
533 | |||
534 | __be64 TTOT_prio_7; | ||
535 | __be64 T1Q_prio_7; | ||
536 | __be64 reserved20; | ||
537 | |||
538 | __be64 TTOT_novlan; | ||
539 | __be64 T1Q_novlan; | ||
540 | __be64 reserved21; | ||
541 | |||
542 | __be64 TTOT_loopbk; | ||
543 | __be64 T1Q_loopbk; | ||
544 | __be64 reserved22; | ||
545 | |||
546 | /* Received frames with a length greater than MTU octets and a bad CRC */ | ||
547 | __be32 RJBBR; | ||
548 | /* Received frames with a bad CRC that are not runts, jabbers, | ||
549 | or alignment errors */ | ||
550 | __be32 RCRC; | ||
551 | /* Received frames with SFD with a length of less than 64 octets and a | ||
552 | bad CRC */ | ||
553 | __be32 RRUNT; | ||
554 | /* Received frames with a length less than 64 octets and a good CRC */ | ||
555 | __be32 RSHORT; | ||
556 | /* Total Number of Received Packets Dropped */ | ||
557 | __be32 RDROP; | ||
558 | /* Drop due to overflow */ | ||
559 | __be32 RdropOvflw; | ||
560 | /* Drop due to length error */ | ||
561 | __be32 RdropLength; | ||
562 | /* Total of good frames. Does not include frames received with | ||
563 | frame-too-long, FCS, or length errors */ | ||
564 | __be32 RTOTFRMS; | ||
565 | /* Total dropped transmitted packets */ | ||
566 | __be32 TDROP; | ||
567 | }; | ||
568 | |||
569 | |||
570 | #endif | ||
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c new file mode 100644 index 000000000000..a0545209e507 --- /dev/null +++ b/drivers/net/mlx4/en_resources.c | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/vmalloc.h> | ||
35 | #include <linux/mlx4/qp.h> | ||
36 | |||
37 | #include "mlx4_en.h" | ||
38 | |||
39 | void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, | ||
40 | int is_tx, int rss, int qpn, int cqn, int srqn, | ||
41 | struct mlx4_qp_context *context) | ||
42 | { | ||
43 | struct mlx4_en_dev *mdev = priv->mdev; | ||
44 | |||
45 | memset(context, 0, sizeof *context); | ||
46 | context->flags = cpu_to_be32(7 << 16 | rss << 13); | ||
47 | context->pd = cpu_to_be32(mdev->priv_pdn); | ||
48 | context->mtu_msgmax = 0xff; | ||
49 | context->rq_size_stride = 0; | ||
50 | if (is_tx) | ||
51 | context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); | ||
52 | else | ||
53 | context->sq_size_stride = 1; | ||
54 | context->usr_page = cpu_to_be32(mdev->priv_uar.index); | ||
55 | context->local_qpn = cpu_to_be32(qpn); | ||
56 | context->pri_path.ackto = 1 & 0x07; | ||
57 | context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; | ||
58 | context->pri_path.counter_index = 0xff; | ||
59 | context->cqn_send = cpu_to_be32(cqn); | ||
60 | context->cqn_recv = cpu_to_be32(cqn); | ||
61 | context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); | ||
62 | if (!rss) | ||
63 | context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn); | ||
64 | } | ||
65 | |||
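For TX QPs the context packs the ring geometry into a single byte: log2 of the ring size in the upper bits (shifted left by 3) and log2 of the WQE stride, minus 4, in the low bits. A sketch with assumed ring geometry (ilog2_u is a user-space stand-in for the kernel's ilog2):

    #include <stdio.h>

    /* stand-in for the kernel's ilog2() */
    static int ilog2_u(unsigned int v)
    {
            int l = -1;

            while (v) {
                    v >>= 1;
                    l++;
            }
            return l;
    }

    int main(void)
    {
            unsigned int size = 1024; /* assumed TX ring size (entries) */
            unsigned int stride = 64; /* assumed WQE stride in bytes */
            unsigned char sq_size_stride = ilog2_u(size) << 3 | (ilog2_u(stride) - 4);

            printf("sq_size_stride = 0x%02x\n", sq_size_stride); /* 0x52 */
            return 0;
    }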
66 | |||
67 | int mlx4_en_map_buffer(struct mlx4_buf *buf) | ||
68 | { | ||
69 | struct page **pages; | ||
70 | int i; | ||
71 | |||
72 | if (BITS_PER_LONG == 64 || buf->nbufs == 1) | ||
73 | return 0; | ||
74 | |||
75 | pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); | ||
76 | if (!pages) | ||
77 | return -ENOMEM; | ||
78 | |||
79 | for (i = 0; i < buf->nbufs; ++i) | ||
80 | pages[i] = virt_to_page(buf->page_list[i].buf); | ||
81 | |||
82 | buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); | ||
83 | kfree(pages); | ||
84 | if (!buf->direct.buf) | ||
85 | return -ENOMEM; | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | void mlx4_en_unmap_buffer(struct mlx4_buf *buf) | ||
91 | { | ||
92 | if (BITS_PER_LONG == 64 || buf->nbufs == 1) | ||
93 | return; | ||
94 | |||
95 | vunmap(buf->direct.buf); | ||
96 | } | ||
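The map/unmap pair above only resorts to vmap() when the hardware queue was allocated as multiple chunks on a 32-bit kernel; on 64-bit, or when the buffer is a single chunk, it is already virtually contiguous. A sketch of that decision:

    #include <stdio.h>

    static int needs_vmap(int bits_per_long, int nbufs)
    {
            /* mirrors the early-return condition in the driver */
            return !(bits_per_long == 64 || nbufs == 1);
    }

    int main(void)
    {
            printf("64-bit, 4 chunks: %d\n", needs_vmap(64, 4)); /* 0 */
            printf("32-bit, 4 chunks: %d\n", needs_vmap(32, 4)); /* 1 */
            printf("32-bit, 1 chunk:  %d\n", needs_vmap(32, 1)); /* 0 */
            return 0;
    }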
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c new file mode 100644 index 000000000000..6232227f56c3 --- /dev/null +++ b/drivers/net/mlx4/en_rx.c | |||
@@ -0,0 +1,1080 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/mlx4/cq.h> | ||
35 | #include <linux/mlx4/qp.h> | ||
36 | #include <linux/skbuff.h> | ||
37 | #include <linux/if_ether.h> | ||
38 | #include <linux/if_vlan.h> | ||
39 | #include <linux/vmalloc.h> | ||
40 | |||
41 | #include "mlx4_en.h" | ||
42 | |||
43 | static void *get_wqe(struct mlx4_en_rx_ring *ring, int n) | ||
44 | { | ||
45 | int offset = n << ring->srq.wqe_shift; | ||
46 | return ring->buf + offset; | ||
47 | } | ||
48 | |||
49 | static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type) | ||
50 | { | ||
51 | return; | ||
52 | } | ||
53 | |||
54 | static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr, | ||
55 | void **ip_hdr, void **tcpudp_hdr, | ||
56 | u64 *hdr_flags, void *priv) | ||
57 | { | ||
58 | *mac_hdr = page_address(frags->page) + frags->page_offset; | ||
59 | *ip_hdr = *mac_hdr + ETH_HLEN; | ||
60 | *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr)); | ||
61 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv, | ||
67 | struct mlx4_en_rx_desc *rx_desc, | ||
68 | struct skb_frag_struct *skb_frags, | ||
69 | struct mlx4_en_rx_alloc *ring_alloc, | ||
70 | int i) | ||
71 | { | ||
72 | struct mlx4_en_dev *mdev = priv->mdev; | ||
73 | struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; | ||
74 | struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i]; | ||
75 | struct page *page; | ||
76 | dma_addr_t dma; | ||
77 | |||
78 | if (page_alloc->offset == frag_info->last_offset) { | ||
79 | /* Allocate new page */ | ||
80 | page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER); | ||
81 | if (!page) | ||
82 | return -ENOMEM; | ||
83 | |||
84 | skb_frags[i].page = page_alloc->page; | ||
85 | skb_frags[i].page_offset = page_alloc->offset; | ||
86 | page_alloc->page = page; | ||
87 | page_alloc->offset = frag_info->frag_align; | ||
88 | } else { | ||
89 | page = page_alloc->page; | ||
90 | get_page(page); | ||
91 | |||
92 | skb_frags[i].page = page; | ||
93 | skb_frags[i].page_offset = page_alloc->offset; | ||
94 | page_alloc->offset += frag_info->frag_stride; | ||
95 | } | ||
96 | dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) + | ||
97 | skb_frags[i].page_offset, frag_info->frag_size, | ||
98 | PCI_DMA_FROMDEVICE); | ||
99 | rx_desc->data[i].addr = cpu_to_be64(dma); | ||
100 | return 0; | ||
101 | } | ||
102 | |||
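The allocator above carves RX fragments out of high-order pages: each call hands out the current page at the current offset (taking a page reference) and advances by frag_stride, and only when the offset reaches last_offset does it swap in a fresh page. A simplified user-space walk with assumed sizes (the real frag_stride, alignment, and MLX4_EN_ALLOC_ORDER come from the driver's frag_info setup):

    #include <stdio.h>

    int main(void)
    {
            unsigned int alloc_size = 16384;  /* assumed: order-2 pages of 4K */
            unsigned int frag_stride = 2048;  /* assumed fragment stride */
            unsigned int last_offset = alloc_size - frag_stride;
            unsigned int offset = 0;          /* assumes frag_align == 0 */
            int frags = 1;                    /* fragment handed out at offset 0 */

            while (offset != last_offset) {
                    offset += frag_stride;
                    frags++;
            }
            printf("%d fragments served per page before replacement\n", frags);
            return 0;
    }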
103 | static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, | ||
104 | struct mlx4_en_rx_ring *ring) | ||
105 | { | ||
106 | struct mlx4_en_rx_alloc *page_alloc; | ||
107 | int i; | ||
108 | |||
109 | for (i = 0; i < priv->num_frags; i++) { | ||
110 | page_alloc = &ring->page_alloc[i]; | ||
111 | page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, | ||
112 | MLX4_EN_ALLOC_ORDER); | ||
113 | if (!page_alloc->page) | ||
114 | goto out; | ||
115 | |||
116 | page_alloc->offset = priv->frag_info[i].frag_align; | ||
117 | mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n", | ||
118 | i, page_alloc->page); | ||
119 | } | ||
120 | return 0; | ||
121 | |||
122 | out: | ||
123 | while (i--) { | ||
124 | page_alloc = &ring->page_alloc[i]; | ||
125 | put_page(page_alloc->page); | ||
126 | page_alloc->page = NULL; | ||
127 | } | ||
128 | return -ENOMEM; | ||
129 | } | ||
130 | |||
131 | static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, | ||
132 | struct mlx4_en_rx_ring *ring) | ||
133 | { | ||
134 | struct mlx4_en_rx_alloc *page_alloc; | ||
135 | int i; | ||
136 | |||
137 | for (i = 0; i < priv->num_frags; i++) { | ||
138 | page_alloc = &ring->page_alloc[i]; | ||
139 | mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", | ||
140 | i, page_count(page_alloc->page)); | ||
141 | |||
142 | put_page(page_alloc->page); | ||
143 | page_alloc->page = NULL; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | |||
148 | static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, | ||
149 | struct mlx4_en_rx_ring *ring, int index) | ||
150 | { | ||
151 | struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index; | ||
152 | struct skb_frag_struct *skb_frags = ring->rx_info + | ||
153 | (index << priv->log_rx_info); | ||
154 | int possible_frags; | ||
155 | int i; | ||
156 | |||
157 | /* Pre-link descriptor */ | ||
158 | rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask); | ||
159 | |||
160 | /* Set size and memtype fields */ | ||
161 | for (i = 0; i < priv->num_frags; i++) { | ||
162 | skb_frags[i].size = priv->frag_info[i].frag_size; | ||
163 | rx_desc->data[i].byte_count = | ||
164 | cpu_to_be32(priv->frag_info[i].frag_size); | ||
165 | rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key); | ||
166 | } | ||
167 | |||
168 | /* If the number of used fragments does not fill up the ring stride, | ||
169 | * remaining (unused) fragments must be padded with null address/size | ||
170 | * and a special memory key */ | ||
171 | possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE; | ||
172 | for (i = priv->num_frags; i < possible_frags; i++) { | ||
173 | rx_desc->data[i].byte_count = 0; | ||
174 | rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD); | ||
175 | rx_desc->data[i].addr = 0; | ||
176 | } | ||
177 | } | ||
178 | |||
179 | |||
180 | static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, | ||
181 | struct mlx4_en_rx_ring *ring, int index) | ||
182 | { | ||
183 | struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); | ||
184 | struct skb_frag_struct *skb_frags = ring->rx_info + | ||
185 | (index << priv->log_rx_info); | ||
186 | int i; | ||
187 | |||
188 | for (i = 0; i < priv->num_frags; i++) | ||
189 | if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i)) | ||
190 | goto err; | ||
191 | |||
192 | return 0; | ||
193 | |||
194 | err: | ||
195 | while (i--) | ||
196 | put_page(skb_frags[i].page); | ||
197 | return -ENOMEM; | ||
198 | } | ||
199 | |||
200 | static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) | ||
201 | { | ||
202 | *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); | ||
203 | } | ||
204 | |||
205 | static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) | ||
206 | { | ||
207 | struct mlx4_en_dev *mdev = priv->mdev; | ||
208 | struct mlx4_en_rx_ring *ring; | ||
209 | int ring_ind; | ||
210 | int buf_ind; | ||
211 | |||
212 | for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { | ||
213 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { | ||
214 | ring = &priv->rx_ring[ring_ind]; | ||
215 | |||
216 | if (mlx4_en_prepare_rx_desc(priv, ring, | ||
217 | ring->actual_size)) { | ||
218 | if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { | ||
219 | mlx4_err(mdev, "Failed to allocate " | ||
220 | "enough rx buffers\n"); | ||
221 | return -ENOMEM; | ||
222 | } else { | ||
223 | if (netif_msg_rx_err(priv)) | ||
224 | mlx4_warn(mdev, | ||
225 | "Only %d buffers allocated\n", | ||
226 | ring->actual_size); | ||
227 | goto out; | ||
228 | } | ||
229 | } | ||
230 | ring->actual_size++; | ||
231 | ring->prod++; | ||
232 | } | ||
233 | } | ||
234 | out: | ||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | static int mlx4_en_fill_rx_buf(struct net_device *dev, | ||
239 | struct mlx4_en_rx_ring *ring) | ||
240 | { | ||
241 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
242 | int num = 0; | ||
243 | int err; | ||
244 | |||
245 | while ((u32) (ring->prod - ring->cons) < ring->actual_size) { | ||
246 | err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod & | ||
247 | ring->size_mask); | ||
248 | if (err) { | ||
249 | if (netif_msg_rx_err(priv)) | ||
250 | mlx4_warn(priv->mdev, | ||
251 | "Failed preparing rx descriptor\n"); | ||
252 | priv->port_stats.rx_alloc_failed++; | ||
253 | break; | ||
254 | } | ||
255 | ++num; | ||
256 | ++ring->prod; | ||
257 | } | ||
258 | if ((u32) (ring->prod - ring->cons) == ring->size) | ||
259 | ring->full = 1; | ||
260 | |||
261 | return num; | ||
262 | } | ||
263 | |||
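Throughout the RX path, prod and cons are free-running counters: slots are addressed with (counter & size_mask) and occupancy is the unsigned difference (u32)(prod - cons), which stays correct across 32-bit wrap-around because the ring size is a power of two. A small demonstration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t size = 1024, size_mask = size - 1;
            uint32_t prod = 0xfffffff0u; /* both counters close to wrapping */
            uint32_t cons = 0xffffffe0u;

            printf("occupancy = %u, prod slot = %u\n",
                   (unsigned)(prod - cons), (unsigned)(prod & size_mask));
            return 0;
    }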
264 | static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, | ||
265 | struct mlx4_en_rx_ring *ring) | ||
266 | { | ||
267 | struct mlx4_en_dev *mdev = priv->mdev; | ||
268 | struct skb_frag_struct *skb_frags; | ||
269 | struct mlx4_en_rx_desc *rx_desc; | ||
270 | dma_addr_t dma; | ||
271 | int index; | ||
272 | int nr; | ||
273 | |||
274 | mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n", | ||
275 | ring->cons, ring->prod); | ||
276 | |||
277 | /* Unmap and free Rx buffers */ | ||
278 | BUG_ON((u32) (ring->prod - ring->cons) > ring->size); | ||
279 | while (ring->cons != ring->prod) { | ||
280 | index = ring->cons & ring->size_mask; | ||
281 | rx_desc = ring->buf + (index << ring->log_stride); | ||
282 | skb_frags = ring->rx_info + (index << priv->log_rx_info); | ||
283 | mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index); | ||
284 | |||
285 | for (nr = 0; nr < priv->num_frags; nr++) { | ||
286 | mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr); | ||
287 | dma = be64_to_cpu(rx_desc->data[nr].addr); | ||
288 | |||
289 | mlx4_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma); | ||
290 | pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, | ||
291 | PCI_DMA_FROMDEVICE); | ||
292 | put_page(skb_frags[nr].page); | ||
293 | } | ||
294 | ++ring->cons; | ||
295 | } | ||
296 | } | ||
297 | |||
298 | |||
299 | void mlx4_en_rx_refill(struct work_struct *work) | ||
300 | { | ||
301 | struct delayed_work *delay = container_of(work, struct delayed_work, work); | ||
302 | struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, | ||
303 | refill_task); | ||
304 | struct mlx4_en_dev *mdev = priv->mdev; | ||
305 | struct net_device *dev = priv->dev; | ||
306 | struct mlx4_en_rx_ring *ring; | ||
307 | int need_refill = 0; | ||
308 | int i; | ||
309 | |||
310 | mutex_lock(&mdev->state_lock); | ||
311 | if (!mdev->device_up || !priv->port_up) | ||
312 | goto out; | ||
313 | |||
314 | /* We only get here if there are no receive buffers, so we can't race | ||
315 | * with Rx interrupts while filling buffers */ | ||
316 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
317 | ring = &priv->rx_ring[i]; | ||
318 | if (ring->need_refill) { | ||
319 | if (mlx4_en_fill_rx_buf(dev, ring)) { | ||
320 | ring->need_refill = 0; | ||
321 | mlx4_en_update_rx_prod_db(ring); | ||
322 | } else | ||
323 | need_refill = 1; | ||
324 | } | ||
325 | } | ||
326 | if (need_refill) | ||
327 | queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ); | ||
328 | |||
329 | out: | ||
330 | mutex_unlock(&mdev->state_lock); | ||
331 | } | ||
332 | |||
333 | |||
334 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, | ||
335 | struct mlx4_en_rx_ring *ring, u32 size, u16 stride) | ||
336 | { | ||
337 | struct mlx4_en_dev *mdev = priv->mdev; | ||
338 | int err; | ||
339 | int tmp; | ||
340 | |||
341 | /* Sanity check SRQ size before proceeding */ | ||
342 | if (size >= mdev->dev->caps.max_srq_wqes) | ||
343 | return -EINVAL; | ||
344 | |||
345 | ring->prod = 0; | ||
346 | ring->cons = 0; | ||
347 | ring->size = size; | ||
348 | ring->size_mask = size - 1; | ||
349 | ring->stride = stride; | ||
350 | ring->log_stride = ffs(ring->stride) - 1; | ||
351 | ring->buf_size = ring->size * ring->stride; | ||
352 | |||
353 | tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * | ||
354 | sizeof(struct skb_frag_struct)); | ||
355 | ring->rx_info = vmalloc(tmp); | ||
356 | if (!ring->rx_info) { | ||
357 | mlx4_err(mdev, "Failed allocating rx_info ring\n"); | ||
358 | return -ENOMEM; | ||
359 | } | ||
360 | mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", | ||
361 | ring->rx_info, tmp); | ||
362 | |||
363 | err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, | ||
364 | ring->buf_size, 2 * PAGE_SIZE); | ||
365 | if (err) | ||
366 | goto err_ring; | ||
367 | |||
368 | err = mlx4_en_map_buffer(&ring->wqres.buf); | ||
369 | if (err) { | ||
370 | mlx4_err(mdev, "Failed to map RX buffer\n"); | ||
371 | goto err_hwq; | ||
372 | } | ||
373 | ring->buf = ring->wqres.buf.direct.buf; | ||
374 | |||
375 | /* Configure LRO manager */ | ||
376 | memset(&ring->lro, 0, sizeof(struct net_lro_mgr)); | ||
377 | ring->lro.dev = priv->dev; | ||
378 | ring->lro.features = LRO_F_NAPI; | ||
379 | ring->lro.frag_align_pad = NET_IP_ALIGN; | ||
380 | ring->lro.ip_summed = CHECKSUM_UNNECESSARY; | ||
381 | ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY; | ||
382 | ring->lro.max_desc = mdev->profile.num_lro; | ||
383 | ring->lro.max_aggr = MAX_SKB_FRAGS; | ||
384 | ring->lro.lro_arr = kzalloc(mdev->profile.num_lro * | ||
385 | sizeof(struct net_lro_desc), | ||
386 | GFP_KERNEL); | ||
387 | if (!ring->lro.lro_arr) { | ||
388 | mlx4_err(mdev, "Failed to allocate lro array\n"); | ||
389 | goto err_map; | ||
390 | } | ||
391 | ring->lro.get_frag_header = mlx4_en_get_frag_header; | ||
392 | |||
393 | return 0; | ||
394 | |||
395 | err_map: | ||
396 | mlx4_en_unmap_buffer(&ring->wqres.buf); | ||
397 | err_hwq: | ||
398 | mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); | ||
399 | err_ring: | ||
400 | vfree(ring->rx_info); | ||
401 | ring->rx_info = NULL; | ||
402 | return err; | ||
403 | } | ||
404 | |||
405 | int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) | ||
406 | { | ||
407 | struct mlx4_en_dev *mdev = priv->mdev; | ||
408 | struct mlx4_wqe_srq_next_seg *next; | ||
409 | struct mlx4_en_rx_ring *ring; | ||
410 | int i; | ||
411 | int ring_ind; | ||
412 | int err; | ||
413 | int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + | ||
414 | DS_SIZE * priv->num_frags); | ||
415 | int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE; | ||
416 | |||
417 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { | ||
418 | ring = &priv->rx_ring[ring_ind]; | ||
419 | |||
420 | ring->prod = 0; | ||
421 | ring->cons = 0; | ||
422 | ring->actual_size = 0; | ||
423 | ring->cqn = priv->rx_cq[ring_ind].mcq.cqn; | ||
424 | |||
425 | ring->stride = stride; | ||
426 | ring->log_stride = ffs(ring->stride) - 1; | ||
427 | ring->buf_size = ring->size * ring->stride; | ||
428 | |||
429 | memset(ring->buf, 0, ring->buf_size); | ||
430 | mlx4_en_update_rx_prod_db(ring); | ||
431 | |||
432 | /* Initialize all descriptors */ | ||
433 | for (i = 0; i < ring->size; i++) | ||
434 | mlx4_en_init_rx_desc(priv, ring, i); | ||
435 | |||
436 | /* Initialize page allocators */ | ||
437 | err = mlx4_en_init_allocator(priv, ring); | ||
438 | if (err) { | ||
439 | mlx4_err(mdev, "Failed initializing ring allocator\n"); | ||
440 | goto err_allocator; | ||
441 | } | ||
442 | |||
443 | /* Fill Rx buffers */ | ||
444 | ring->full = 0; | ||
445 | } | ||
446 | if (mlx4_en_fill_rx_buffers(priv)) | ||
447 | goto err_buffers; | ||
448 | |||
449 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { | ||
450 | ring = &priv->rx_ring[ring_ind]; | ||
451 | |||
452 | mlx4_en_update_rx_prod_db(ring); | ||
453 | |||
454 | /* Configure SRQ representing the ring */ | ||
455 | ring->srq.max = ring->size; | ||
456 | ring->srq.max_gs = max_gs; | ||
457 | ring->srq.wqe_shift = ilog2(ring->stride); | ||
458 | |||
459 | for (i = 0; i < ring->srq.max; ++i) { | ||
460 | next = get_wqe(ring, i); | ||
461 | next->next_wqe_index = | ||
462 | cpu_to_be16((i + 1) & (ring->srq.max - 1)); | ||
463 | } | ||
464 | |||
465 | err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt, | ||
466 | ring->wqres.db.dma, &ring->srq); | ||
467 | if (err) { | ||
468 | mlx4_err(mdev, "Failed to allocate srq\n"); | ||
469 | goto err_srq; | ||
470 | } | ||
471 | ring->srq.event = mlx4_en_srq_event; | ||
472 | } | ||
473 | |||
474 | return 0; | ||
475 | |||
476 | err_srq: | ||
477 | while (ring_ind >= 0) { | ||
478 | ring = &priv->rx_ring[ring_ind]; | ||
479 | mlx4_srq_free(mdev->dev, &ring->srq); | ||
480 | ring_ind--; | ||
481 | } | ||
482 | |||
483 | err_buffers: | ||
484 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) | ||
485 | mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]); | ||
486 | |||
487 | ring_ind = priv->rx_ring_num - 1; | ||
488 | err_allocator: | ||
489 | while (ring_ind >= 0) { | ||
490 | mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]); | ||
491 | ring_ind--; | ||
492 | } | ||
493 | return err; | ||
494 | } | ||
495 | |||
496 | void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, | ||
497 | struct mlx4_en_rx_ring *ring) | ||
498 | { | ||
499 | struct mlx4_en_dev *mdev = priv->mdev; | ||
500 | |||
501 | kfree(ring->lro.lro_arr); | ||
502 | mlx4_en_unmap_buffer(&ring->wqres.buf); | ||
503 | mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); | ||
504 | vfree(ring->rx_info); | ||
505 | ring->rx_info = NULL; | ||
506 | } | ||
507 | |||
508 | void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, | ||
509 | struct mlx4_en_rx_ring *ring) | ||
510 | { | ||
511 | struct mlx4_en_dev *mdev = priv->mdev; | ||
512 | |||
513 | mlx4_srq_free(mdev->dev, &ring->srq); | ||
514 | mlx4_en_free_rx_buf(priv, ring); | ||
515 | mlx4_en_destroy_allocator(priv, ring); | ||
516 | } | ||
517 | |||
518 | |||
519 | /* Unmap a completed descriptor and free unused pages */ | ||
520 | static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, | ||
521 | struct mlx4_en_rx_desc *rx_desc, | ||
522 | struct skb_frag_struct *skb_frags, | ||
523 | struct skb_frag_struct *skb_frags_rx, | ||
524 | struct mlx4_en_rx_alloc *page_alloc, | ||
525 | int length) | ||
526 | { | ||
527 | struct mlx4_en_dev *mdev = priv->mdev; | ||
528 | struct mlx4_en_frag_info *frag_info; | ||
529 | int nr; | ||
530 | dma_addr_t dma; | ||
531 | |||
532 | /* Collect used fragments while replacing them in the HW descriptors */ | ||
533 | for (nr = 0; nr < priv->num_frags; nr++) { | ||
534 | frag_info = &priv->frag_info[nr]; | ||
535 | if (length <= frag_info->frag_prefix_size) | ||
536 | break; | ||
537 | |||
538 | /* Save page reference in skb */ | ||
539 | skb_frags_rx[nr].page = skb_frags[nr].page; | ||
540 | skb_frags_rx[nr].size = skb_frags[nr].size; | ||
541 | skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset; | ||
542 | dma = be64_to_cpu(rx_desc->data[nr].addr); | ||
543 | |||
544 | /* Allocate a replacement page */ | ||
545 | if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr)) | ||
546 | goto fail; | ||
547 | |||
548 | /* Unmap buffer */ | ||
549 | pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, | ||
550 | PCI_DMA_FROMDEVICE); | ||
551 | } | ||
552 | /* Adjust size of last fragment to match actual length */ | ||
553 | skb_frags_rx[nr - 1].size = length - | ||
554 | priv->frag_info[nr - 1].frag_prefix_size; | ||
555 | return nr; | ||
556 | |||
557 | fail: | ||
558 | /* Drop all accumulated fragments (which have already been replaced in | ||
559 | * the descriptor) of this packet; remaining fragments are reused... */ | ||
560 | while (nr > 0) { | ||
561 | nr--; | ||
562 | put_page(skb_frags_rx[nr].page); | ||
563 | } | ||
564 | return 0; | ||
565 | } | ||
566 | |||
567 | |||
568 | static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, | ||
569 | struct mlx4_en_rx_desc *rx_desc, | ||
570 | struct skb_frag_struct *skb_frags, | ||
571 | struct mlx4_en_rx_alloc *page_alloc, | ||
572 | unsigned int length) | ||
573 | { | ||
574 | struct mlx4_en_dev *mdev = priv->mdev; | ||
575 | struct sk_buff *skb; | ||
576 | void *va; | ||
577 | int used_frags; | ||
578 | dma_addr_t dma; | ||
579 | |||
580 | skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN); | ||
581 | if (!skb) { | ||
582 | mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n"); | ||
583 | return NULL; | ||
584 | } | ||
585 | skb->dev = priv->dev; | ||
586 | skb_reserve(skb, NET_IP_ALIGN); | ||
587 | skb->len = length; | ||
588 | skb->truesize = length + sizeof(struct sk_buff); | ||
589 | |||
590 | /* Get pointer to first fragment so we can copy the headers into the | ||
591 | * (linear part of the) skb */ | ||
592 | va = page_address(skb_frags[0].page) + skb_frags[0].page_offset; | ||
593 | |||
594 | if (length <= SMALL_PACKET_SIZE) { | ||
595 | /* We are copying all relevant data to the skb - temporarily | ||
596 | * sync buffers for the copy */ | ||
597 | dma = be64_to_cpu(rx_desc->data[0].addr); | ||
598 | dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0, | ||
599 | length, DMA_FROM_DEVICE); | ||
600 | skb_copy_to_linear_data(skb, va, length); | ||
601 | dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0, | ||
602 | length, DMA_FROM_DEVICE); | ||
603 | skb->tail += length; | ||
604 | } else { | ||
605 | |||
606 | /* Move relevant fragments to skb */ | ||
607 | used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags, | ||
608 | skb_shinfo(skb)->frags, | ||
609 | page_alloc, length); | ||
610 | skb_shinfo(skb)->nr_frags = used_frags; | ||
611 | |||
612 | /* Copy headers into the skb linear buffer */ | ||
613 | memcpy(skb->data, va, HEADER_COPY_SIZE); | ||
614 | skb->tail += HEADER_COPY_SIZE; | ||
615 | |||
616 | /* Skip headers in first fragment */ | ||
617 | skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE; | ||
618 | |||
619 | /* Adjust size of first fragment */ | ||
620 | skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE; | ||
621 | skb->data_len = length - HEADER_COPY_SIZE; | ||
622 | } | ||
623 | return skb; | ||
624 | } | ||
625 | |||
626 | static void mlx4_en_copy_desc(struct mlx4_en_priv *priv, | ||
627 | struct mlx4_en_rx_ring *ring, | ||
628 | int from, int to, int num) | ||
629 | { | ||
630 | struct skb_frag_struct *skb_frags_from; | ||
631 | struct skb_frag_struct *skb_frags_to; | ||
632 | struct mlx4_en_rx_desc *rx_desc_from; | ||
633 | struct mlx4_en_rx_desc *rx_desc_to; | ||
634 | int from_index, to_index; | ||
635 | int nr, i; | ||
636 | |||
637 | for (i = 0; i < num; i++) { | ||
638 | from_index = (from + i) & ring->size_mask; | ||
639 | to_index = (to + i) & ring->size_mask; | ||
640 | skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info); | ||
641 | skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info); | ||
642 | rx_desc_from = ring->buf + (from_index << ring->log_stride); | ||
643 | rx_desc_to = ring->buf + (to_index << ring->log_stride); | ||
644 | |||
645 | for (nr = 0; nr < priv->num_frags; nr++) { | ||
646 | skb_frags_to[nr].page = skb_frags_from[nr].page; | ||
647 | skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset; | ||
648 | rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr; | ||
649 | } | ||
650 | } | ||
651 | } | ||
652 | |||
653 | |||
654 | int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) | ||
655 | { | ||
656 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
657 | struct mlx4_en_dev *mdev = priv->mdev; | ||
658 | struct mlx4_cqe *cqe; | ||
659 | struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; | ||
660 | struct skb_frag_struct *skb_frags; | ||
661 | struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS]; | ||
662 | struct mlx4_en_rx_desc *rx_desc; | ||
663 | struct sk_buff *skb; | ||
664 | int index; | ||
665 | int nr; | ||
666 | unsigned int length; | ||
667 | int polled = 0; | ||
668 | int ip_summed; | ||
669 | |||
670 | if (!priv->port_up) | ||
671 | return 0; | ||
672 | |||
673 | /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx | ||
674 | * descriptor offset can be deduced from the CQE index instead of | ||
675 | * reading 'cqe->index' */ | ||
676 | index = cq->mcq.cons_index & ring->size_mask; | ||
677 | cqe = &cq->buf[index]; | ||
678 | |||
679 | /* Process all completed CQEs */ | ||
680 | while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, | ||
681 | cq->mcq.cons_index & cq->size)) { | ||
682 | |||
683 | skb_frags = ring->rx_info + (index << priv->log_rx_info); | ||
684 | rx_desc = ring->buf + (index << ring->log_stride); | ||
685 | |||
686 | /* | ||
687 | * make sure we read the CQE after we read the ownership bit | ||
688 | */ | ||
689 | rmb(); | ||
690 | |||
691 | /* Drop packet on bad receive or bad checksum */ | ||
692 | if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == | ||
693 | MLX4_CQE_OPCODE_ERROR)) { | ||
694 | mlx4_err(mdev, "CQE completed in error - vendor " | ||
695 | "syndrom:%d syndrom:%d\n", | ||
696 | ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome, | ||
697 | ((struct mlx4_err_cqe *) cqe)->syndrome); | ||
698 | goto next; | ||
699 | } | ||
700 | if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { | ||
701 | mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n"); | ||
702 | goto next; | ||
703 | } | ||
704 | |||
705 | /* | ||
706 | * Packet is OK - process it. | ||
707 | */ | ||
708 | length = be32_to_cpu(cqe->byte_cnt); | ||
709 | ring->bytes += length; | ||
710 | ring->packets++; | ||
711 | |||
712 | if (likely(priv->rx_csum)) { | ||
713 | if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && | ||
714 | (cqe->checksum == cpu_to_be16(0xffff))) { | ||
715 | priv->port_stats.rx_chksum_good++; | ||
716 | /* This packet is eligible for LRO if it is: | ||
717 | * - DIX Ethernet (type interpretation) | ||
718 | * - TCP/IP (v4) | ||
719 | * - without IP options | ||
720 | * - not an IP fragment */ | ||
721 | if (mlx4_en_can_lro(cqe->status) && | ||
722 | dev->features & NETIF_F_LRO) { | ||
723 | |||
724 | nr = mlx4_en_complete_rx_desc( | ||
725 | priv, rx_desc, | ||
726 | skb_frags, lro_frags, | ||
727 | ring->page_alloc, length); | ||
728 | if (!nr) | ||
729 | goto next; | ||
730 | |||
731 | if (priv->vlgrp && (cqe->vlan_my_qpn & | ||
732 | cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) { | ||
733 | lro_vlan_hwaccel_receive_frags( | ||
734 | &ring->lro, lro_frags, | ||
735 | length, length, | ||
736 | priv->vlgrp, | ||
737 | be16_to_cpu(cqe->sl_vid), | ||
738 | NULL, 0); | ||
739 | } else | ||
740 | lro_receive_frags(&ring->lro, | ||
741 | lro_frags, | ||
742 | length, | ||
743 | length, | ||
744 | NULL, 0); | ||
745 | |||
746 | goto next; | ||
747 | } | ||
748 | |||
749 | /* LRO not possible, complete processing here */ | ||
750 | ip_summed = CHECKSUM_UNNECESSARY; | ||
751 | INC_PERF_COUNTER(priv->pstats.lro_misses); | ||
752 | } else { | ||
753 | ip_summed = CHECKSUM_NONE; | ||
754 | priv->port_stats.rx_chksum_none++; | ||
755 | } | ||
756 | } else { | ||
757 | ip_summed = CHECKSUM_NONE; | ||
758 | priv->port_stats.rx_chksum_none++; | ||
759 | } | ||
760 | |||
761 | skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, | ||
762 | ring->page_alloc, length); | ||
763 | if (!skb) { | ||
764 | priv->stats.rx_dropped++; | ||
765 | goto next; | ||
766 | } | ||
767 | |||
768 | skb->ip_summed = ip_summed; | ||
769 | skb->protocol = eth_type_trans(skb, dev); | ||
770 | |||
771 | /* Push it up the stack */ | ||
772 | if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) & | ||
773 | MLX4_CQE_VLAN_PRESENT_MASK)) { | ||
774 | vlan_hwaccel_receive_skb(skb, priv->vlgrp, | ||
775 | be16_to_cpu(cqe->sl_vid)); | ||
776 | } else | ||
777 | netif_receive_skb(skb); | ||
778 | |||
779 | dev->last_rx = jiffies; | ||
780 | |||
781 | next: | ||
782 | ++cq->mcq.cons_index; | ||
783 | index = (cq->mcq.cons_index) & ring->size_mask; | ||
784 | cqe = &cq->buf[index]; | ||
785 | if (++polled == budget) { | ||
786 | /* We are here because we reached the NAPI budget - | ||
787 | * flush only pending LRO sessions */ | ||
788 | lro_flush_all(&ring->lro); | ||
789 | goto out; | ||
790 | } | ||
791 | } | ||
792 | |||
793 | /* If CQ is empty flush all LRO sessions unconditionally */ | ||
794 | lro_flush_all(&ring->lro); | ||
795 | |||
796 | out: | ||
797 | AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); | ||
798 | mlx4_cq_set_ci(&cq->mcq); | ||
799 | wmb(); /* ensure HW sees CQ consumer before we post new buffers */ | ||
800 | ring->cons = cq->mcq.cons_index; | ||
801 | ring->prod += polled; /* Polled descriptors were reallocated in place */ | ||
802 | if (unlikely(!ring->full)) { | ||
803 | mlx4_en_copy_desc(priv, ring, ring->cons - polled, | ||
804 | ring->prod - polled, polled); | ||
805 | mlx4_en_fill_rx_buf(dev, ring); | ||
806 | } | ||
807 | mlx4_en_update_rx_prod_db(ring); | ||
808 | return polled; | ||
809 | } | ||
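The exit condition of the polling loop above deserves a gloss: hardware flips the owner bit it writes into each CQE on every pass over the CQ, so software recognizes fresh completions by XNOR-ing that bit with the parity of its own consumer index. A minimal userspace sketch of just this arithmetic (the CQ size and completion count below are made up, not the driver's):

#include <stdio.h>

#define XNOR(x, y)	(!(x) == !(y))
#define CQ_SIZE		8	/* hypothetical; must be a power of two */

int main(void)
{
	unsigned int owner[CQ_SIZE];
	unsigned int cons, hw_done = 11;	/* pretend HW completed 11 CQEs */

	/* HW side: each completion stamps the pass parity into the CQE */
	for (cons = 0; cons < hw_done; cons++)
		owner[cons % CQ_SIZE] = (cons / CQ_SIZE) & 1;

	/* SW side, second pass: a CQE is fresh while its owner bit matches
	 * the parity of the consumer index (cons & CQ_SIZE gives the parity) */
	for (cons = CQ_SIZE; cons < 2 * CQ_SIZE; cons++)
		printf("cons=%2u fresh=%d\n", cons,
		       XNOR(owner[cons % CQ_SIZE], cons & CQ_SIZE));
	return 0;	/* cons 8..10 print fresh=1, cons 11..15 print fresh=0 */
}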
810 | |||
811 | |||
812 | void mlx4_en_rx_irq(struct mlx4_cq *mcq) | ||
813 | { | ||
814 | struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); | ||
815 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); | ||
816 | |||
817 | if (priv->port_up) | ||
818 | netif_rx_schedule(cq->dev, &cq->napi); | ||
819 | else | ||
820 | mlx4_en_arm_cq(priv, cq); | ||
821 | } | ||
822 | |||
823 | /* Rx CQ polling - called by NAPI */ | ||
824 | int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) | ||
825 | { | ||
826 | struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); | ||
827 | struct net_device *dev = cq->dev; | ||
828 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
829 | int done; | ||
830 | |||
831 | done = mlx4_en_process_rx_cq(dev, cq, budget); | ||
832 | |||
833 | /* If we used up all the quota - we're probably not done yet... */ | ||
834 | if (done == budget) | ||
835 | INC_PERF_COUNTER(priv->pstats.napi_quota); | ||
836 | else { | ||
837 | /* Done for now */ | ||
838 | netif_rx_complete(dev, napi); | ||
839 | mlx4_en_arm_cq(priv, cq); | ||
840 | } | ||
841 | return done; | ||
842 | } | ||
843 | |||
844 | |||
845 | /* Calculate the last offset position that accommodates a full fragment | ||
846 | * (assuming fragment size = stride-align) */ | ||
847 | static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align) | ||
848 | { | ||
849 | u16 res = MLX4_EN_ALLOC_SIZE % stride; | ||
850 | u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align; | ||
851 | |||
852 | mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d " | ||
853 | "res:%d offset:%d\n", stride, align, res, offset); | ||
854 | return offset; | ||
855 | } | ||
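The formula steps one full stride, plus the page-tail remainder, back from the end of the allocation and re-adds the alignment so the fragment data stays aligned. A standalone check of the arithmetic, assuming an illustrative MLX4_EN_ALLOC_SIZE of 8192:

#include <stdio.h>

#define ALLOC_SIZE 8192	/* assumed stand-in for MLX4_EN_ALLOC_SIZE */

static int last_alloc_offset(unsigned short stride, unsigned short align)
{
	unsigned short res = ALLOC_SIZE % stride;

	/* back off one stride plus the remainder from the end, then re-add
	 * the alignment so the fragment data stays aligned */
	return ALLOC_SIZE - stride - res + align;
}

int main(void)
{
	int stride = 1536, align = 2;	/* illustrative values */
	int off = last_alloc_offset(stride, align);

	printf("offset=%d, offset+stride=%d <= %d\n",
	       off, off + stride, ALLOC_SIZE);	/* 6146, 7682 <= 8192 */
	return 0;
}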
856 | |||
857 | |||
858 | static int frag_sizes[] = { | ||
859 | FRAG_SZ0, | ||
860 | FRAG_SZ1, | ||
861 | FRAG_SZ2, | ||
862 | FRAG_SZ3 | ||
863 | }; | ||
864 | |||
865 | void mlx4_en_calc_rx_buf(struct net_device *dev) | ||
866 | { | ||
867 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
868 | int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE; | ||
869 | int buf_size = 0; | ||
870 | int i = 0; | ||
871 | |||
872 | while (buf_size < eff_mtu) { | ||
873 | priv->frag_info[i].frag_size = | ||
874 | (eff_mtu > buf_size + frag_sizes[i]) ? | ||
875 | frag_sizes[i] : eff_mtu - buf_size; | ||
876 | priv->frag_info[i].frag_prefix_size = buf_size; | ||
877 | if (!i) { | ||
878 | priv->frag_info[i].frag_align = NET_IP_ALIGN; | ||
879 | priv->frag_info[i].frag_stride = | ||
880 | ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES); | ||
881 | } else { | ||
882 | priv->frag_info[i].frag_align = 0; | ||
883 | priv->frag_info[i].frag_stride = | ||
884 | ALIGN(frag_sizes[i], SMP_CACHE_BYTES); | ||
885 | } | ||
886 | priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset( | ||
887 | priv, priv->frag_info[i].frag_stride, | ||
888 | priv->frag_info[i].frag_align); | ||
889 | buf_size += priv->frag_info[i].frag_size; | ||
890 | i++; | ||
891 | } | ||
892 | |||
893 | priv->num_frags = i; | ||
894 | priv->rx_skb_size = eff_mtu; | ||
895 | priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct)); | ||
896 | |||
897 | mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " | ||
898 | "num_frags:%d):\n", eff_mtu, priv->num_frags); | ||
899 | for (i = 0; i < priv->num_frags; i++) { | ||
900 | mlx4_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d " | ||
901 | "stride:%d last_offset:%d\n", i, | ||
902 | priv->frag_info[i].frag_size, | ||
903 | priv->frag_info[i].frag_prefix_size, | ||
904 | priv->frag_info[i].frag_align, | ||
905 | priv->frag_info[i].frag_stride, | ||
906 | priv->frag_info[i].last_offset); | ||
907 | } | ||
908 | } | ||
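The loop above greedily packs the effective MTU into the fixed FRAG_SZx buckets and truncates the final fragment; frag_prefix_size records the bytes carried by earlier fragments, which is what mlx4_en_complete_rx_desc() compares the packet length against. A userspace sketch of the split, with assumed bucket sizes of 512/1024/4096/16384:

#include <stdio.h>

int main(void)
{
	/* FRAG_SZ0..3 are assumed here to be 512/1024/4096/16384 */
	int frag_sizes[] = { 512, 1024, 4096, 16384 };
	int eff_mtu = 1500 + 14 + 4 + 8;	/* MTU + ETH + VLAN + LLC/SNAP */
	int buf_size = 0, i = 0;

	while (buf_size < eff_mtu) {
		int sz = (eff_mtu > buf_size + frag_sizes[i]) ?
			 frag_sizes[i] : eff_mtu - buf_size;

		printf("frag %d: size=%d prefix=%d\n", i, sz, buf_size);
		buf_size += sz;
		i++;
	}
	printf("num_frags=%d for eff_mtu=%d\n", i, eff_mtu);
	return 0;	/* 1526 bytes -> frag 0 of 512, frag 1 of 1014 */
}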
909 | |||
910 | /* RSS related functions */ | ||
911 | |||
912 | /* Calculate rss size and map each entry in rss table to rx ring */ | ||
913 | void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv, | ||
914 | struct mlx4_en_rss_map *rss_map, | ||
915 | int num_entries, int num_rings) | ||
916 | { | ||
917 | int i; | ||
918 | |||
919 | rss_map->size = roundup_pow_of_two(num_entries); | ||
920 | mlx4_dbg(DRV, priv, "Setting default RSS map of %d entries\n", | ||
921 | rss_map->size); | ||
922 | |||
923 | for (i = 0; i < rss_map->size; i++) { | ||
924 | rss_map->map[i] = i % num_rings; | ||
925 | mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]); | ||
926 | } | ||
927 | } | ||
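The default map is a plain round-robin of the power-of-two-rounded table over the rx rings; the sketch below reproduces it standalone with hypothetical counts:

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int num_entries = 10, num_rings = 4;	/* hypothetical */
	unsigned int size = roundup_pow_of_two(num_entries);	/* -> 16 */
	unsigned int i;

	for (i = 0; i < size; i++)
		printf("entry %2u -> ring %u\n", i, i % num_rings);
	return 0;
}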
928 | |||
929 | static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) | ||
930 | { | ||
931 | return; | ||
932 | } | ||
933 | |||
934 | |||
935 | static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, | ||
936 | int qpn, int srqn, int cqn, | ||
937 | enum mlx4_qp_state *state, | ||
938 | struct mlx4_qp *qp) | ||
939 | { | ||
940 | struct mlx4_en_dev *mdev = priv->mdev; | ||
941 | struct mlx4_qp_context *context; | ||
942 | int err = 0; | ||
943 | |||
944 | context = kmalloc(sizeof *context, GFP_KERNEL); | ||
945 | if (!context) { | ||
946 | mlx4_err(mdev, "Failed to allocate qp context\n"); | ||
947 | return -ENOMEM; | ||
948 | } | ||
949 | |||
950 | err = mlx4_qp_alloc(mdev->dev, qpn, qp); | ||
951 | if (err) { | ||
952 | mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn); | ||
953 | goto out; | ||
955 | } | ||
956 | qp->event = mlx4_en_sqp_event; | ||
957 | |||
958 | memset(context, 0, sizeof *context); | ||
959 | mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context); | ||
960 | |||
961 | err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state); | ||
962 | if (err) { | ||
963 | mlx4_qp_remove(mdev->dev, qp); | ||
964 | mlx4_qp_free(mdev->dev, qp); | ||
965 | } | ||
966 | out: | ||
967 | kfree(context); | ||
968 | return err; | ||
969 | } | ||
970 | |||
971 | /* Allocate rx qp's and configure them according to rss map */ | ||
972 | int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) | ||
973 | { | ||
974 | struct mlx4_en_dev *mdev = priv->mdev; | ||
975 | struct mlx4_en_rss_map *rss_map = &priv->rss_map; | ||
976 | struct mlx4_qp_context context; | ||
977 | struct mlx4_en_rss_context *rss_context; | ||
978 | void *ptr; | ||
979 | int rss_xor = mdev->profile.rss_xor; | ||
980 | u8 rss_mask = mdev->profile.rss_mask; | ||
981 | int i, srqn, qpn, cqn; | ||
982 | int err = 0; | ||
983 | int good_qps = 0; | ||
984 | |||
985 | mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port); | ||
986 | err = mlx4_qp_reserve_range(mdev->dev, rss_map->size, | ||
987 | rss_map->size, &rss_map->base_qpn); | ||
988 | if (err) { | ||
989 | mlx4_err(mdev, "Failed reserving %d qps for port %u\n", | ||
990 | rss_map->size, priv->port); | ||
991 | return err; | ||
992 | } | ||
993 | |||
994 | for (i = 0; i < rss_map->size; i++) { | ||
995 | cqn = priv->rx_ring[rss_map->map[i]].cqn; | ||
996 | srqn = priv->rx_ring[rss_map->map[i]].srq.srqn; | ||
997 | qpn = rss_map->base_qpn + i; | ||
998 | err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn, | ||
999 | &rss_map->state[i], | ||
1000 | &rss_map->qps[i]); | ||
1001 | if (err) | ||
1002 | goto rss_err; | ||
1003 | |||
1004 | ++good_qps; | ||
1005 | } | ||
1006 | |||
1007 | /* Configure RSS indirection qp */ | ||
1008 | err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn); | ||
1009 | if (err) { | ||
1010 | mlx4_err(mdev, "Failed to reserve range for RSS " | ||
1011 | "indirection qp\n"); | ||
1012 | goto rss_err; | ||
1013 | } | ||
1014 | err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); | ||
1015 | if (err) { | ||
1016 | mlx4_err(mdev, "Failed to allocate RSS indirection QP\n"); | ||
1017 | goto reserve_err; | ||
1018 | } | ||
1019 | rss_map->indir_qp.event = mlx4_en_sqp_event; | ||
1020 | mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, | ||
1021 | priv->rx_ring[0].cqn, 0, &context); | ||
1022 | |||
1023 | ptr = ((void *) &context) + 0x3c; | ||
1024 | rss_context = (struct mlx4_en_rss_context *) ptr; | ||
1025 | rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 | | ||
1026 | (rss_map->base_qpn)); | ||
1027 | rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); | ||
1028 | rss_context->hash_fn = rss_xor & 0x3; | ||
1029 | rss_context->flags = rss_mask << 2; | ||
1030 | |||
1031 | err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, | ||
1032 | &rss_map->indir_qp, &rss_map->indir_state); | ||
1033 | if (err) | ||
1034 | goto indir_err; | ||
1035 | |||
1036 | return 0; | ||
1037 | |||
1038 | indir_err: | ||
1039 | mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, | ||
1040 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); | ||
1041 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); | ||
1042 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); | ||
1043 | reserve_err: | ||
1044 | mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1); | ||
1045 | rss_err: | ||
1046 | for (i = 0; i < good_qps; i++) { | ||
1047 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], | ||
1048 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); | ||
1049 | mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); | ||
1050 | mlx4_qp_free(mdev->dev, &rss_map->qps[i]); | ||
1051 | } | ||
1052 | mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size); | ||
1053 | return err; | ||
1054 | } | ||
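One non-obvious detail above is the RSS context's base_qpn word, which packs log2 of the table size into the top byte above the base QP number. A quick standalone check of that encoding (the QPN value is hypothetical):

#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int size = 16, base_qpn = 0x48;	/* hypothetical values */
	unsigned int word = (ilog2_u(size) << 24) | base_qpn;

	printf("0x%08x\n", word);	/* 0x04000048: log2(size) in top byte */
	return 0;
}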
1055 | |||
1056 | void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv) | ||
1057 | { | ||
1058 | struct mlx4_en_dev *mdev = priv->mdev; | ||
1059 | struct mlx4_en_rss_map *rss_map = &priv->rss_map; | ||
1060 | int i; | ||
1061 | |||
1062 | mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, | ||
1063 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); | ||
1064 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); | ||
1065 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); | ||
1066 | mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1); | ||
1067 | |||
1068 | for (i = 0; i < rss_map->size; i++) { | ||
1069 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], | ||
1070 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); | ||
1071 | mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); | ||
1072 | mlx4_qp_free(mdev->dev, &rss_map->qps[i]); | ||
1073 | } | ||
1074 | mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size); | ||
1075 | } | ||
1076 | |||
1077 | |||
1078 | |||
1079 | |||
1080 | |||
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c new file mode 100644 index 000000000000..8592f8fb8475 --- /dev/null +++ b/drivers/net/mlx4/en_tx.c | |||
@@ -0,0 +1,820 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <asm/page.h> | ||
35 | #include <linux/mlx4/cq.h> | ||
36 | #include <linux/mlx4/qp.h> | ||
37 | #include <linux/skbuff.h> | ||
38 | #include <linux/if_vlan.h> | ||
39 | #include <linux/vmalloc.h> | ||
40 | |||
41 | #include "mlx4_en.h" | ||
42 | |||
43 | enum { | ||
44 | MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ | ||
45 | }; | ||
46 | |||
47 | static int inline_thold __read_mostly = MAX_INLINE; | ||
48 | |||
49 | module_param_named(inline_thold, inline_thold, int, 0444); | ||
50 | MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); | ||
51 | |||
52 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | ||
53 | struct mlx4_en_tx_ring *ring, u32 size, | ||
54 | u16 stride) | ||
55 | { | ||
56 | struct mlx4_en_dev *mdev = priv->mdev; | ||
57 | int tmp; | ||
58 | int err; | ||
59 | |||
60 | ring->size = size; | ||
61 | ring->size_mask = size - 1; | ||
62 | ring->stride = stride; | ||
63 | |||
64 | inline_thold = min(inline_thold, MAX_INLINE); | ||
65 | |||
66 | spin_lock_init(&ring->comp_lock); | ||
67 | |||
68 | tmp = size * sizeof(struct mlx4_en_tx_info); | ||
69 | ring->tx_info = vmalloc(tmp); | ||
70 | if (!ring->tx_info) { | ||
71 | mlx4_err(mdev, "Failed allocating tx_info ring\n"); | ||
72 | return -ENOMEM; | ||
73 | } | ||
74 | mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", | ||
75 | ring->tx_info, tmp); | ||
76 | |||
77 | ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); | ||
78 | if (!ring->bounce_buf) { | ||
79 | mlx4_err(mdev, "Failed allocating bounce buffer\n"); | ||
80 | err = -ENOMEM; | ||
81 | goto err_tx; | ||
82 | } | ||
83 | ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); | ||
84 | |||
85 | err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, | ||
86 | 2 * PAGE_SIZE); | ||
87 | if (err) { | ||
88 | mlx4_err(mdev, "Failed allocating hwq resources\n"); | ||
89 | goto err_bounce; | ||
90 | } | ||
91 | |||
92 | err = mlx4_en_map_buffer(&ring->wqres.buf); | ||
93 | if (err) { | ||
94 | mlx4_err(mdev, "Failed to map TX buffer\n"); | ||
95 | goto err_hwq_res; | ||
96 | } | ||
97 | |||
98 | ring->buf = ring->wqres.buf.direct.buf; | ||
99 | |||
100 | mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " | ||
101 | "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, | ||
102 | ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); | ||
103 | |||
104 | err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); | ||
105 | if (err) { | ||
106 | mlx4_err(mdev, "Failed reserving qp for tx ring.\n"); | ||
107 | goto err_map; | ||
108 | } | ||
109 | |||
110 | err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); | ||
111 | if (err) { | ||
112 | mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn); | ||
113 | goto err_reserve; | ||
114 | } | ||
115 | |||
116 | return 0; | ||
117 | |||
118 | err_reserve: | ||
119 | mlx4_qp_release_range(mdev->dev, ring->qpn, 1); | ||
120 | err_map: | ||
121 | mlx4_en_unmap_buffer(&ring->wqres.buf); | ||
122 | err_hwq_res: | ||
123 | mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); | ||
124 | err_bounce: | ||
125 | kfree(ring->bounce_buf); | ||
126 | ring->bounce_buf = NULL; | ||
127 | err_tx: | ||
128 | vfree(ring->tx_info); | ||
129 | ring->tx_info = NULL; | ||
130 | return err; | ||
131 | } | ||
132 | |||
133 | void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, | ||
134 | struct mlx4_en_tx_ring *ring) | ||
135 | { | ||
136 | struct mlx4_en_dev *mdev = priv->mdev; | ||
137 | mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); | ||
138 | |||
139 | mlx4_qp_remove(mdev->dev, &ring->qp); | ||
140 | mlx4_qp_free(mdev->dev, &ring->qp); | ||
141 | mlx4_qp_release_range(mdev->dev, ring->qpn, 1); | ||
142 | mlx4_en_unmap_buffer(&ring->wqres.buf); | ||
143 | mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); | ||
144 | kfree(ring->bounce_buf); | ||
145 | ring->bounce_buf = NULL; | ||
146 | vfree(ring->tx_info); | ||
147 | ring->tx_info = NULL; | ||
148 | } | ||
149 | |||
150 | int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | ||
151 | struct mlx4_en_tx_ring *ring, | ||
152 | int cq, int srqn) | ||
153 | { | ||
154 | struct mlx4_en_dev *mdev = priv->mdev; | ||
155 | int err; | ||
156 | |||
157 | ring->cqn = cq; | ||
158 | ring->prod = 0; | ||
159 | ring->cons = 0xffffffff; | ||
160 | ring->last_nr_txbb = 1; | ||
161 | ring->poll_cnt = 0; | ||
162 | ring->blocked = 0; | ||
163 | memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); | ||
164 | memset(ring->buf, 0, ring->buf_size); | ||
165 | |||
166 | ring->qp_state = MLX4_QP_STATE_RST; | ||
167 | ring->doorbell_qpn = swab32(ring->qp.qpn << 8); | ||
168 | |||
169 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, | ||
170 | ring->cqn, srqn, &ring->context); | ||
171 | |||
172 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, | ||
173 | &ring->qp, &ring->qp_state); | ||
174 | |||
175 | return err; | ||
176 | } | ||
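ring->doorbell_qpn is pre-byte-swapped (qpn << 8) once at activation, which reads as letting the hot path writel() a value whose bytes already match the device's big-endian doorbell layout. A small check of the encoding with a hypothetical QPN:

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
	uint32_t qpn = 0x41;	/* hypothetical QP number */

	/* 0x00004100 byte-reversed -> 0x00410000 */
	printf("doorbell word = 0x%08x\n", swab32(qpn << 8));
	return 0;
}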
177 | |||
178 | void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, | ||
179 | struct mlx4_en_tx_ring *ring) | ||
180 | { | ||
181 | struct mlx4_en_dev *mdev = priv->mdev; | ||
182 | |||
183 | mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, | ||
184 | MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); | ||
185 | } | ||
186 | |||
187 | |||
188 | static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, | ||
189 | struct mlx4_en_tx_ring *ring, | ||
190 | int index, u8 owner) | ||
191 | { | ||
192 | struct mlx4_en_dev *mdev = priv->mdev; | ||
193 | struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; | ||
194 | struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; | ||
195 | struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; | ||
196 | struct sk_buff *skb = tx_info->skb; | ||
197 | struct skb_frag_struct *frag; | ||
198 | void *end = ring->buf + ring->buf_size; | ||
199 | int frags = skb_shinfo(skb)->nr_frags; | ||
200 | int i; | ||
201 | __be32 *ptr = (__be32 *)tx_desc; | ||
202 | __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); | ||
203 | |||
204 | /* Optimize the common case when there are no wraparounds */ | ||
205 | if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { | ||
206 | if (tx_info->linear) { | ||
207 | pci_unmap_single(mdev->pdev, | ||
208 | (dma_addr_t) be64_to_cpu(data->addr), | ||
209 | be32_to_cpu(data->byte_count), | ||
210 | PCI_DMA_TODEVICE); | ||
211 | ++data; | ||
212 | } | ||
213 | |||
214 | for (i = 0; i < frags; i++) { | ||
215 | frag = &skb_shinfo(skb)->frags[i]; | ||
216 | pci_unmap_page(mdev->pdev, | ||
217 | (dma_addr_t) be64_to_cpu(data[i].addr), | ||
218 | frag->size, PCI_DMA_TODEVICE); | ||
219 | } | ||
220 | /* Stamp the freed descriptor */ | ||
221 | for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { | ||
222 | *ptr = stamp; | ||
223 | ptr += STAMP_DWORDS; | ||
224 | } | ||
225 | |||
226 | } else { | ||
227 | if ((void *) data >= end) { | ||
228 | data = (struct mlx4_wqe_data_seg *) | ||
229 | (ring->buf + ((void *) data - end)); | ||
230 | } | ||
231 | |||
232 | if (tx_info->linear) { | ||
233 | pci_unmap_single(mdev->pdev, | ||
234 | (dma_addr_t) be64_to_cpu(data->addr), | ||
235 | be32_to_cpu(data->byte_count), | ||
236 | PCI_DMA_TODEVICE); | ||
237 | ++data; | ||
238 | } | ||
239 | |||
240 | for (i = 0; i < frags; i++) { | ||
241 | /* Check for wraparound before unmapping */ | ||
242 | if ((void *) data >= end) | ||
243 | data = (struct mlx4_wqe_data_seg *) ring->buf; | ||
244 | frag = &skb_shinfo(skb)->frags[i]; | ||
245 | pci_unmap_page(mdev->pdev, | ||
246 | (dma_addr_t) be64_to_cpu(data->addr), | ||
247 | frag->size, PCI_DMA_TODEVICE); | ||
248 | } | ||
249 | /* Stamp the freed descriptor */ | ||
250 | for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { | ||
251 | *ptr = stamp; | ||
252 | ptr += STAMP_DWORDS; | ||
253 | if ((void *) ptr >= end) { | ||
254 | ptr = ring->buf; | ||
255 | stamp ^= cpu_to_be32(0x80000000); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | } | ||
260 | dev_kfree_skb_any(skb); | ||
261 | return tx_info->nr_txbb; | ||
262 | } | ||
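The stamping loops above write one dword per STAMP_STRIDE bytes of each freed TXBB so later polling can recognize the descriptor as recycled, flipping the stamp's top bit when the walk wraps past the ring end. A standalone sketch of the write pattern; the STAMP_* values are assumptions chosen for illustration:

#include <stdio.h>
#include <stdint.h>

#define STAMP_STRIDE	64
#define STAMP_DWORDS	(STAMP_STRIDE / 4)
#define STAMP_SHIFT	31
#define STAMP_VAL	0x7fffffffU

int main(void)
{
	uint32_t buf[4 * STAMP_DWORDS] = { 0 };	/* four strides of TXBB space */
	uint32_t stamp = STAMP_VAL | (1U << STAMP_SHIFT);	/* owner = 1 */
	uint32_t *ptr = buf;
	int i;

	/* one stamp dword per STAMP_STRIDE bytes, as in the loops above */
	for (i = 0; i < 4 * STAMP_STRIDE; i += STAMP_STRIDE) {
		*ptr = stamp;
		ptr += STAMP_DWORDS;
	}
	for (i = 0; i < 4; i++)
		printf("stride %d starts with 0x%08x\n",
		       i, buf[i * STAMP_DWORDS]);
	return 0;
}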
263 | |||
264 | |||
265 | int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) | ||
266 | { | ||
267 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
268 | int cnt = 0; | ||
269 | |||
270 | /* Skip last polled descriptor */ | ||
271 | ring->cons += ring->last_nr_txbb; | ||
272 | mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", | ||
273 | ring->cons, ring->prod); | ||
274 | |||
275 | if ((u32) (ring->prod - ring->cons) > ring->size) { | ||
276 | if (netif_msg_tx_err(priv)) | ||
277 | mlx4_warn(priv->mdev, "Tx consumer passed producer!\n"); | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | while (ring->cons != ring->prod) { | ||
282 | ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, | ||
283 | ring->cons & ring->size_mask, | ||
284 | !!(ring->cons & ring->size)); | ||
285 | ring->cons += ring->last_nr_txbb; | ||
286 | cnt++; | ||
287 | } | ||
288 | |||
289 | if (cnt) | ||
290 | mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); | ||
291 | |||
292 | return cnt; | ||
293 | } | ||
294 | |||
295 | void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num) | ||
296 | { | ||
297 | int block = 8 / ring_num; | ||
298 | int extra = 8 - (block * ring_num); | ||
299 | int num = 0; | ||
300 | u16 ring = 1; | ||
301 | int prio; | ||
302 | |||
303 | if (ring_num == 1) { | ||
304 | for (prio = 0; prio < 8; prio++) | ||
305 | prio_map[prio] = 0; | ||
306 | return; | ||
307 | } | ||
308 | |||
309 | for (prio = 0; prio < 8; prio++) { | ||
310 | if (extra && (num == block + 1)) { | ||
311 | ring++; | ||
312 | num = 0; | ||
313 | extra--; | ||
314 | } else if (!extra && (num == block)) { | ||
315 | ring++; | ||
316 | num = 0; | ||
317 | } | ||
318 | prio_map[prio] = ring; | ||
319 | mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring); | ||
320 | num++; | ||
321 | } | ||
322 | } | ||
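The function splits the 8 VLAN priorities into near-equal blocks over the tx rings, with the 'extra' leftover priorities padding the lower rings by one each; ring 0 appears to be kept for untagged/default traffic, so the walk starts at ring 1. Reproduced standalone (the ring count is hypothetical):

#include <stdio.h>

static void set_prio_map(unsigned short *prio_map, int ring_num)
{
	int block = 8 / ring_num;
	int extra = 8 - block * ring_num;
	int num = 0, prio;
	unsigned short ring = 1;

	if (ring_num == 1) {	/* single ring: everything maps to ring 0 */
		for (prio = 0; prio < 8; prio++)
			prio_map[prio] = 0;
		return;
	}

	for (prio = 0; prio < 8; prio++) {
		if (extra && num == block + 1) {
			ring++; num = 0; extra--;
		} else if (!extra && num == block) {
			ring++; num = 0;
		}
		prio_map[prio] = ring;
		num++;
	}
}

int main(void)
{
	unsigned short map[8];
	int i;

	set_prio_map(map, 3);	/* hypothetical: three priority rings */
	for (i = 0; i < 8; i++)
		printf("prio %d -> ring %d\n", i, map[i]);
	return 0;	/* rings 1 and 2 get 3 priorities each, ring 3 gets 2 */
}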
323 | |||
324 | static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) | ||
325 | { | ||
326 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
327 | struct mlx4_cq *mcq = &cq->mcq; | ||
328 | struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; | ||
329 | struct mlx4_cqe *cqe = cq->buf; | ||
330 | u16 index; | ||
331 | u16 new_index; | ||
332 | u32 txbbs_skipped = 0; | ||
333 | u32 cq_last_sav; | ||
334 | |||
335 | /* index always points to the first TXBB of the last polled descriptor */ | ||
336 | index = ring->cons & ring->size_mask; | ||
337 | new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; | ||
338 | if (index == new_index) | ||
339 | return; | ||
340 | |||
341 | if (!priv->port_up) | ||
342 | return; | ||
343 | |||
344 | /* | ||
345 | * We use a two-stage loop: | ||
346 | * - the first samples the HW-updated CQE | ||
347 | * - the second frees TXBBs until the last sample | ||
348 | * This lets us amortize CQE cache misses, while still polling the CQ | ||
349 | * until it is quiescent. | ||
350 | */ | ||
351 | cq_last_sav = mcq->cons_index; | ||
352 | do { | ||
353 | do { | ||
354 | /* Skip over last polled CQE */ | ||
355 | index = (index + ring->last_nr_txbb) & ring->size_mask; | ||
356 | txbbs_skipped += ring->last_nr_txbb; | ||
357 | |||
358 | /* Poll next CQE */ | ||
359 | ring->last_nr_txbb = mlx4_en_free_tx_desc( | ||
360 | priv, ring, index, | ||
361 | !!((ring->cons + txbbs_skipped) & | ||
362 | ring->size)); | ||
363 | ++mcq->cons_index; | ||
364 | |||
365 | } while (index != new_index); | ||
366 | |||
367 | new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; | ||
368 | } while (index != new_index); | ||
369 | AVG_PERF_COUNTER(priv->pstats.tx_coal_avg, | ||
370 | (u32) (mcq->cons_index - cq_last_sav)); | ||
371 | |||
372 | /* | ||
373 | * To prevent CQ overflow we first update CQ consumer and only then | ||
374 | * the ring consumer. | ||
375 | */ | ||
376 | mlx4_cq_set_ci(mcq); | ||
377 | wmb(); | ||
378 | ring->cons += txbbs_skipped; | ||
379 | |||
380 | /* Wakeup Tx queue if this ring stopped it */ | ||
381 | if (unlikely(ring->blocked)) { | ||
382 | if (((u32) (ring->prod - ring->cons) <= | ||
383 | ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) { | ||
384 | |||
385 | /* TODO: support multiqueue netdevs. Currently, we block | ||
386 | * when *any* ring is full. Note that: | ||
387 | * - 2 Tx rings can unblock at the same time and call | ||
388 | * netif_wake_queue(), which is OK since this | ||
389 | * operation is idempotent. | ||
390 | * - We might wake the queue just after another ring | ||
391 | * stopped it. This is no big deal because the next | ||
392 | * transmission on that ring would stop the queue. | ||
393 | */ | ||
394 | ring->blocked = 0; | ||
395 | netif_wake_queue(dev); | ||
396 | priv->port_stats.wake_queue++; | ||
397 | } | ||
398 | } | ||
399 | } | ||
400 | |||
401 | void mlx4_en_tx_irq(struct mlx4_cq *mcq) | ||
402 | { | ||
403 | struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); | ||
404 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); | ||
405 | struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; | ||
406 | |||
407 | spin_lock_irq(&ring->comp_lock); | ||
408 | cq->armed = 0; | ||
409 | mlx4_en_process_tx_cq(cq->dev, cq); | ||
410 | if (ring->blocked) | ||
411 | mlx4_en_arm_cq(priv, cq); | ||
412 | else | ||
413 | mod_timer(&cq->timer, jiffies + 1); | ||
414 | spin_unlock_irq(&ring->comp_lock); | ||
415 | } | ||
416 | |||
417 | |||
418 | void mlx4_en_poll_tx_cq(unsigned long data) | ||
419 | { | ||
420 | struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data; | ||
421 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); | ||
422 | struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; | ||
423 | u32 inflight; | ||
424 | |||
425 | INC_PERF_COUNTER(priv->pstats.tx_poll); | ||
426 | |||
427 | netif_tx_lock(priv->dev); | ||
428 | spin_lock_irq(&ring->comp_lock); | ||
429 | mlx4_en_process_tx_cq(cq->dev, cq); | ||
430 | inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb); | ||
431 | |||
432 | /* If there are still packets in flight and the timer has not already | ||
433 | * been scheduled by the Tx routine then schedule it here to guarantee | ||
434 | * completion processing of these packets */ | ||
435 | if (inflight && priv->port_up) | ||
436 | mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); | ||
437 | |||
438 | spin_unlock_irq(&ring->comp_lock); | ||
439 | netif_tx_unlock(priv->dev); | ||
440 | } | ||
441 | |||
442 | static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, | ||
443 | struct mlx4_en_tx_ring *ring, | ||
444 | u32 index, | ||
445 | unsigned int desc_size) | ||
446 | { | ||
447 | u32 copy = (ring->size - index) * TXBB_SIZE; | ||
448 | int i; | ||
449 | |||
450 | for (i = desc_size - copy - 4; i >= 0; i -= 4) { | ||
451 | if ((i & (TXBB_SIZE - 1)) == 0) | ||
452 | wmb(); | ||
453 | |||
454 | *((u32 *) (ring->buf + i)) = | ||
455 | *((u32 *) (ring->bounce_buf + copy + i)); | ||
456 | } | ||
457 | |||
458 | for (i = copy - 4; i >= 4; i -= 4) { | ||
459 | if ((i & (TXBB_SIZE - 1)) == 0) | ||
460 | wmb(); | ||
461 | |||
462 | *((u32 *) (ring->buf + index * TXBB_SIZE + i)) = | ||
463 | *((u32 *) (ring->bounce_buf + i)); | ||
464 | } | ||
465 | |||
466 | /* Return real descriptor location */ | ||
467 | return ring->buf + index * TXBB_SIZE; | ||
468 | } | ||
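The copy-back runs backwards in 4-byte words with periodic barriers so the first TXBB, which holds the ownership/opcode dword, is the last to land. Barriers aside, the split itself is simple: the portion that wrapped goes to the ring start, the head back to the original index. A simplified userspace sketch with toy sizes:

#include <stdio.h>
#include <string.h>

#define RING_BYTES	64	/* toy ring: 4 TXBBs */
#define TXBB		16

int main(void)
{
	char ring[RING_BYTES], bounce[2 * TXBB];
	unsigned int index = 3;			/* descriptor starts in last TXBB */
	unsigned int desc_size = 2 * TXBB;	/* so one TXBB wraps around */
	unsigned int copy = RING_BYTES - index * TXBB;

	memset(ring, '.', sizeof(ring));
	memset(bounce, 'D', sizeof(bounce));

	/* wrapped tail first, then the head holding the ownership dword */
	memcpy(ring, bounce + copy, desc_size - copy);
	memcpy(ring + index * TXBB, bounce, copy);

	/* prints 16 x 'D', 32 x '.', 16 x 'D' */
	printf("%.64s\n", ring);
	return 0;
}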
469 | |||
470 | static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) | ||
471 | { | ||
472 | struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind]; | ||
473 | struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind]; | ||
474 | |||
475 | /* If we don't have a pending timer, set one up to catch our recent | ||
476 | post in case the interface becomes idle */ | ||
477 | if (!timer_pending(&cq->timer)) | ||
478 | mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); | ||
479 | |||
480 | /* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */ | ||
481 | if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) | ||
482 | mlx4_en_process_tx_cq(priv->dev, cq); | ||
483 | } | ||
484 | |||
485 | static void *get_frag_ptr(struct sk_buff *skb) | ||
486 | { | ||
487 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; | ||
488 | struct page *page = frag->page; | ||
489 | void *ptr; | ||
490 | |||
491 | ptr = page_address(page); | ||
492 | if (unlikely(!ptr)) | ||
493 | return NULL; | ||
494 | |||
495 | return ptr + frag->page_offset; | ||
496 | } | ||
497 | |||
498 | static int is_inline(struct sk_buff *skb, void **pfrag) | ||
499 | { | ||
500 | void *ptr; | ||
501 | |||
502 | if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) { | ||
503 | if (skb_shinfo(skb)->nr_frags == 1) { | ||
504 | ptr = get_frag_ptr(skb); | ||
505 | if (unlikely(!ptr)) | ||
506 | return 0; | ||
507 | |||
508 | if (pfrag) | ||
509 | *pfrag = ptr; | ||
510 | |||
511 | return 1; | ||
512 | } else if (unlikely(skb_shinfo(skb)->nr_frags)) | ||
513 | return 0; | ||
514 | else | ||
515 | return 1; | ||
516 | } | ||
517 | |||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static int inline_size(struct sk_buff *skb) | ||
522 | { | ||
523 | if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg) | ||
524 | <= MLX4_INLINE_ALIGN) | ||
525 | return ALIGN(skb->len + CTRL_SIZE + | ||
526 | sizeof(struct mlx4_wqe_inline_seg), 16); | ||
527 | else | ||
528 | return ALIGN(skb->len + CTRL_SIZE + 2 * | ||
529 | sizeof(struct mlx4_wqe_inline_seg), 16); | ||
530 | } | ||
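inline_size() charges a single 4-byte inline header when the whole packet fits in one MLX4_INLINE_ALIGN chunk after the control segment, and two headers when the payload must be split, rounding up to the 16-byte descriptor granularity. A worked version under assumed constants (CTRL_SIZE=16, inline header=4, MLX4_INLINE_ALIGN=64):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define CTRL_SIZE	16	/* assumed control-segment size */
#define INL_HDR		4	/* assumed sizeof(struct mlx4_wqe_inline_seg) */
#define INLINE_ALIGN	64	/* assumed MLX4_INLINE_ALIGN */

static int inline_size(int len)
{
	if (len + CTRL_SIZE + INL_HDR <= INLINE_ALIGN)
		return ALIGN(len + CTRL_SIZE + INL_HDR, 16);
	/* payload split across the alignment boundary costs two headers */
	return ALIGN(len + CTRL_SIZE + 2 * INL_HDR, 16);
}

int main(void)
{
	printf("len=40  -> %3d descriptor bytes\n", inline_size(40));  /* 64 */
	printf("len=100 -> %3d descriptor bytes\n", inline_size(100)); /* 128 */
	return 0;
}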
531 | |||
532 | static int get_real_size(struct sk_buff *skb, struct net_device *dev, | ||
533 | int *lso_header_size) | ||
534 | { | ||
535 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
536 | struct mlx4_en_dev *mdev = priv->mdev; | ||
537 | int real_size; | ||
538 | |||
539 | if (skb_is_gso(skb)) { | ||
540 | *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
541 | real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE + | ||
542 | ALIGN(*lso_header_size + 4, DS_SIZE); | ||
543 | if (unlikely(*lso_header_size != skb_headlen(skb))) { | ||
544 | /* We add a segment for the skb linear buffer only if | ||
545 | * it contains data */ | ||
546 | if (*lso_header_size < skb_headlen(skb)) | ||
547 | real_size += DS_SIZE; | ||
548 | else { | ||
549 | if (netif_msg_tx_err(priv)) | ||
550 | mlx4_warn(mdev, "Non-linear headers\n"); | ||
551 | dev_kfree_skb_any(skb); | ||
552 | return 0; | ||
553 | } | ||
554 | } | ||
555 | if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) { | ||
556 | if (netif_msg_tx_err(priv)) | ||
557 | mlx4_warn(mdev, "LSO header size too big\n"); | ||
558 | dev_kfree_skb_any(skb); | ||
559 | return 0; | ||
560 | } | ||
561 | } else { | ||
562 | *lso_header_size = 0; | ||
563 | if (!is_inline(skb, NULL)) | ||
564 | real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; | ||
565 | else | ||
566 | real_size = inline_size(skb); | ||
567 | } | ||
568 | |||
569 | return real_size; | ||
570 | } | ||
571 | |||
572 | static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb, | ||
573 | int real_size, u16 *vlan_tag, int tx_ind, void *fragptr) | ||
574 | { | ||
575 | struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; | ||
576 | int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; | ||
577 | |||
578 | if (skb->len <= spc) { | ||
579 | inl->byte_count = cpu_to_be32(1 << 31 | skb->len); | ||
580 | skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); | ||
581 | if (skb_shinfo(skb)->nr_frags) | ||
582 | memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, | ||
583 | skb_shinfo(skb)->frags[0].size); | ||
584 | |||
585 | } else { | ||
586 | inl->byte_count = cpu_to_be32(1 << 31 | spc); | ||
587 | if (skb_headlen(skb) <= spc) { | ||
588 | skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); | ||
589 | if (skb_headlen(skb) < spc) { | ||
590 | memcpy(((void *)(inl + 1)) + skb_headlen(skb), | ||
591 | fragptr, spc - skb_headlen(skb)); | ||
592 | fragptr += spc - skb_headlen(skb); | ||
593 | } | ||
594 | inl = (void *) (inl + 1) + spc; | ||
595 | memcpy(((void *)(inl + 1)), fragptr, skb->len - spc); | ||
596 | } else { | ||
597 | skb_copy_from_linear_data(skb, inl + 1, spc); | ||
598 | inl = (void *) (inl + 1) + spc; | ||
599 | skb_copy_from_linear_data_offset(skb, spc, inl + 1, | ||
600 | skb_headlen(skb) - spc); | ||
601 | if (skb_shinfo(skb)->nr_frags) | ||
602 | memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc, | ||
603 | fragptr, skb_shinfo(skb)->frags[0].size); | ||
604 | } | ||
605 | |||
606 | wmb(); | ||
607 | inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc)); | ||
608 | } | ||
609 | tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag); | ||
610 | tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag); | ||
611 | tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; | ||
612 | } | ||
613 | |||
614 | static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb, | ||
615 | u16 *vlan_tag) | ||
616 | { | ||
617 | int tx_ind; | ||
618 | |||
619 | /* Obtain VLAN information if present */ | ||
620 | if (priv->vlgrp && vlan_tx_tag_present(skb)) { | ||
621 | *vlan_tag = vlan_tx_tag_get(skb); | ||
622 | /* Set the Tx ring to use according to vlan priority */ | ||
623 | tx_ind = priv->tx_prio_map[*vlan_tag >> 13]; | ||
624 | } else { | ||
625 | *vlan_tag = 0; | ||
626 | tx_ind = 0; | ||
627 | } | ||
628 | return tx_ind; | ||
629 | } | ||
630 | |||
631 | int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | ||
632 | { | ||
633 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
634 | struct mlx4_en_dev *mdev = priv->mdev; | ||
635 | struct mlx4_en_tx_ring *ring; | ||
636 | struct mlx4_en_cq *cq; | ||
637 | struct mlx4_en_tx_desc *tx_desc; | ||
638 | struct mlx4_wqe_data_seg *data; | ||
639 | struct skb_frag_struct *frag; | ||
640 | struct mlx4_en_tx_info *tx_info; | ||
641 | int tx_ind = 0; | ||
642 | int nr_txbb; | ||
643 | int desc_size; | ||
644 | int real_size; | ||
645 | dma_addr_t dma; | ||
646 | u32 index; | ||
647 | __be32 op_own; | ||
648 | u16 vlan_tag; | ||
649 | int i; | ||
650 | int lso_header_size; | ||
651 | void *fragptr; | ||
652 | |||
653 | if (unlikely(!skb->len)) { | ||
654 | dev_kfree_skb_any(skb); | ||
655 | return NETDEV_TX_OK; | ||
656 | } | ||
657 | real_size = get_real_size(skb, dev, &lso_header_size); | ||
658 | if (unlikely(!real_size)) | ||
659 | return NETDEV_TX_OK; | ||
660 | |||
661 | /* Align descriptor to TXBB size */ | ||
662 | desc_size = ALIGN(real_size, TXBB_SIZE); | ||
663 | nr_txbb = desc_size / TXBB_SIZE; | ||
664 | if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { | ||
665 | if (netif_msg_tx_err(priv)) | ||
666 | mlx4_warn(mdev, "Oversized header or SG list\n"); | ||
667 | dev_kfree_skb_any(skb); | ||
668 | return NETDEV_TX_OK; | ||
669 | } | ||
670 | |||
671 | tx_ind = get_vlan_info(priv, skb, &vlan_tag); | ||
672 | ring = &priv->tx_ring[tx_ind]; | ||
673 | |||
674 | /* Check available TXBBs and 2K spare for prefetch */ | ||
675 | if (unlikely(((int)(ring->prod - ring->cons)) > | ||
676 | ring->size - HEADROOM - MAX_DESC_TXBBS)) { | ||
677 | /* every full Tx ring stops queue. | ||
678 | * TODO: implement multi-queue support (per-queue stop) */ | ||
679 | netif_stop_queue(dev); | ||
680 | ring->blocked = 1; | ||
681 | priv->port_stats.queue_stopped++; | ||
682 | |||
683 | /* Use interrupts to find out when queue opened */ | ||
684 | cq = &priv->tx_cq[tx_ind]; | ||
685 | mlx4_en_arm_cq(priv, cq); | ||
686 | return NETDEV_TX_BUSY; | ||
687 | } | ||
688 | |||
689 | /* Now that we know which Tx ring to use, check that the port is up */ | ||
690 | if (unlikely(!priv->port_up)) { | ||
691 | if (netif_msg_tx_err(priv)) | ||
692 | mlx4_warn(mdev, "xmit: port down!\n"); | ||
693 | dev_kfree_skb_any(skb); | ||
694 | return NETDEV_TX_OK; | ||
695 | } | ||
696 | |||
697 | /* Track current inflight packets for performance analysis */ | ||
698 | AVG_PERF_COUNTER(priv->pstats.inflight_avg, | ||
699 | (u32) (ring->prod - ring->cons - 1)); | ||
700 | |||
701 | /* Packet is good - grab an index and transmit it */ | ||
702 | index = ring->prod & ring->size_mask; | ||
703 | |||
704 | /* See if we have enough space for whole descriptor TXBB for setting | ||
705 | * SW ownership on next descriptor; if not, use a bounce buffer. */ | ||
706 | if (likely(index + nr_txbb <= ring->size)) | ||
707 | tx_desc = ring->buf + index * TXBB_SIZE; | ||
708 | else | ||
709 | tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; | ||
710 | |||
711 | /* Save skb in tx_info ring */ | ||
712 | tx_info = &ring->tx_info[index]; | ||
713 | tx_info->skb = skb; | ||
714 | tx_info->nr_txbb = nr_txbb; | ||
715 | |||
716 | /* Prepare ctrl segment apart from opcode+ownership, which depends on | ||
717 | * whether LSO is used */ | ||
718 | tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); | ||
719 | tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag; | ||
720 | tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; | ||
721 | tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | | ||
722 | MLX4_WQE_CTRL_SOLICITED); | ||
723 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | ||
724 | tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | | ||
725 | MLX4_WQE_CTRL_TCP_UDP_CSUM); | ||
726 | priv->port_stats.tx_chksum_offload++; | ||
727 | } | ||
728 | |||
729 | /* Handle LSO (TSO) packets */ | ||
730 | if (lso_header_size) { | ||
731 | /* Mark opcode as LSO */ | ||
732 | op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) | | ||
733 | ((ring->prod & ring->size) ? | ||
734 | cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); | ||
735 | |||
736 | /* Fill in the LSO prefix */ | ||
737 | tx_desc->lso.mss_hdr_size = cpu_to_be32( | ||
738 | skb_shinfo(skb)->gso_size << 16 | lso_header_size); | ||
739 | |||
740 | /* Copy headers; | ||
741 | * note that we already verified the headers are in the linear part */ | ||
742 | memcpy(tx_desc->lso.header, skb->data, lso_header_size); | ||
743 | data = ((void *) &tx_desc->lso + | ||
744 | ALIGN(lso_header_size + 4, DS_SIZE)); | ||
745 | |||
746 | priv->port_stats.tso_packets++; | ||
747 | i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + | ||
748 | !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); | ||
749 | ring->bytes += skb->len + (i - 1) * lso_header_size; | ||
750 | ring->packets += i; | ||
751 | } else { | ||
752 | /* Normal (Non LSO) packet */ | ||
753 | op_own = cpu_to_be32(MLX4_OPCODE_SEND) | | ||
754 | ((ring->prod & ring->size) ? | ||
755 | cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); | ||
756 | data = &tx_desc->data; | ||
757 | ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); | ||
758 | ring->packets++; | ||
759 | |||
760 | } | ||
761 | AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); | ||
762 | |||
763 | |||
764 | /* valid only for non-inline segments */ | ||
765 | tx_info->data_offset = (void *) data - (void *) tx_desc; | ||
766 | |||
767 | tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0; | ||
768 | data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; | ||
769 | |||
770 | if (!is_inline(skb, &fragptr)) { | ||
771 | /* Map fragments */ | ||
772 | for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { | ||
773 | frag = &skb_shinfo(skb)->frags[i]; | ||
774 | dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset, | ||
775 | frag->size, PCI_DMA_TODEVICE); | ||
776 | data->addr = cpu_to_be64(dma); | ||
777 | data->lkey = cpu_to_be32(mdev->mr.key); | ||
778 | wmb(); | ||
779 | data->byte_count = cpu_to_be32(frag->size); | ||
780 | --data; | ||
781 | } | ||
782 | |||
783 | /* Map linear part */ | ||
784 | if (tx_info->linear) { | ||
785 | dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size, | ||
786 | skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE); | ||
787 | data->addr = cpu_to_be64(dma); | ||
788 | data->lkey = cpu_to_be32(mdev->mr.key); | ||
789 | wmb(); | ||
790 | data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size); | ||
791 | } | ||
792 | } else | ||
793 | build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); | ||
794 | |||
795 | ring->prod += nr_txbb; | ||
796 | |||
797 | /* If we used a bounce buffer then copy descriptor back into place */ | ||
798 | if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf) | ||
799 | tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); | ||
800 | |||
801 | /* Run destructor before passing skb to HW */ | ||
802 | if (likely(!skb_shared(skb))) | ||
803 | skb_orphan(skb); | ||
804 | |||
805 | /* Ensure new descriptor hits memory | ||
806 | * before setting ownership of this descriptor to HW */ | ||
807 | wmb(); | ||
808 | tx_desc->ctrl.owner_opcode = op_own; | ||
809 | |||
810 | /* Ring doorbell! */ | ||
811 | wmb(); | ||
812 | writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); | ||
813 | dev->trans_start = jiffies; | ||
814 | |||
815 | /* Poll CQ here */ | ||
816 | mlx4_en_xmit_poll(priv, tx_ind); | ||
817 | |||
818 | return NETDEV_TX_OK; | ||
819 | } | ||
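The LSO accounting in mlx4_en_xmit() computes ceil(payload / gso_size) with the '/ plus !!(%)' idiom, then adds '(i - 1) * lso_header_size' because every wire segment after the first repeats the header. A standalone check with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned int hdr = 66, mss = 1460;	/* hypothetical TCP/IP case */
	unsigned int skb_len = hdr + 44 * mss + 100;	/* 44 full + 1 partial */
	unsigned int payload = skb_len - hdr;
	unsigned int segs = payload / mss + !!(payload % mss);	/* ceil */

	/* header is replicated once per extra wire segment */
	printf("segments=%u, wire bytes=%u\n",
	       segs, skb_len + (segs - 1) * hdr);	/* 45, 67310 */
	return 0;
}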
820 | |||
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 8a8b56135a58..de169338cd90 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c | |||
@@ -558,7 +558,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
558 | int i; | 558 | int i; |
559 | 559 | ||
560 | err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, | 560 | err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, |
561 | dev->caps.num_eqs - 1, dev->caps.reserved_eqs); | 561 | dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); |
562 | if (err) | 562 | if (err) |
563 | return err; | 563 | return err; |
564 | 564 | ||
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 7e32955da982..be09fdb79cb8 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c | |||
@@ -88,6 +88,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags) | |||
88 | [ 8] = "P_Key violation counter", | 88 | [ 8] = "P_Key violation counter", |
89 | [ 9] = "Q_Key violation counter", | 89 | [ 9] = "Q_Key violation counter", |
90 | [10] = "VMM", | 90 | [10] = "VMM", |
91 | [12] = "DPDP", | ||
91 | [16] = "MW support", | 92 | [16] = "MW support", |
92 | [17] = "APM support", | 93 | [17] = "APM support", |
93 | [18] = "Atomic ops support", | 94 | [18] = "Atomic ops support", |
@@ -346,7 +347,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
346 | MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); | 347 | MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); |
347 | dev_cap->max_vl[i] = field >> 4; | 348 | dev_cap->max_vl[i] = field >> 4; |
348 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); | 349 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); |
349 | dev_cap->max_mtu[i] = field >> 4; | 350 | dev_cap->ib_mtu[i] = field >> 4; |
350 | dev_cap->max_port_width[i] = field & 0xf; | 351 | dev_cap->max_port_width[i] = field & 0xf; |
351 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); | 352 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); |
352 | dev_cap->max_gids[i] = 1 << (field & 0xf); | 353 | dev_cap->max_gids[i] = 1 << (field & 0xf); |
@@ -354,9 +355,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
354 | dev_cap->max_pkeys[i] = 1 << (field & 0xf); | 355 | dev_cap->max_pkeys[i] = 1 << (field & 0xf); |
355 | } | 356 | } |
356 | } else { | 357 | } else { |
358 | #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 | ||
357 | #define QUERY_PORT_MTU_OFFSET 0x01 | 359 | #define QUERY_PORT_MTU_OFFSET 0x01 |
360 | #define QUERY_PORT_ETH_MTU_OFFSET 0x02 | ||
358 | #define QUERY_PORT_WIDTH_OFFSET 0x06 | 361 | #define QUERY_PORT_WIDTH_OFFSET 0x06 |
359 | #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 | 362 | #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 |
363 | #define QUERY_PORT_MAC_OFFSET 0x08 | ||
364 | #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a | ||
360 | #define QUERY_PORT_MAX_VL_OFFSET 0x0b | 365 | #define QUERY_PORT_MAX_VL_OFFSET 0x0b |
361 | 366 | ||
362 | for (i = 1; i <= dev_cap->num_ports; ++i) { | 367 | for (i = 1; i <= dev_cap->num_ports; ++i) { |
@@ -365,8 +370,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
365 | if (err) | 370 | if (err) |
366 | goto out; | 371 | goto out; |
367 | 372 | ||
373 | MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); | ||
374 | dev_cap->supported_port_types[i] = field & 3; | ||
368 | MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); | 375 | MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); |
369 | dev_cap->max_mtu[i] = field & 0xf; | 376 | dev_cap->ib_mtu[i] = field & 0xf; |
370 | MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); | 377 | MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); |
371 | dev_cap->max_port_width[i] = field & 0xf; | 378 | dev_cap->max_port_width[i] = field & 0xf; |
372 | MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); | 379 | MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); |
@@ -374,6 +381,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
374 | dev_cap->max_pkeys[i] = 1 << (field & 0xf); | 381 | dev_cap->max_pkeys[i] = 1 << (field & 0xf); |
375 | MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); | 382 | MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); |
376 | dev_cap->max_vl[i] = field & 0xf; | 383 | dev_cap->max_vl[i] = field & 0xf; |
384 | MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); | ||
385 | dev_cap->log_max_macs[i] = field & 0xf; | ||
386 | dev_cap->log_max_vlans[i] = field >> 4; | ||
387 | MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); | ||
388 | MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); | ||
377 | } | 389 | } |
378 | } | 390 | } |
379 | 391 | ||
@@ -407,7 +419,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
407 | mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", | 419 | mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", |
408 | dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); | 420 | dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); |
409 | mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", | 421 | mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", |
410 | dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1], | 422 | dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1], |
411 | dev_cap->max_port_width[1]); | 423 | dev_cap->max_port_width[1]); |
412 | mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", | 424 | mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", |
413 | dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); | 425 | dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); |
@@ -819,7 +831,7 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) | |||
819 | flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; | 831 | flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; |
820 | MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); | 832 | MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); |
821 | 833 | ||
822 | field = 128 << dev->caps.mtu_cap[port]; | 834 | field = 128 << dev->caps.ib_mtu_cap[port]; |
823 | MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); | 835 | MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); |
824 | field = dev->caps.gid_table_len[port]; | 836 | field = dev->caps.gid_table_len[port]; |
825 | MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); | 837 | MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); |
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h index decbb5c2ad41..526d7f30c041 100644 --- a/drivers/net/mlx4/fw.h +++ b/drivers/net/mlx4/fw.h | |||
@@ -66,11 +66,13 @@ struct mlx4_dev_cap { | |||
66 | int local_ca_ack_delay; | 66 | int local_ca_ack_delay; |
67 | int num_ports; | 67 | int num_ports; |
68 | u32 max_msg_sz; | 68 | u32 max_msg_sz; |
69 | int max_mtu[MLX4_MAX_PORTS + 1]; | 69 | int ib_mtu[MLX4_MAX_PORTS + 1]; |
70 | int max_port_width[MLX4_MAX_PORTS + 1]; | 70 | int max_port_width[MLX4_MAX_PORTS + 1]; |
71 | int max_vl[MLX4_MAX_PORTS + 1]; | 71 | int max_vl[MLX4_MAX_PORTS + 1]; |
72 | int max_gids[MLX4_MAX_PORTS + 1]; | 72 | int max_gids[MLX4_MAX_PORTS + 1]; |
73 | int max_pkeys[MLX4_MAX_PORTS + 1]; | 73 | int max_pkeys[MLX4_MAX_PORTS + 1]; |
74 | u64 def_mac[MLX4_MAX_PORTS + 1]; | ||
75 | u16 eth_mtu[MLX4_MAX_PORTS + 1]; | ||
74 | u16 stat_rate_support; | 76 | u16 stat_rate_support; |
75 | u32 flags; | 77 | u32 flags; |
76 | int reserved_uars; | 78 | int reserved_uars; |
@@ -102,6 +104,9 @@ struct mlx4_dev_cap { | |||
102 | u32 reserved_lkey; | 104 | u32 reserved_lkey; |
103 | u64 max_icm_sz; | 105 | u64 max_icm_sz; |
104 | int max_gso_sz; | 106 | int max_gso_sz; |
107 | u8 supported_port_types[MLX4_MAX_PORTS + 1]; | ||
108 | u8 log_max_macs[MLX4_MAX_PORTS + 1]; | ||
109 | u8 log_max_vlans[MLX4_MAX_PORTS + 1]; | ||
105 | }; | 110 | }; |
106 | 111 | ||
107 | struct mlx4_adapter { | 112 | struct mlx4_adapter { |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 1252a919de2e..468921b8f4b6 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -85,6 +85,57 @@ static struct mlx4_profile default_profile = { | |||
85 | .num_mtt = 1 << 20, | 85 | .num_mtt = 1 << 20, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static int log_num_mac = 2; | ||
89 | module_param_named(log_num_mac, log_num_mac, int, 0444); | ||
90 | MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); | ||
91 | |||
92 | static int log_num_vlan; | ||
93 | module_param_named(log_num_vlan, log_num_vlan, int, 0444); | ||
94 | MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); | ||
95 | |||
96 | static int use_prio; | ||
97 | module_param_named(use_prio, use_prio, bool, 0444); | ||
98 | MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " | ||
99 | "(0/1, default 0)"); | ||
100 | |||
101 | static int mlx4_check_port_params(struct mlx4_dev *dev, | ||
102 | enum mlx4_port_type *port_type) | ||
103 | { | ||
104 | int i; | ||
105 | |||
106 | for (i = 0; i < dev->caps.num_ports - 1; i++) { | ||
107 | if (port_type[i] != port_type[i+1] && | ||
108 | !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { | ||
109 | mlx4_err(dev, "Only identical port types are supported " | ||
110 | "on this HCA, aborting.\n"); | ||
111 | return -EINVAL; | ||
112 | } | ||
113 | } | ||
114 | if ((port_type[0] == MLX4_PORT_TYPE_ETH) && | ||
115 | (port_type[1] == MLX4_PORT_TYPE_IB)) { | ||
116 | mlx4_err(dev, "eth-ib configuration is not supported.\n"); | ||
117 | return -EINVAL; | ||
118 | } | ||
119 | |||
120 | for (i = 0; i < dev->caps.num_ports; i++) { | ||
121 | if (!(port_type[i] & dev->caps.supported_type[i+1])) { | ||
122 | mlx4_err(dev, "Requested port type for port %d is not " | ||
123 | "supported on this HCA\n", i + 1); | ||
124 | return -EINVAL; | ||
125 | } | ||
126 | } | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | static void mlx4_set_port_mask(struct mlx4_dev *dev) | ||
131 | { | ||
132 | int i; | ||
133 | |||
134 | dev->caps.port_mask = 0; | ||
135 | for (i = 1; i <= dev->caps.num_ports; ++i) | ||
136 | if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) | ||
137 | dev->caps.port_mask |= 1 << (i - 1); | ||
138 | } | ||
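mlx4_set_port_mask() packs the per-port types into a bitmask, with bit i-1 set when port i runs InfiniBand, so consumers can test a device's IB ports with a single AND. A minimal standalone sketch of the same computation, using hypothetical enum values for a two-port device:

#include <assert.h>

/* hypothetical stand-ins for the mlx4_port_type enum values */
enum port_type { PORT_TYPE_IB = 1, PORT_TYPE_ETH = 2 };

int main(void)
{
	/* hypothetical dual-port HCA: port 1 is IB, port 2 is Ethernet */
	enum port_type port_type[3] = { 0, PORT_TYPE_IB, PORT_TYPE_ETH };
	unsigned int port_mask = 0;
	int i;

	for (i = 1; i <= 2; ++i)
		if (port_type[i] == PORT_TYPE_IB)
			port_mask |= 1 << (i - 1);

	assert(port_mask == 0x1);	/* only bit 0 (port 1) is set */
	return 0;
}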
88 | static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | 139 | static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) |
89 | { | 140 | { |
90 | int err; | 141 | int err; |
@@ -120,10 +171,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
120 | dev->caps.num_ports = dev_cap->num_ports; | 171 | dev->caps.num_ports = dev_cap->num_ports; |
121 | for (i = 1; i <= dev->caps.num_ports; ++i) { | 172 | for (i = 1; i <= dev->caps.num_ports; ++i) { |
122 | dev->caps.vl_cap[i] = dev_cap->max_vl[i]; | 173 | dev->caps.vl_cap[i] = dev_cap->max_vl[i]; |
123 | dev->caps.mtu_cap[i] = dev_cap->max_mtu[i]; | 174 | dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; |
124 | dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; | 175 | dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; |
125 | dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; | 176 | dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; |
126 | dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; | 177 | dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; |
178 | dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; | ||
179 | dev->caps.def_mac[i] = dev_cap->def_mac[i]; | ||
180 | dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; | ||
127 | } | 181 | } |
128 | 182 | ||
129 | dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; | 183 | dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; |
@@ -134,7 +188,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
134 | dev->caps.max_rq_sg = dev_cap->max_rq_sg; | 188 | dev->caps.max_rq_sg = dev_cap->max_rq_sg; |
135 | dev->caps.max_wqes = dev_cap->max_qp_sz; | 189 | dev->caps.max_wqes = dev_cap->max_qp_sz; |
136 | dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; | 190 | dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; |
137 | dev->caps.reserved_qps = dev_cap->reserved_qps; | ||
138 | dev->caps.max_srq_wqes = dev_cap->max_srq_sz; | 191 | dev->caps.max_srq_wqes = dev_cap->max_srq_sz; |
139 | dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; | 192 | dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; |
140 | dev->caps.reserved_srqs = dev_cap->reserved_srqs; | 193 | dev->caps.reserved_srqs = dev_cap->reserved_srqs; |
@@ -163,9 +216,138 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
163 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; | 216 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; |
164 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; | 217 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; |
165 | 218 | ||
219 | dev->caps.log_num_macs = log_num_mac; | ||
220 | dev->caps.log_num_vlans = log_num_vlan; | ||
221 | dev->caps.log_num_prios = use_prio ? 3 : 0; | ||
222 | |||
223 | for (i = 1; i <= dev->caps.num_ports; ++i) { | ||
224 | if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH) | ||
225 | dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; | ||
226 | else | ||
227 | dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; | ||
228 | |||
229 | if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { | ||
230 | dev->caps.log_num_macs = dev_cap->log_max_macs[i]; | ||
231 | mlx4_warn(dev, "Requested number of MACs is too large " | ||
232 | "for port %d, reducing to %d.\n", | ||
233 | i, 1 << dev->caps.log_num_macs); | ||
234 | } | ||
235 | if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { | ||
236 | dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; | ||
237 | mlx4_warn(dev, "Requested number of VLANs is too large " | ||
238 | "for port %d, reducing to %d.\n", | ||
239 | i, 1 << dev->caps.log_num_vlans); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | mlx4_set_port_mask(dev); | ||
244 | |||
245 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; | ||
246 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = | ||
247 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = | ||
248 | (1 << dev->caps.log_num_macs) * | ||
249 | (1 << dev->caps.log_num_vlans) * | ||
250 | (1 << dev->caps.log_num_prios) * | ||
251 | dev->caps.num_ports; | ||
252 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; | ||
253 | |||
254 | dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + | ||
255 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + | ||
256 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + | ||
257 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; | ||
258 | |||
166 | return 0; | 259 | return 0; |
167 | } | 260 | } |
168 | 261 | ||
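The reserved-QP accounting above is plain arithmetic: the ETH_ADDR and FC_ADDR regions each pre-reserve one QP per (MAC, VLAN, priority, port) combination. A standalone worked example using the module defaults (log_num_mac = 2, log_num_vlan = 0, use_prio = 0) on a hypothetical two-port adapter:

#include <assert.h>

int main(void)
{
	int log_num_macs = 2;	/* module default: 4 MACs per port */
	int log_num_vlans = 0;	/* module default: 1 VLAN per port */
	int log_num_prios = 0;	/* use_prio off: 1 priority        */
	int num_ports = 2;	/* hypothetical dual-port adapter  */

	int per_region = (1 << log_num_macs) *
			 (1 << log_num_vlans) *
			 (1 << log_num_prios) *
			 num_ports;

	/* each of the ETH_ADDR and FC_ADDR regions reserves 8 QPs here */
	assert(per_region == 8);
	return 0;
}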
262 | /* | ||
263 | * Change the port configuration of the device. | ||
264 | * Every user of this function must hold the port mutex. | ||
265 | */ | ||
266 | static int mlx4_change_port_types(struct mlx4_dev *dev, | ||
267 | enum mlx4_port_type *port_types) | ||
268 | { | ||
269 | int err = 0; | ||
270 | int change = 0; | ||
271 | int port; | ||
272 | |||
273 | for (port = 0; port < dev->caps.num_ports; port++) { | ||
274 | if (port_types[port] != dev->caps.port_type[port + 1]) { | ||
275 | change = 1; | ||
276 | dev->caps.port_type[port + 1] = port_types[port]; | ||
277 | } | ||
278 | } | ||
279 | if (change) { | ||
280 | mlx4_unregister_device(dev); | ||
281 | for (port = 1; port <= dev->caps.num_ports; port++) { | ||
282 | mlx4_CLOSE_PORT(dev, port); | ||
283 | err = mlx4_SET_PORT(dev, port); | ||
284 | if (err) { | ||
285 | mlx4_err(dev, "Failed to set port %d, " | ||
286 | "aborting\n", port); | ||
287 | goto out; | ||
288 | } | ||
289 | } | ||
290 | mlx4_set_port_mask(dev); | ||
291 | err = mlx4_register_device(dev); | ||
292 | } | ||
293 | |||
294 | out: | ||
295 | return err; | ||
296 | } | ||
297 | |||
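The comment above is the whole contract: mlx4_change_port_types() unregisters the device, reprograms every port, and registers it again, so concurrent callers must serialize on priv->port_mutex, exactly as set_port_type() below does. A minimal caller sketch under that assumption (example_retype_ports() is a hypothetical helper, not part of the patch):

/* sketch only: any other caller must follow the same lock discipline */
static int example_retype_ports(struct mlx4_dev *dev,
				enum mlx4_port_type *new_types)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	mutex_lock(&priv->port_mutex);
	err = mlx4_change_port_types(dev, new_types);
	mutex_unlock(&priv->port_mutex);

	return err;
}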
298 | static ssize_t show_port_type(struct device *dev, | ||
299 | struct device_attribute *attr, | ||
300 | char *buf) | ||
301 | { | ||
302 | struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, | ||
303 | port_attr); | ||
304 | struct mlx4_dev *mdev = info->dev; | ||
305 | |||
306 | return sprintf(buf, "%s\n", | ||
307 | mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ? | ||
308 | "ib" : "eth"); | ||
309 | } | ||
310 | |||
311 | static ssize_t set_port_type(struct device *dev, | ||
312 | struct device_attribute *attr, | ||
313 | const char *buf, size_t count) | ||
314 | { | ||
315 | struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, | ||
316 | port_attr); | ||
317 | struct mlx4_dev *mdev = info->dev; | ||
318 | struct mlx4_priv *priv = mlx4_priv(mdev); | ||
319 | enum mlx4_port_type types[MLX4_MAX_PORTS]; | ||
320 | int i; | ||
321 | int err = 0; | ||
322 | |||
323 | if (!strcmp(buf, "ib\n")) | ||
324 | info->tmp_type = MLX4_PORT_TYPE_IB; | ||
325 | else if (!strcmp(buf, "eth\n")) | ||
326 | info->tmp_type = MLX4_PORT_TYPE_ETH; | ||
327 | else { | ||
328 | mlx4_err(mdev, "%s is not a supported port type\n", buf); | ||
329 | return -EINVAL; | ||
330 | } | ||
331 | |||
332 | mutex_lock(&priv->port_mutex); | ||
333 | for (i = 0; i < mdev->caps.num_ports; i++) | ||
334 | types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : | ||
335 | mdev->caps.port_type[i+1]; | ||
336 | |||
337 | err = mlx4_check_port_params(mdev, types); | ||
338 | if (err) | ||
339 | goto out; | ||
340 | |||
341 | for (i = 1; i <= mdev->caps.num_ports; i++) | ||
342 | priv->port[i].tmp_type = 0; | ||
343 | |||
344 | err = mlx4_change_port_types(mdev, types); | ||
345 | |||
346 | out: | ||
347 | mutex_unlock(&priv->port_mutex); | ||
348 | return err ? err : count; | ||
349 | } | ||
350 | |||
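From user space this attribute pair behaves like any sysfs file: the store handler compares the raw buffer against "ib\n" or "eth\n", so the trailing newline matters. A hedged user-space sketch (the PCI address is hypothetical; the attribute name mlx4_port<N> is created by mlx4_init_port_info() later in this patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical device path; adjust to the real PCI address */
	const char *attr = "/sys/bus/pci/devices/0000:04:00.0/mlx4_port1";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* write "eth\n" verbatim; set_port_type() strcmp()s the buffer */
	if (write(fd, "eth\n", 4) != 4)
		perror("write");
	close(fd);
	return 0;
}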
169 | static int mlx4_load_fw(struct mlx4_dev *dev) | 351 | static int mlx4_load_fw(struct mlx4_dev *dev) |
170 | { | 352 | { |
171 | struct mlx4_priv *priv = mlx4_priv(dev); | 353 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -211,7 +393,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, | |||
211 | ((u64) (MLX4_CMPT_TYPE_QP * | 393 | ((u64) (MLX4_CMPT_TYPE_QP * |
212 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), | 394 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), |
213 | cmpt_entry_sz, dev->caps.num_qps, | 395 | cmpt_entry_sz, dev->caps.num_qps, |
214 | dev->caps.reserved_qps, 0, 0); | 396 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], |
397 | 0, 0); | ||
215 | if (err) | 398 | if (err) |
216 | goto err; | 399 | goto err; |
217 | 400 | ||
@@ -336,7 +519,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | |||
336 | init_hca->qpc_base, | 519 | init_hca->qpc_base, |
337 | dev_cap->qpc_entry_sz, | 520 | dev_cap->qpc_entry_sz, |
338 | dev->caps.num_qps, | 521 | dev->caps.num_qps, |
339 | dev->caps.reserved_qps, 0, 0); | 522 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], |
523 | 0, 0); | ||
340 | if (err) { | 524 | if (err) { |
341 | mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); | 525 | mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); |
342 | goto err_unmap_dmpt; | 526 | goto err_unmap_dmpt; |
@@ -346,7 +530,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | |||
346 | init_hca->auxc_base, | 530 | init_hca->auxc_base, |
347 | dev_cap->aux_entry_sz, | 531 | dev_cap->aux_entry_sz, |
348 | dev->caps.num_qps, | 532 | dev->caps.num_qps, |
349 | dev->caps.reserved_qps, 0, 0); | 533 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], |
534 | 0, 0); | ||
350 | if (err) { | 535 | if (err) { |
351 | mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); | 536 | mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); |
352 | goto err_unmap_qp; | 537 | goto err_unmap_qp; |
@@ -356,7 +541,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | |||
356 | init_hca->altc_base, | 541 | init_hca->altc_base, |
357 | dev_cap->altc_entry_sz, | 542 | dev_cap->altc_entry_sz, |
358 | dev->caps.num_qps, | 543 | dev->caps.num_qps, |
359 | dev->caps.reserved_qps, 0, 0); | 544 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], |
545 | 0, 0); | ||
360 | if (err) { | 546 | if (err) { |
361 | mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); | 547 | mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); |
362 | goto err_unmap_auxc; | 548 | goto err_unmap_auxc; |
@@ -366,7 +552,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | |||
366 | init_hca->rdmarc_base, | 552 | init_hca->rdmarc_base, |
367 | dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, | 553 | dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, |
368 | dev->caps.num_qps, | 554 | dev->caps.num_qps, |
369 | dev->caps.reserved_qps, 0, 0); | 555 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], |
556 | 0, 0); | ||
370 | if (err) { | 557 | if (err) { |
371 | mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); | 558 | mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); |
372 | goto err_unmap_altc; | 559 | goto err_unmap_altc; |
@@ -565,6 +752,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
565 | { | 752 | { |
566 | struct mlx4_priv *priv = mlx4_priv(dev); | 753 | struct mlx4_priv *priv = mlx4_priv(dev); |
567 | int err; | 754 | int err; |
755 | int port; | ||
568 | 756 | ||
569 | err = mlx4_init_uar_table(dev); | 757 | err = mlx4_init_uar_table(dev); |
570 | if (err) { | 758 | if (err) { |
@@ -663,8 +851,20 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
663 | goto err_qp_table_free; | 851 | goto err_qp_table_free; |
664 | } | 852 | } |
665 | 853 | ||
854 | for (port = 1; port <= dev->caps.num_ports; port++) { | ||
855 | err = mlx4_SET_PORT(dev, port); | ||
856 | if (err) { | ||
857 | mlx4_err(dev, "Failed to set port %d, aborting\n", | ||
858 | port); | ||
859 | goto err_mcg_table_free; | ||
860 | } | ||
861 | } | ||
862 | |||
666 | return 0; | 863 | return 0; |
667 | 864 | ||
865 | err_mcg_table_free: | ||
866 | mlx4_cleanup_mcg_table(dev); | ||
867 | |||
668 | err_qp_table_free: | 868 | err_qp_table_free: |
669 | mlx4_cleanup_qp_table(dev); | 869 | mlx4_cleanup_qp_table(dev); |
670 | 870 | ||
@@ -728,11 +928,45 @@ no_msi: | |||
728 | priv->eq_table.eq[i].irq = dev->pdev->irq; | 928 | priv->eq_table.eq[i].irq = dev->pdev->irq; |
729 | } | 929 | } |
730 | 930 | ||
931 | static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | ||
932 | { | ||
933 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
934 | int err = 0; | ||
935 | |||
936 | info->dev = dev; | ||
937 | info->port = port; | ||
938 | mlx4_init_mac_table(dev, &info->mac_table); | ||
939 | mlx4_init_vlan_table(dev, &info->vlan_table); | ||
940 | |||
941 | sprintf(info->dev_name, "mlx4_port%d", port); | ||
942 | info->port_attr.attr.name = info->dev_name; | ||
943 | info->port_attr.attr.mode = S_IRUGO | S_IWUSR; | ||
944 | info->port_attr.show = show_port_type; | ||
945 | info->port_attr.store = set_port_type; | ||
946 | |||
947 | err = device_create_file(&dev->pdev->dev, &info->port_attr); | ||
948 | if (err) { | ||
949 | mlx4_err(dev, "Failed to create file for port %d\n", port); | ||
950 | info->port = -1; | ||
951 | } | ||
952 | |||
953 | return err; | ||
954 | } | ||
955 | |||
956 | static void mlx4_cleanup_port_info(struct mlx4_port_info *info) | ||
957 | { | ||
958 | if (info->port < 0) | ||
959 | return; | ||
960 | |||
961 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); | ||
962 | } | ||
963 | |||
731 | static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 964 | static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
732 | { | 965 | { |
733 | struct mlx4_priv *priv; | 966 | struct mlx4_priv *priv; |
734 | struct mlx4_dev *dev; | 967 | struct mlx4_dev *dev; |
735 | int err; | 968 | int err; |
969 | int port; | ||
736 | 970 | ||
737 | printk(KERN_INFO PFX "Initializing %s\n", | 971 | printk(KERN_INFO PFX "Initializing %s\n", |
738 | pci_name(pdev)); | 972 | pci_name(pdev)); |
@@ -807,6 +1041,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
807 | INIT_LIST_HEAD(&priv->ctx_list); | 1041 | INIT_LIST_HEAD(&priv->ctx_list); |
808 | spin_lock_init(&priv->ctx_lock); | 1042 | spin_lock_init(&priv->ctx_lock); |
809 | 1043 | ||
1044 | mutex_init(&priv->port_mutex); | ||
1045 | |||
810 | INIT_LIST_HEAD(&priv->pgdir_list); | 1046 | INIT_LIST_HEAD(&priv->pgdir_list); |
811 | mutex_init(&priv->pgdir_mutex); | 1047 | mutex_init(&priv->pgdir_mutex); |
812 | 1048 | ||
@@ -842,15 +1078,24 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
842 | if (err) | 1078 | if (err) |
843 | goto err_close; | 1079 | goto err_close; |
844 | 1080 | ||
1081 | for (port = 1; port <= dev->caps.num_ports; port++) { | ||
1082 | err = mlx4_init_port_info(dev, port); | ||
1083 | if (err) | ||
1084 | goto err_port; | ||
1085 | } | ||
1086 | |||
845 | err = mlx4_register_device(dev); | 1087 | err = mlx4_register_device(dev); |
846 | if (err) | 1088 | if (err) |
847 | goto err_cleanup; | 1089 | goto err_port; |
848 | 1090 | ||
849 | pci_set_drvdata(pdev, dev); | 1091 | pci_set_drvdata(pdev, dev); |
850 | 1092 | ||
851 | return 0; | 1093 | return 0; |
852 | 1094 | ||
853 | err_cleanup: | 1095 | err_port: |
1096 | for (port = 1; port <= dev->caps.num_ports; port++) | ||
1097 | mlx4_cleanup_port_info(&priv->port[port]); | ||
1098 | |||
854 | mlx4_cleanup_mcg_table(dev); | 1099 | mlx4_cleanup_mcg_table(dev); |
855 | mlx4_cleanup_qp_table(dev); | 1100 | mlx4_cleanup_qp_table(dev); |
856 | mlx4_cleanup_srq_table(dev); | 1101 | mlx4_cleanup_srq_table(dev); |
@@ -907,8 +1152,10 @@ static void mlx4_remove_one(struct pci_dev *pdev) | |||
907 | if (dev) { | 1152 | if (dev) { |
908 | mlx4_unregister_device(dev); | 1153 | mlx4_unregister_device(dev); |
909 | 1154 | ||
910 | for (p = 1; p <= dev->caps.num_ports; ++p) | 1155 | for (p = 1; p <= dev->caps.num_ports; p++) { |
1156 | mlx4_cleanup_port_info(&priv->port[p]); | ||
911 | mlx4_CLOSE_PORT(dev, p); | 1157 | mlx4_CLOSE_PORT(dev, p); |
1158 | } | ||
912 | 1159 | ||
913 | mlx4_cleanup_mcg_table(dev); | 1160 | mlx4_cleanup_mcg_table(dev); |
914 | mlx4_cleanup_qp_table(dev); | 1161 | mlx4_cleanup_qp_table(dev); |
@@ -948,6 +1195,8 @@ static struct pci_device_id mlx4_pci_table[] = { | |||
948 | { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ | 1195 | { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ |
949 | { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ | 1196 | { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ |
950 | { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ | 1197 | { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ |
1198 | { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */ | ||
1199 | { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ | ||
951 | { 0, } | 1200 | { 0, } |
952 | }; | 1201 | }; |
953 | 1202 | ||
@@ -960,10 +1209,28 @@ static struct pci_driver mlx4_driver = { | |||
960 | .remove = __devexit_p(mlx4_remove_one) | 1209 | .remove = __devexit_p(mlx4_remove_one) |
961 | }; | 1210 | }; |
962 | 1211 | ||
1212 | static int __init mlx4_verify_params(void) | ||
1213 | { | ||
1214 | if ((log_num_mac < 0) || (log_num_mac > 7)) { | ||
1215 | printk(KERN_WARNING "mlx4_core: bad log_num_mac: %d\n", log_num_mac); | ||
1216 | return -1; | ||
1217 | } | ||
1218 | |||
1219 | if ((log_num_vlan < 0) || (log_num_vlan > 7)) { | ||
1220 | printk(KERN_WARNING "mlx4_core: bad log_num_vlan: %d\n", log_num_vlan); | ||
1221 | return -1; | ||
1222 | } | ||
1223 | |||
1224 | return 0; | ||
1225 | } | ||
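The 1-7 and 0-7 bounds are not arbitrary: MLX4_MAX_MAC_NUM and MLX4_MAX_VLAN_NUM in mlx4.h are both 128, so a log value of 7 fills a port table exactly. A standalone check of that relationship:

#include <assert.h>

#define MLX4_MAX_MAC_NUM	128
#define MLX4_MAX_VLAN_NUM	128

int main(void)
{
	assert((1 << 7) == MLX4_MAX_MAC_NUM);	/* log_num_mac  <= 7 */
	assert((1 << 7) == MLX4_MAX_VLAN_NUM);	/* log_num_vlan <= 7 */
	return 0;
}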
1226 | |||
963 | static int __init mlx4_init(void) | 1227 | static int __init mlx4_init(void) |
964 | { | 1228 | { |
965 | int ret; | 1229 | int ret; |
966 | 1230 | ||
1231 | if (mlx4_verify_params()) | ||
1232 | return -EINVAL; | ||
1233 | |||
967 | ret = mlx4_catas_init(); | 1234 | ret = mlx4_catas_init(); |
968 | if (ret) | 1235 | if (ret) |
969 | return ret; | 1236 | return ret; |
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c index c83f88ce0736..592c01ae2c5d 100644 --- a/drivers/net/mlx4/mcg.c +++ b/drivers/net/mlx4/mcg.c | |||
@@ -368,8 +368,8 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev) | |||
368 | struct mlx4_priv *priv = mlx4_priv(dev); | 368 | struct mlx4_priv *priv = mlx4_priv(dev); |
369 | int err; | 369 | int err; |
370 | 370 | ||
371 | err = mlx4_bitmap_init(&priv->mcg_table.bitmap, | 371 | err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, |
372 | dev->caps.num_amgms, dev->caps.num_amgms - 1, 0); | 372 | dev->caps.num_amgms - 1, 0, 0); |
373 | if (err) | 373 | if (err) |
374 | return err; | 374 | return err; |
375 | 375 | ||
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 5337e3ac3e78..fa431fad0eec 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
@@ -111,6 +111,7 @@ struct mlx4_bitmap { | |||
111 | u32 last; | 111 | u32 last; |
112 | u32 top; | 112 | u32 top; |
113 | u32 max; | 113 | u32 max; |
114 | u32 reserved_top; | ||
114 | u32 mask; | 115 | u32 mask; |
115 | spinlock_t lock; | 116 | spinlock_t lock; |
116 | unsigned long *table; | 117 | unsigned long *table; |
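The new reserved_top field mirrors the existing bottom reservation: IDs below reserved_bot and the top reserved_top IDs are both withheld from the allocator, which is what lets QP numbers be pre-carved at either end of the range. The alloc.c side is not part of this hunk, so the following init sketch is an inference from the struct fields and the updated prototype, not the committed implementation:

/* sketch: plausible shape of the extended bitmap init (assumption) */
static int sketch_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
			      u32 reserved_bot, u32 reserved_top)
{
	u32 i;

	bitmap->last = reserved_bot;	  /* start past the low reservation */
	bitmap->top = 0;
	bitmap->max = num - reserved_top; /* top IDs are never handed out   */
	bitmap->reserved_top = reserved_top;
	bitmap->mask = mask;
	spin_lock_init(&bitmap->lock);

	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * sizeof(long),
				GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	for (i = 0; i < reserved_bot; ++i)	/* mark the low range in use */
		set_bit(i, bitmap->table);

	return 0;
}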
@@ -251,6 +252,38 @@ struct mlx4_catas_err { | |||
251 | struct list_head list; | 252 | struct list_head list; |
252 | }; | 253 | }; |
253 | 254 | ||
255 | #define MLX4_MAX_MAC_NUM 128 | ||
256 | #define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3) | ||
257 | |||
258 | struct mlx4_mac_table { | ||
259 | __be64 entries[MLX4_MAX_MAC_NUM]; | ||
260 | int refs[MLX4_MAX_MAC_NUM]; | ||
261 | struct mutex mutex; | ||
262 | int total; | ||
263 | int max; | ||
264 | }; | ||
265 | |||
266 | #define MLX4_MAX_VLAN_NUM 128 | ||
267 | #define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) | ||
268 | |||
269 | struct mlx4_vlan_table { | ||
270 | __be32 entries[MLX4_MAX_VLAN_NUM]; | ||
271 | int refs[MLX4_MAX_VLAN_NUM]; | ||
272 | struct mutex mutex; | ||
273 | int total; | ||
274 | int max; | ||
275 | }; | ||
276 | |||
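The size macros above encode the firmware mailbox layout directly: 128 eight-byte __be64 MAC entries (128 << 3 = 1024 bytes) and 128 four-byte __be32 VLAN entries (128 << 2 = 512 bytes), matching the memcpy() sizes used by port.c further down. A standalone check of the arithmetic:

#include <assert.h>

#define MLX4_MAX_MAC_NUM	128
#define MLX4_MAC_TABLE_SIZE	(MLX4_MAX_MAC_NUM << 3)
#define MLX4_MAX_VLAN_NUM	128
#define MLX4_VLAN_TABLE_SIZE	(MLX4_MAX_VLAN_NUM << 2)

int main(void)
{
	assert(MLX4_MAC_TABLE_SIZE == 128 * 8);		/* __be64 entries */
	assert(MLX4_VLAN_TABLE_SIZE == 128 * 4);	/* __be32 entries */
	return 0;
}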
277 | struct mlx4_port_info { | ||
278 | struct mlx4_dev *dev; | ||
279 | int port; | ||
280 | char dev_name[16]; | ||
281 | struct device_attribute port_attr; | ||
282 | enum mlx4_port_type tmp_type; | ||
283 | struct mlx4_mac_table mac_table; | ||
284 | struct mlx4_vlan_table vlan_table; | ||
285 | }; | ||
286 | |||
254 | struct mlx4_priv { | 287 | struct mlx4_priv { |
255 | struct mlx4_dev dev; | 288 | struct mlx4_dev dev; |
256 | 289 | ||
@@ -279,6 +312,8 @@ struct mlx4_priv { | |||
279 | 312 | ||
280 | struct mlx4_uar driver_uar; | 313 | struct mlx4_uar driver_uar; |
281 | void __iomem *kar; | 314 | void __iomem *kar; |
315 | struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; | ||
316 | struct mutex port_mutex; | ||
282 | }; | 317 | }; |
283 | 318 | ||
284 | static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) | 319 | static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) |
@@ -288,7 +323,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) | |||
288 | 323 | ||
289 | u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); | 324 | u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); |
290 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); | 325 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); |
291 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved); | 326 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); |
327 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); | ||
328 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, | ||
329 | u32 reserved_bot, u32 reserved_top); | ||
292 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); | 330 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); |
293 | 331 | ||
294 | int mlx4_reset(struct mlx4_dev *dev); | 332 | int mlx4_reset(struct mlx4_dev *dev); |
@@ -346,4 +384,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); | |||
346 | 384 | ||
347 | void mlx4_handle_catas_err(struct mlx4_dev *dev); | 385 | void mlx4_handle_catas_err(struct mlx4_dev *dev); |
348 | 386 | ||
387 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); | ||
388 | void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); | ||
389 | |||
390 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); | ||
391 | |||
349 | #endif /* MLX4_H */ | 392 | #endif /* MLX4_H */ |
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h new file mode 100644 index 000000000000..11fb17c6e97b --- /dev/null +++ b/drivers/net/mlx4/mlx4_en.h | |||
@@ -0,0 +1,561 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #ifndef _MLX4_EN_H_ | ||
35 | #define _MLX4_EN_H_ | ||
36 | |||
37 | #include <linux/compiler.h> | ||
38 | #include <linux/list.h> | ||
39 | #include <linux/mutex.h> | ||
40 | #include <linux/netdevice.h> | ||
41 | #include <linux/inet_lro.h> | ||
42 | |||
43 | #include <linux/mlx4/device.h> | ||
44 | #include <linux/mlx4/qp.h> | ||
45 | #include <linux/mlx4/cq.h> | ||
46 | #include <linux/mlx4/srq.h> | ||
47 | #include <linux/mlx4/doorbell.h> | ||
48 | |||
49 | #include "en_port.h" | ||
50 | |||
51 | #define DRV_NAME "mlx4_en" | ||
52 | #define DRV_VERSION "1.4.0" | ||
53 | #define DRV_RELDATE "Sep 2008" | ||
54 | |||
55 | |||
56 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) | ||
57 | |||
58 | #define mlx4_dbg(mlevel, priv, format, arg...) \ | ||
59 | if (NETIF_MSG_##mlevel & priv->msg_enable) \ | ||
60 | printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\ | ||
61 | (&priv->mdev->pdev->dev)->bus_id , ## arg) | ||
62 | |||
63 | #define mlx4_err(mdev, format, arg...) \ | ||
64 | printk(KERN_ERR "%s %s: " format , DRV_NAME ,\ | ||
65 | (&mdev->pdev->dev)->bus_id , ## arg) | ||
66 | #define mlx4_info(mdev, format, arg...) \ | ||
67 | printk(KERN_INFO "%s %s: " format , DRV_NAME ,\ | ||
68 | (&mdev->pdev->dev)->bus_id , ## arg) | ||
69 | #define mlx4_warn(mdev, format, arg...) \ | ||
70 | printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\ | ||
71 | (&mdev->pdev->dev)->bus_id , ## arg) | ||
72 | |||
73 | /* | ||
74 | * Device constants | ||
75 | */ | ||
76 | |||
77 | |||
78 | #define MLX4_EN_PAGE_SHIFT 12 | ||
79 | #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) | ||
80 | #define MAX_TX_RINGS 16 | ||
81 | #define MAX_RX_RINGS 16 | ||
82 | #define MAX_RSS_MAP_SIZE 64 | ||
83 | #define RSS_FACTOR 2 | ||
84 | #define TXBB_SIZE 64 | ||
85 | #define HEADROOM (2048 / TXBB_SIZE + 1) | ||
86 | #define MAX_LSO_HDR_SIZE 92 | ||
87 | #define STAMP_STRIDE 64 | ||
88 | #define STAMP_DWORDS (STAMP_STRIDE / 4) | ||
89 | #define STAMP_SHIFT 31 | ||
90 | #define STAMP_VAL 0x7fffffff | ||
91 | #define STATS_DELAY (HZ / 4) | ||
92 | |||
93 | /* Typical TSO descriptor with 16 gather entries is 352 bytes... */ | ||
94 | #define MAX_DESC_SIZE 512 | ||
95 | #define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE) | ||
96 | |||
97 | /* | ||
98 | * OS related constants and tunables | ||
99 | */ | ||
100 | |||
101 | #define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ) | ||
102 | |||
103 | #define MLX4_EN_ALLOC_ORDER 2 | ||
104 | #define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER) | ||
105 | |||
106 | #define MLX4_EN_MAX_LRO_DESCRIPTORS 32 | ||
107 | |||
108 | /* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU | ||
109 | * and 4K allocations) */ | ||
110 | enum { | ||
111 | FRAG_SZ0 = 512 - NET_IP_ALIGN, | ||
112 | FRAG_SZ1 = 1024, | ||
113 | FRAG_SZ2 = 4096, | ||
114 | FRAG_SZ3 = MLX4_EN_ALLOC_SIZE | ||
115 | }; | ||
116 | #define MLX4_EN_MAX_RX_FRAGS 4 | ||
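With NET_IP_ALIGN = 2 and 4K pages, the four fragments hold 510, 1024, 4096, and 16384 bytes (MLX4_EN_ALLOC_SIZE = PAGE_SIZE << 2). The first two cover a standard 1500-byte MTU; all four cover a 9600-byte jumbo frame. A standalone check of that budget:

#include <assert.h>

#define NET_IP_ALIGN		2
#define PAGE_SIZE		4096	/* assumes 4K pages */
#define MLX4_EN_ALLOC_ORDER	2
#define MLX4_EN_ALLOC_SIZE	(PAGE_SIZE << MLX4_EN_ALLOC_ORDER)

int main(void)
{
	int frag[4] = { 512 - NET_IP_ALIGN, 1024, 4096, MLX4_EN_ALLOC_SIZE };

	assert(frag[0] + frag[1] >= 1500);			/* normal MTU */
	assert(frag[0] + frag[1] + frag[2] + frag[3] >= 9600);	/* jumbo MTU  */
	return 0;
}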
117 | |||
118 | /* Minimum ring size for our page-allocation scheme to work */ | ||
119 | #define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES) | ||
120 | #define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE) | ||
121 | |||
122 | #define MLX4_EN_TX_RING_NUM 9 | ||
123 | #define MLX4_EN_DEF_TX_RING_SIZE 1024 | ||
124 | #define MLX4_EN_DEF_RX_RING_SIZE 1024 | ||
125 | |||
126 | /* Target number of bytes to coalesce with interrupt moderation */ | ||
127 | #define MLX4_EN_RX_COAL_TARGET 0x20000 | ||
128 | #define MLX4_EN_RX_COAL_TIME 0x10 | ||
129 | |||
130 | #define MLX4_EN_TX_COAL_PKTS 5 | ||
131 | #define MLX4_EN_TX_COAL_TIME 0x80 | ||
132 | |||
133 | #define MLX4_EN_RX_RATE_LOW 400000 | ||
134 | #define MLX4_EN_RX_COAL_TIME_LOW 0 | ||
135 | #define MLX4_EN_RX_RATE_HIGH 450000 | ||
136 | #define MLX4_EN_RX_COAL_TIME_HIGH 128 | ||
137 | #define MLX4_EN_RX_SIZE_THRESH 1024 | ||
138 | #define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) | ||
139 | #define MLX4_EN_SAMPLE_INTERVAL 0 | ||
140 | |||
141 | #define MLX4_EN_AUTO_CONF 0xffff | ||
142 | |||
143 | #define MLX4_EN_DEF_RX_PAUSE 1 | ||
144 | #define MLX4_EN_DEF_TX_PAUSE 1 | ||
145 | |||
146 | /* Interval between successive polls in the Tx routine when polling is used | ||
147 | instead of interrupts (in per-core Tx rings) - should be power of 2 */ | ||
148 | #define MLX4_EN_TX_POLL_MODER 16 | ||
149 | #define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4) | ||
150 | |||
151 | #define ETH_LLC_SNAP_SIZE 8 | ||
152 | |||
153 | #define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) | ||
154 | #define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) | ||
155 | |||
156 | #define MLX4_EN_MIN_MTU 46 | ||
157 | #define ETH_BCAST 0xffffffffffffULL | ||
158 | |||
159 | #ifdef MLX4_EN_PERF_STAT | ||
160 | /* Number of samples to 'average' */ | ||
161 | #define AVG_SIZE 128 | ||
162 | #define AVG_FACTOR 1024 | ||
163 | #define NUM_PERF_STATS NUM_PERF_COUNTERS | ||
164 | |||
165 | #define INC_PERF_COUNTER(cnt) (++(cnt)) | ||
166 | #define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add)) | ||
167 | #define AVG_PERF_COUNTER(cnt, sample) \ | ||
168 | ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE) | ||
169 | #define GET_PERF_COUNTER(cnt) (cnt) | ||
170 | #define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR) | ||
171 | |||
172 | #else | ||
173 | |||
174 | #define NUM_PERF_STATS 0 | ||
175 | #define INC_PERF_COUNTER(cnt) do {} while (0) | ||
176 | #define ADD_PERF_COUNTER(cnt, add) do {} while (0) | ||
177 | #define AVG_PERF_COUNTER(cnt, sample) do {} while (0) | ||
178 | #define GET_PERF_COUNTER(cnt) (0) | ||
179 | #define GET_AVG_PERF_COUNTER(cnt) (0) | ||
180 | #endif /* MLX4_EN_PERF_STAT */ | ||
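AVG_PERF_COUNTER() keeps a fixed-point exponentially weighted moving average: each sample is scaled by AVG_FACTOR = 1024 before the integer divide so precision is not lost, and the counter decays by 127/128 per update, i.e. cnt' = (127*cnt + 1024*sample) / 128. GET_AVG_PERF_COUNTER() removes the scale factor. A standalone sketch showing convergence on a constant input:

#include <assert.h>

#define AVG_SIZE	128
#define AVG_FACTOR	1024

int main(void)
{
	unsigned long cnt = 0;
	int i;

	/* feed a constant sample of 100; cnt converges toward 100 * 1024 */
	for (i = 0; i < 4096; i++)
		cnt = (cnt * (AVG_SIZE - 1) + 100 * AVG_FACTOR) / AVG_SIZE;

	/* integer truncation leaves the average a hair below 100 */
	assert(cnt / AVG_FACTOR >= 99 && cnt / AVG_FACTOR <= 100);
	return 0;
}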
181 | |||
182 | /* | ||
183 | * Configurables | ||
184 | */ | ||
185 | |||
186 | enum cq_type { | ||
187 | RX = 0, | ||
188 | TX = 1, | ||
189 | }; | ||
190 | |||
191 | |||
192 | /* | ||
193 | * Useful macros | ||
194 | */ | ||
195 | #define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x)) | ||
196 | #define XNOR(x, y) (!(x) == !(y)) | ||
197 | #define ILLEGAL_MAC(addr) ((addr) == 0xffffffffffffULL || (addr) == 0x0) | ||
198 | |||
199 | |||
200 | struct mlx4_en_tx_info { | ||
201 | struct sk_buff *skb; | ||
202 | u32 nr_txbb; | ||
203 | u8 linear; | ||
204 | u8 data_offset; | ||
205 | }; | ||
206 | |||
207 | |||
208 | #define MLX4_EN_BIT_DESC_OWN 0x80000000 | ||
209 | #define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg) | ||
210 | #define MLX4_EN_MEMTYPE_PAD 0x100 | ||
211 | #define DS_SIZE sizeof(struct mlx4_wqe_data_seg) | ||
212 | |||
213 | |||
214 | struct mlx4_en_tx_desc { | ||
215 | struct mlx4_wqe_ctrl_seg ctrl; | ||
216 | union { | ||
217 | struct mlx4_wqe_data_seg data; /* at least one data segment */ | ||
218 | struct mlx4_wqe_lso_seg lso; | ||
219 | struct mlx4_wqe_inline_seg inl; | ||
220 | }; | ||
221 | }; | ||
222 | |||
223 | #define MLX4_EN_USE_SRQ 0x01000000 | ||
224 | |||
225 | struct mlx4_en_rx_alloc { | ||
226 | struct page *page; | ||
227 | u16 offset; | ||
228 | }; | ||
229 | |||
230 | struct mlx4_en_tx_ring { | ||
231 | struct mlx4_hwq_resources wqres; | ||
232 | u32 size; /* number of TXBBs */ | ||
233 | u32 size_mask; | ||
234 | u16 stride; | ||
235 | u16 cqn; /* index of port CQ associated with this ring */ | ||
236 | u32 prod; | ||
237 | u32 cons; | ||
238 | u32 buf_size; | ||
239 | u32 doorbell_qpn; | ||
240 | void *buf; | ||
241 | u16 poll_cnt; | ||
242 | int blocked; | ||
243 | struct mlx4_en_tx_info *tx_info; | ||
244 | u8 *bounce_buf; | ||
245 | u32 last_nr_txbb; | ||
246 | struct mlx4_qp qp; | ||
247 | struct mlx4_qp_context context; | ||
248 | int qpn; | ||
249 | enum mlx4_qp_state qp_state; | ||
250 | struct mlx4_srq dummy; | ||
251 | unsigned long bytes; | ||
252 | unsigned long packets; | ||
253 | spinlock_t comp_lock; | ||
254 | }; | ||
255 | |||
256 | struct mlx4_en_rx_desc { | ||
257 | struct mlx4_wqe_srq_next_seg next; | ||
258 | /* actual number of entries depends on rx ring stride */ | ||
259 | struct mlx4_wqe_data_seg data[0]; | ||
260 | }; | ||
261 | |||
262 | struct mlx4_en_rx_ring { | ||
263 | struct mlx4_srq srq; | ||
264 | struct mlx4_hwq_resources wqres; | ||
265 | struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; | ||
266 | struct net_lro_mgr lro; | ||
267 | u32 size; /* number of Rx descs */ | ||
268 | u32 actual_size; | ||
269 | u32 size_mask; | ||
270 | u16 stride; | ||
271 | u16 log_stride; | ||
272 | u16 cqn; /* index of port CQ associated with this ring */ | ||
273 | u32 prod; | ||
274 | u32 cons; | ||
275 | u32 buf_size; | ||
276 | int need_refill; | ||
277 | int full; | ||
278 | void *buf; | ||
279 | void *rx_info; | ||
280 | unsigned long bytes; | ||
281 | unsigned long packets; | ||
282 | }; | ||
283 | |||
284 | |||
285 | static inline int mlx4_en_can_lro(__be16 status) | ||
286 | { | ||
287 | return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | | ||
288 | MLX4_CQE_STATUS_IPV4F | | ||
289 | MLX4_CQE_STATUS_IPV6 | | ||
290 | MLX4_CQE_STATUS_IPV4OPT | | ||
291 | MLX4_CQE_STATUS_TCP | | ||
292 | MLX4_CQE_STATUS_UDP | | ||
293 | MLX4_CQE_STATUS_IPOK)) == | ||
294 | cpu_to_be16(MLX4_CQE_STATUS_IPV4 | | ||
295 | MLX4_CQE_STATUS_IPOK | | ||
296 | MLX4_CQE_STATUS_TCP); | ||
297 | } | ||
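mlx4_en_can_lro() masks out every CQE status bit that could disqualify a frame and requires the result to equal exactly IPv4 + IP-checksum-OK + TCP: a set fragment, IPv4-options, IPv6, or UDP bit breaks the equality. A standalone restatement with hypothetical bit values (the real ones live in <linux/mlx4/cq.h>):

#include <assert.h>

/* hypothetical bit assignments, for illustration only */
#define ST_IPV4		(1 << 0)
#define ST_IPV4F	(1 << 1)
#define ST_IPV6		(1 << 2)
#define ST_IPV4OPT	(1 << 3)
#define ST_TCP		(1 << 4)
#define ST_UDP		(1 << 5)
#define ST_IPOK		(1 << 6)

static int can_lro(unsigned int status)
{
	unsigned int mask = ST_IPV4 | ST_IPV4F | ST_IPV6 | ST_IPV4OPT |
			    ST_TCP | ST_UDP | ST_IPOK;
	return (status & mask) == (ST_IPV4 | ST_IPOK | ST_TCP);
}

int main(void)
{
	assert(can_lro(ST_IPV4 | ST_IPOK | ST_TCP));		  /* eligible */
	assert(!can_lro(ST_IPV4 | ST_IPOK | ST_UDP));		  /* UDP: no  */
	assert(!can_lro(ST_IPV4 | ST_IPV4F | ST_IPOK | ST_TCP)); /* frag: no */
	return 0;
}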
298 | |||
299 | struct mlx4_en_cq { | ||
300 | struct mlx4_cq mcq; | ||
301 | struct mlx4_hwq_resources wqres; | ||
302 | int ring; | ||
303 | spinlock_t lock; | ||
304 | struct net_device *dev; | ||
305 | struct napi_struct napi; | ||
306 | /* Per-core Tx cq processing support */ | ||
307 | struct timer_list timer; | ||
308 | int size; | ||
309 | int buf_size; | ||
310 | unsigned vector; | ||
311 | enum cq_type is_tx; | ||
312 | u16 moder_time; | ||
313 | u16 moder_cnt; | ||
314 | int armed; | ||
315 | struct mlx4_cqe *buf; | ||
316 | #define MLX4_EN_OPCODE_ERROR 0x1e | ||
317 | }; | ||
318 | |||
319 | struct mlx4_en_port_profile { | ||
320 | u32 flags; | ||
321 | u32 tx_ring_num; | ||
322 | u32 rx_ring_num; | ||
323 | u32 tx_ring_size; | ||
324 | u32 rx_ring_size; | ||
325 | }; | ||
326 | |||
327 | struct mlx4_en_profile { | ||
328 | int rss_xor; | ||
329 | int num_lro; | ||
330 | u8 rss_mask; | ||
331 | u32 active_ports; | ||
332 | u32 small_pkt_int; | ||
333 | int rx_moder_cnt; | ||
334 | int rx_moder_time; | ||
335 | int auto_moder; | ||
336 | u8 rx_pause; | ||
337 | u8 rx_ppp; | ||
338 | u8 tx_pause; | ||
339 | u8 tx_ppp; | ||
340 | u8 no_reset; | ||
341 | struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; | ||
342 | }; | ||
343 | |||
344 | struct mlx4_en_dev { | ||
345 | struct mlx4_dev *dev; | ||
346 | struct pci_dev *pdev; | ||
347 | struct mutex state_lock; | ||
348 | struct net_device *pndev[MLX4_MAX_PORTS + 1]; | ||
349 | u32 port_cnt; | ||
350 | bool device_up; | ||
351 | struct mlx4_en_profile profile; | ||
352 | u32 LSO_support; | ||
353 | struct workqueue_struct *workqueue; | ||
354 | struct device *dma_device; | ||
355 | void __iomem *uar_map; | ||
356 | struct mlx4_uar priv_uar; | ||
357 | struct mlx4_mr mr; | ||
358 | u32 priv_pdn; | ||
359 | spinlock_t uar_lock; | ||
360 | }; | ||
361 | |||
362 | |||
363 | struct mlx4_en_rss_map { | ||
364 | int size; | ||
365 | int base_qpn; | ||
366 | u16 map[MAX_RSS_MAP_SIZE]; | ||
367 | struct mlx4_qp qps[MAX_RSS_MAP_SIZE]; | ||
368 | enum mlx4_qp_state state[MAX_RSS_MAP_SIZE]; | ||
369 | struct mlx4_qp indir_qp; | ||
370 | enum mlx4_qp_state indir_state; | ||
371 | }; | ||
372 | |||
373 | struct mlx4_en_rss_context { | ||
374 | __be32 base_qpn; | ||
375 | __be32 default_qpn; | ||
376 | u16 reserved; | ||
377 | u8 hash_fn; | ||
378 | u8 flags; | ||
379 | __be32 rss_key[10]; | ||
380 | }; | ||
381 | |||
382 | struct mlx4_en_pkt_stats { | ||
383 | unsigned long broadcast; | ||
384 | unsigned long rx_prio[8]; | ||
385 | unsigned long tx_prio[8]; | ||
386 | #define NUM_PKT_STATS 17 | ||
387 | }; | ||
388 | |||
389 | struct mlx4_en_port_stats { | ||
390 | unsigned long lro_aggregated; | ||
391 | unsigned long lro_flushed; | ||
392 | unsigned long lro_no_desc; | ||
393 | unsigned long tso_packets; | ||
394 | unsigned long queue_stopped; | ||
395 | unsigned long wake_queue; | ||
396 | unsigned long tx_timeout; | ||
397 | unsigned long rx_alloc_failed; | ||
398 | unsigned long rx_chksum_good; | ||
399 | unsigned long rx_chksum_none; | ||
400 | unsigned long tx_chksum_offload; | ||
401 | #define NUM_PORT_STATS 11 | ||
402 | }; | ||
403 | |||
404 | struct mlx4_en_perf_stats { | ||
405 | u32 tx_poll; | ||
406 | u64 tx_pktsz_avg; | ||
407 | u32 inflight_avg; | ||
408 | u16 tx_coal_avg; | ||
409 | u16 rx_coal_avg; | ||
410 | u32 napi_quota; | ||
411 | #define NUM_PERF_COUNTERS 6 | ||
412 | }; | ||
413 | |||
414 | struct mlx4_en_frag_info { | ||
415 | u16 frag_size; | ||
416 | u16 frag_prefix_size; | ||
417 | u16 frag_stride; | ||
418 | u16 frag_align; | ||
419 | u16 last_offset; | ||
420 | |||
421 | }; | ||
422 | |||
423 | struct mlx4_en_priv { | ||
424 | struct mlx4_en_dev *mdev; | ||
425 | struct mlx4_en_port_profile *prof; | ||
426 | struct net_device *dev; | ||
427 | struct vlan_group *vlgrp; | ||
428 | struct net_device_stats stats; | ||
429 | struct net_device_stats ret_stats; | ||
430 | spinlock_t stats_lock; | ||
431 | |||
432 | unsigned long last_moder_packets; | ||
433 | unsigned long last_moder_tx_packets; | ||
434 | unsigned long last_moder_bytes; | ||
435 | unsigned long last_moder_jiffies; | ||
436 | int last_moder_time; | ||
437 | u16 rx_usecs; | ||
438 | u16 rx_frames; | ||
439 | u16 tx_usecs; | ||
440 | u16 tx_frames; | ||
441 | u32 pkt_rate_low; | ||
442 | u16 rx_usecs_low; | ||
443 | u32 pkt_rate_high; | ||
444 | u16 rx_usecs_high; | ||
445 | u16 sample_interval; | ||
446 | u16 adaptive_rx_coal; | ||
447 | u32 msg_enable; | ||
448 | |||
449 | struct mlx4_hwq_resources res; | ||
450 | int link_state; | ||
451 | int last_link_state; | ||
452 | bool port_up; | ||
453 | int port; | ||
454 | int registered; | ||
455 | int allocated; | ||
456 | int stride; | ||
457 | int rx_csum; | ||
458 | u64 mac; | ||
459 | int mac_index; | ||
460 | unsigned max_mtu; | ||
461 | int base_qpn; | ||
462 | |||
463 | struct mlx4_en_rss_map rss_map; | ||
464 | u16 tx_prio_map[8]; | ||
465 | u32 flags; | ||
466 | #define MLX4_EN_FLAG_PROMISC 0x1 | ||
467 | u32 tx_ring_num; | ||
468 | u32 rx_ring_num; | ||
469 | u32 rx_skb_size; | ||
470 | struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; | ||
471 | u16 num_frags; | ||
472 | u16 log_rx_info; | ||
473 | |||
474 | struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; | ||
475 | struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; | ||
476 | struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; | ||
477 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; | ||
478 | struct work_struct mcast_task; | ||
479 | struct work_struct mac_task; | ||
480 | struct delayed_work refill_task; | ||
481 | struct work_struct watchdog_task; | ||
482 | struct work_struct linkstate_task; | ||
483 | struct delayed_work stats_task; | ||
484 | struct mlx4_en_perf_stats pstats; | ||
485 | struct mlx4_en_pkt_stats pkstats; | ||
486 | struct mlx4_en_port_stats port_stats; | ||
487 | struct dev_mc_list *mc_list; | ||
488 | struct mlx4_en_stat_out_mbox hw_stats; | ||
489 | }; | ||
490 | |||
491 | |||
492 | void mlx4_en_destroy_netdev(struct net_device *dev); | ||
493 | int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | ||
494 | struct mlx4_en_port_profile *prof); | ||
495 | |||
496 | int mlx4_en_get_profile(struct mlx4_en_dev *mdev); | ||
497 | |||
498 | int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, | ||
499 | int entries, int ring, enum cq_type mode); | ||
500 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
501 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
502 | void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
503 | int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
504 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
505 | |||
506 | void mlx4_en_poll_tx_cq(unsigned long data); | ||
507 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); | ||
508 | int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); | ||
509 | |||
510 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, | ||
511 | u32 size, u16 stride); | ||
512 | void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); | ||
513 | int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | ||
514 | struct mlx4_en_tx_ring *ring, | ||
515 | int cq, int srqn); | ||
516 | void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, | ||
517 | struct mlx4_en_tx_ring *ring); | ||
518 | |||
519 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, | ||
520 | struct mlx4_en_rx_ring *ring, | ||
521 | u32 size, u16 stride); | ||
522 | void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, | ||
523 | struct mlx4_en_rx_ring *ring); | ||
524 | int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv); | ||
525 | void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, | ||
526 | struct mlx4_en_rx_ring *ring); | ||
527 | int mlx4_en_process_rx_cq(struct net_device *dev, | ||
528 | struct mlx4_en_cq *cq, | ||
529 | int budget); | ||
530 | int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); | ||
531 | void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, | ||
532 | int is_tx, int rss, int qpn, int cqn, int srqn, | ||
533 | struct mlx4_qp_context *context); | ||
534 | int mlx4_en_map_buffer(struct mlx4_buf *buf); | ||
535 | void mlx4_en_unmap_buffer(struct mlx4_buf *buf); | ||
536 | |||
537 | void mlx4_en_calc_rx_buf(struct net_device *dev); | ||
538 | void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv, | ||
539 | struct mlx4_en_rss_map *rss_map, | ||
540 | int num_entries, int num_rings); | ||
541 | void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num); | ||
542 | int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); | ||
543 | void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); | ||
544 | int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); | ||
545 | void mlx4_en_rx_refill(struct work_struct *work); | ||
546 | void mlx4_en_rx_irq(struct mlx4_cq *mcq); | ||
547 | |||
548 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); | ||
549 | int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp); | ||
550 | int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, | ||
551 | u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); | ||
552 | int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | ||
553 | u8 promisc); | ||
554 | |||
555 | int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); | ||
556 | |||
557 | /* | ||
558 | * Globals | ||
559 | */ | ||
560 | extern const struct ethtool_ops mlx4_en_ethtool_ops; | ||
561 | #endif | ||
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index d1dd5b48dbd1..0caf74cae8bc 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c | |||
@@ -461,7 +461,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) | |||
461 | int err; | 461 | int err; |
462 | 462 | ||
463 | err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, | 463 | err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, |
464 | ~0, dev->caps.reserved_mrws); | 464 | ~0, dev->caps.reserved_mrws, 0); |
465 | if (err) | 465 | if (err) |
466 | return err; | 466 | return err; |
467 | 467 | ||
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c index aa616892d09c..26d1a7a9e375 100644 --- a/drivers/net/mlx4/pd.c +++ b/drivers/net/mlx4/pd.c | |||
@@ -62,7 +62,7 @@ int mlx4_init_pd_table(struct mlx4_dev *dev) | |||
62 | struct mlx4_priv *priv = mlx4_priv(dev); | 62 | struct mlx4_priv *priv = mlx4_priv(dev); |
63 | 63 | ||
64 | return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, | 64 | return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, |
65 | (1 << 24) - 1, dev->caps.reserved_pds); | 65 | (1 << 24) - 1, dev->caps.reserved_pds, 0); |
66 | } | 66 | } |
67 | 67 | ||
68 | void mlx4_cleanup_pd_table(struct mlx4_dev *dev) | 68 | void mlx4_cleanup_pd_table(struct mlx4_dev *dev) |
@@ -100,7 +100,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev) | |||
100 | 100 | ||
101 | return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, | 101 | return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, |
102 | dev->caps.num_uars, dev->caps.num_uars - 1, | 102 | dev->caps.num_uars, dev->caps.num_uars - 1, |
103 | max(128, dev->caps.reserved_uars)); | 103 | max(128, dev->caps.reserved_uars), 0); |
104 | } | 104 | } |
105 | 105 | ||
106 | void mlx4_cleanup_uar_table(struct mlx4_dev *dev) | 106 | void mlx4_cleanup_uar_table(struct mlx4_dev *dev) |
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c new file mode 100644 index 000000000000..e2fdab42c4ce --- /dev/null +++ b/drivers/net/mlx4/port.c | |||
@@ -0,0 +1,282 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/errno.h> | ||
34 | #include <linux/if_ether.h> | ||
35 | |||
36 | #include <linux/mlx4/cmd.h> | ||
37 | |||
38 | #include "mlx4.h" | ||
39 | |||
40 | #define MLX4_MAC_VALID (1ull << 63) | ||
41 | #define MLX4_MAC_MASK 0xffffffffffffULL | ||
42 | |||
43 | #define MLX4_VLAN_VALID (1u << 31) | ||
44 | #define MLX4_VLAN_MASK 0xfff | ||
45 | |||
46 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) | ||
47 | { | ||
48 | int i; | ||
49 | |||
50 | mutex_init(&table->mutex); | ||
51 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | ||
52 | table->entries[i] = 0; | ||
53 | table->refs[i] = 0; | ||
54 | } | ||
55 | table->max = 1 << dev->caps.log_num_macs; | ||
56 | table->total = 0; | ||
57 | } | ||
58 | |||
59 | void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) | ||
60 | { | ||
61 | int i; | ||
62 | |||
63 | mutex_init(&table->mutex); | ||
64 | for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { | ||
65 | table->entries[i] = 0; | ||
66 | table->refs[i] = 0; | ||
67 | } | ||
68 | table->max = 1 << dev->caps.log_num_vlans; | ||
69 | table->total = 0; | ||
70 | } | ||
71 | |||
72 | static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, | ||
73 | __be64 *entries) | ||
74 | { | ||
75 | struct mlx4_cmd_mailbox *mailbox; | ||
76 | u32 in_mod; | ||
77 | int err; | ||
78 | |||
79 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
80 | if (IS_ERR(mailbox)) | ||
81 | return PTR_ERR(mailbox); | ||
82 | |||
83 | memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); | ||
84 | |||
85 | in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; | ||
86 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
87 | MLX4_CMD_TIME_CLASS_B); | ||
88 | |||
89 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
90 | return err; | ||
91 | } | ||
92 | |||
93 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) | ||
94 | { | ||
95 | struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; | ||
96 | int i, err = 0; | ||
97 | int free = -1; | ||
98 | |||
99 | mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); | ||
100 | mutex_lock(&table->mutex); | ||
101 | for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { | ||
102 | if (free < 0 && !table->refs[i]) { | ||
103 | free = i; | ||
104 | continue; | ||
105 | } | ||
106 | |||
107 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | ||
108 | /* MAC already registered, increase reference count */ | ||
109 | *index = i; | ||
110 | ++table->refs[i]; | ||
111 | goto out; | ||
112 | } | ||
113 | } | ||
114 | mlx4_dbg(dev, "Free MAC index is %d\n", free); | ||
115 | |||
116 | if (table->total == table->max) { | ||
117 | /* No free mac entries */ | ||
118 | err = -ENOSPC; | ||
119 | goto out; | ||
120 | } | ||
121 | |||
122 | /* Register new MAC */ | ||
123 | table->refs[free] = 1; | ||
124 | table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); | ||
125 | |||
126 | err = mlx4_set_port_mac_table(dev, port, table->entries); | ||
127 | if (unlikely(err)) { | ||
128 | mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac); | ||
129 | table->refs[free] = 0; | ||
130 | table->entries[free] = 0; | ||
131 | goto out; | ||
132 | } | ||
133 | |||
134 | *index = free; | ||
135 | ++table->total; | ||
136 | out: | ||
137 | mutex_unlock(&table->mutex); | ||
138 | return err; | ||
139 | } | ||
140 | EXPORT_SYMBOL_GPL(mlx4_register_mac); | ||
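The MAC table is reference counted: registering an already-present address just bumps refs[] and returns the same index, and every successful registration must be balanced by an unregister on that index. A hedged caller sketch (example_attach_mac() is hypothetical; mlx4_en's netdev code is the intended consumer):

/* sketch of the expected register/unregister pairing */
static int example_attach_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	int index;
	int err = mlx4_register_mac(dev, port, mac, &index);

	if (err)
		return err;	/* -ENOSPC once the port table is full */

	/* ... program RX QPs using 'index' ... */

	mlx4_unregister_mac(dev, port, index);	/* drop one reference */
	return 0;
}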
141 | |||
142 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index) | ||
143 | { | ||
144 | struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; | ||
145 | |||
146 | mutex_lock(&table->mutex); | ||
147 | if (!table->refs[index]) { | ||
148 | mlx4_warn(dev, "No MAC entry for index %d\n", index); | ||
149 | goto out; | ||
150 | } | ||
151 | if (--table->refs[index]) { | ||
152 | mlx4_warn(dev, "More references remain for index %d, " | ||
153 | "not modifying the MAC table\n", index); | ||
154 | goto out; | ||
155 | } | ||
156 | table->entries[index] = 0; | ||
157 | mlx4_set_port_mac_table(dev, port, table->entries); | ||
158 | --table->total; | ||
159 | out: | ||
160 | mutex_unlock(&table->mutex); | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(mlx4_unregister_mac); | ||
163 | |||
164 | static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, | ||
165 | __be32 *entries) | ||
166 | { | ||
167 | struct mlx4_cmd_mailbox *mailbox; | ||
168 | u32 in_mod; | ||
169 | int err; | ||
170 | |||
171 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
172 | if (IS_ERR(mailbox)) | ||
173 | return PTR_ERR(mailbox); | ||
174 | |||
175 | memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); | ||
176 | in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; | ||
177 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
178 | MLX4_CMD_TIME_CLASS_B); | ||
179 | |||
180 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
181 | |||
182 | return err; | ||
183 | } | ||
184 | |||
185 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) | ||
186 | { | ||
187 | struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; | ||
188 | int i, err = 0; | ||
189 | int free = -1; | ||
190 | |||
191 | mutex_lock(&table->mutex); | ||
192 | for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { | ||
193 | if (free < 0 && (table->refs[i] == 0)) { | ||
194 | free = i; | ||
195 | continue; | ||
196 | } | ||
197 | |||
198 | if (table->refs[i] && | ||
199 | (vlan == (MLX4_VLAN_MASK & | ||
200 | be32_to_cpu(table->entries[i])))) { | ||
201 | /* VLAN already registered, increase reference count */ | ||
202 | *index = i; | ||
203 | ++table->refs[i]; | ||
204 | goto out; | ||
205 | } | ||
206 | } | ||
207 | |||
208 | if (table->total == table->max) { | ||
209 | /* No free vlan entries */ | ||
210 | err = -ENOSPC; | ||
211 | goto out; | ||
212 | } | ||
213 | |||
214 | /* Register new VLAN */ | ||
215 | table->refs[free] = 1; | ||
216 | table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); | ||
217 | |||
218 | err = mlx4_set_port_vlan_table(dev, port, table->entries); | ||
219 | if (unlikely(err)) { | ||
220 | mlx4_warn(dev, "Failed adding VLAN: %u\n", vlan); | ||
221 | table->refs[free] = 0; | ||
222 | table->entries[free] = 0; | ||
223 | goto out; | ||
224 | } | ||
225 | |||
226 | *index = free; | ||
227 | ++table->total; | ||
228 | out: | ||
229 | mutex_unlock(&table->mutex); | ||
230 | return err; | ||
231 | } | ||
232 | EXPORT_SYMBOL_GPL(mlx4_register_vlan); | ||
233 | |||
234 | void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) | ||
235 | { | ||
236 | struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; | ||
237 | |||
238 | if (index < MLX4_VLAN_REGULAR) { | ||
239 | mlx4_warn(dev, "Trying to free special VLAN index %d\n", index); | ||
240 | return; | ||
241 | } | ||
242 | |||
243 | mutex_lock(&table->mutex); | ||
244 | if (!table->refs[index]) { | ||
245 | mlx4_warn(dev, "No vlan entry for index %d\n", index); | ||
246 | goto out; | ||
247 | } | ||
248 | if (--table->refs[index]) { | ||
249 | mlx4_dbg(dev, "More references remain for index %d, " | ||
250 | "not modifying the VLAN table\n", index); | ||
251 | goto out; | ||
252 | } | ||
253 | table->entries[index] = 0; | ||
254 | mlx4_set_port_vlan_table(dev, port, table->entries); | ||
255 | --table->total; | ||
256 | out: | ||
257 | mutex_unlock(&table->mutex); | ||
258 | } | ||
259 | EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); | ||
260 | |||
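The VLAN path mirrors the MAC path, except that the first two table slots are reserved (MLX4_NO_VLAN_IDX for untagged traffic, MLX4_VLAN_MISS_IDX for the miss entry), which is why mlx4_unregister_vlan() above refuses any index below MLX4_VLAN_REGULAR. A hedged usage sketch (function name is made up):

	/* Sketch: add a VLAN filter entry and remember its table index. */
	static int example_add_vid(struct mlx4_dev *dev, u8 port, u16 vid,
				   int *vidx)
	{
		int err;

		err = mlx4_register_vlan(dev, port, vid, vidx);
		if (err)		/* -ENOSPC once the table is full */
			return err;

		/* *vidx >= MLX4_VLAN_REGULAR here; keep it for the
		 * matching mlx4_unregister_vlan(dev, port, *vidx). */
		return 0;
	}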
261 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) | ||
262 | { | ||
263 | struct mlx4_cmd_mailbox *mailbox; | ||
264 | int err; | ||
265 | u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; | ||
266 | |||
267 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
268 | if (IS_ERR(mailbox)) | ||
269 | return PTR_ERR(mailbox); | ||
270 | |||
271 | memset(mailbox->buf, 0, 256); | ||
272 | if (is_eth) { | ||
273 | ((u8 *) mailbox->buf)[3] = 6; | ||
274 | ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15); | ||
275 | ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15); | ||
276 | } | ||
277 | err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT, | ||
278 | MLX4_CMD_TIME_CLASS_B); | ||
279 | |||
280 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
281 | return err; | ||
282 | } | ||
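The three raw stores in the Ethernet branch of mlx4_SET_PORT() above are magic numbers; the patch does not include the firmware command layout, so their meaning cannot be recovered from the diff alone (the repeated 1 << 15 looks like a "change this field" valid bit in each 16-bit word, but that is a guess). A purely illustrative view of the same mailbox bytes with invented field names, matching only the offsets actually written (byte 3, and 16-bit words at byte offsets 8 and 12):

	/* Assumption: names are hypothetical, only the offsets are real. */
	struct example_set_port_eth {
		u8	reserved0[3];
		u8	flags;		/* ((u8 *) buf)[3] = 6 */
		__be32	reserved1;
		__be16	field_a;	/* ((__be16 *) buf)[4] = cpu_to_be16(1 << 15) */
		__be16	reserved2;
		__be16	field_b;	/* ((__be16 *) buf)[6] = cpu_to_be16(1 << 15) */
	};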
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c index c49a86044bf7..1c565ef8d179 100644 --- a/drivers/net/mlx4/qp.c +++ b/drivers/net/mlx4/qp.c | |||
@@ -147,19 +147,42 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
147 | } | 147 | } |
148 | EXPORT_SYMBOL_GPL(mlx4_qp_modify); | 148 | EXPORT_SYMBOL_GPL(mlx4_qp_modify); |
149 | 149 | ||
150 | int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp) | 150 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) |
151 | { | ||
152 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
153 | struct mlx4_qp_table *qp_table = &priv->qp_table; | ||
154 | int qpn; | ||
155 | |||
156 | qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); | ||
157 | if (qpn == -1) | ||
158 | return -ENOMEM; | ||
159 | |||
160 | *base = qpn; | ||
161 | return 0; | ||
162 | } | ||
163 | EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); | ||
164 | |||
165 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) | ||
166 | { | ||
167 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
168 | struct mlx4_qp_table *qp_table = &priv->qp_table; | ||
169 | if (base_qpn < dev->caps.sqp_start + 8) | ||
170 | return; | ||
171 | |||
172 | mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); | ||
173 | } | ||
174 | EXPORT_SYMBOL_GPL(mlx4_qp_release_range); | ||
175 | |||
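QP number management changes shape here: mlx4_qp_alloc() no longer allocates from the bitmap itself, so callers must reserve an aligned QPN range first and release it after freeing the QPs (the ICM get/put in alloc and free stays as it was). A sketch of the resulting call sequence, using only the functions this hunk adds:

	/* Sketch: reserve 8 QPNs aligned to 8, use one, tear down. */
	static int example_qp_range(struct mlx4_dev *dev)
	{
		struct mlx4_qp qp;
		int base_qpn;
		int err;

		err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn);
		if (err)
			return err;

		err = mlx4_qp_alloc(dev, base_qpn, &qp);	/* qpn 0 -> -EINVAL */
		if (err)
			goto out_release;

		/* ... transition the QP with mlx4_qp_modify(), post work ... */

		mlx4_qp_free(dev, &qp);		/* no longer frees the QPN */
	out_release:
		mlx4_qp_release_range(dev, base_qpn, 8);
		return err;
	}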
176 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) | ||
151 | { | 177 | { |
152 | struct mlx4_priv *priv = mlx4_priv(dev); | 178 | struct mlx4_priv *priv = mlx4_priv(dev); |
153 | struct mlx4_qp_table *qp_table = &priv->qp_table; | 179 | struct mlx4_qp_table *qp_table = &priv->qp_table; |
154 | int err; | 180 | int err; |
155 | 181 | ||
156 | if (sqpn) | 182 | if (!qpn) |
157 | qp->qpn = sqpn; | 183 | return -EINVAL; |
158 | else { | 184 | |
159 | qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap); | 185 | qp->qpn = qpn; |
160 | if (qp->qpn == -1) | ||
161 | return -ENOMEM; | ||
162 | } | ||
163 | 186 | ||
164 | err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); | 187 | err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); |
165 | if (err) | 188 | if (err) |
@@ -208,9 +231,6 @@ err_put_qp: | |||
208 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); | 231 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); |
209 | 232 | ||
210 | err_out: | 233 | err_out: |
211 | if (!sqpn) | ||
212 | mlx4_bitmap_free(&qp_table->bitmap, qp->qpn); | ||
213 | |||
214 | return err; | 234 | return err; |
215 | } | 235 | } |
216 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); | 236 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); |
@@ -239,9 +259,6 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) | |||
239 | mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); | 259 | mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); |
240 | mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); | 260 | mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); |
241 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); | 261 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); |
242 | |||
243 | if (qp->qpn >= dev->caps.sqp_start + 8) | ||
244 | mlx4_bitmap_free(&qp_table->bitmap, qp->qpn); | ||
245 | } | 262 | } |
246 | EXPORT_SYMBOL_GPL(mlx4_qp_free); | 263 | EXPORT_SYMBOL_GPL(mlx4_qp_free); |
247 | 264 | ||
@@ -255,6 +272,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) | |||
255 | { | 272 | { |
256 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | 273 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; |
257 | int err; | 274 | int err; |
275 | int reserved_from_top = 0; | ||
258 | 276 | ||
259 | spin_lock_init(&qp_table->lock); | 277 | spin_lock_init(&qp_table->lock); |
260 | INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); | 278 | INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); |
@@ -264,9 +282,40 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) | |||
264 | * block of special QPs must be aligned to a multiple of 8, so | 282 | * block of special QPs must be aligned to a multiple of 8, so |
265 | * round up. | 283 | * round up. |
266 | */ | 284 | */ |
267 | dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8); | 285 | dev->caps.sqp_start = |
286 | ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); | ||
287 | |||
288 | { | ||
289 | int sort[MLX4_NUM_QP_REGION]; | ||
290 | int i, j, tmp; | ||
291 | int last_base = dev->caps.num_qps; | ||
292 | |||
293 | for (i = 1; i < MLX4_NUM_QP_REGION; ++i) | ||
294 | sort[i] = i; | ||
295 | |||
296 | for (i = MLX4_NUM_QP_REGION; i > 0; --i) { | ||
297 | for (j = 2; j < i; ++j) { | ||
298 | if (dev->caps.reserved_qps_cnt[sort[j]] > | ||
299 | dev->caps.reserved_qps_cnt[sort[j - 1]]) { | ||
300 | tmp = sort[j]; | ||
301 | sort[j] = sort[j - 1]; | ||
302 | sort[j - 1] = tmp; | ||
303 | } | ||
304 | } | ||
305 | } | ||
306 | |||
307 | for (i = 1; i < MLX4_NUM_QP_REGION; ++i) { | ||
308 | last_base -= dev->caps.reserved_qps_cnt[sort[i]]; | ||
309 | dev->caps.reserved_qps_base[sort[i]] = last_base; | ||
310 | reserved_from_top += | ||
311 | dev->caps.reserved_qps_cnt[sort[i]]; | ||
312 | } | ||
313 | |||
314 | } | ||
315 | |||
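The anonymous block above sorts the non-firmware regions into descending size order (sort[0] is written by neither loop, but it is also never read, since j starts at 2) and then carves each region downward from the top of the QP number space; reserved_from_top is passed to mlx4_bitmap_init() below so those QPNs are masked out of the general allocator. A standalone worked example with made-up region sizes:

	/* Illustration only -- the counts below are invented. */
	#include <stdio.h>

	int main(void)
	{
		int num_qps = 1 << 17;			/* 131072 */
		int cnt[] = { 65536, 1024, 256 };	/* sorted descending */
		int last_base = num_qps;
		int reserved_from_top = 0;
		int i;

		for (i = 0; i < 3; i++) {
			last_base -= cnt[i];
			reserved_from_top += cnt[i];
			printf("region %d: base %d\n", i, last_base);
		}
		/* bases: 65536, 64512, 64256; reserved_from_top = 66816,
		 * leaving QPNs below 64256 for the bitmap allocator. */
		return 0;
	}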
268 | err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, | 316 | err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, |
269 | (1 << 24) - 1, dev->caps.sqp_start + 8); | 317 | (1 << 23) - 1, dev->caps.sqp_start + 8, |
318 | reserved_from_top); | ||
270 | if (err) | 319 | if (err) |
271 | return err; | 320 | return err; |
272 | 321 | ||
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c index 533eb6db24b3..fe9f218691f5 100644 --- a/drivers/net/mlx4/srq.c +++ b/drivers/net/mlx4/srq.c | |||
@@ -245,7 +245,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev) | |||
245 | INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); | 245 | INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); |
246 | 246 | ||
247 | err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, | 247 | err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, |
248 | dev->caps.num_srqs - 1, dev->caps.reserved_srqs); | 248 | dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); |
249 | if (err) | 249 | if (err) |
250 | return err; | 250 | return err; |
251 | 251 | ||
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 77323a72dd3c..cf9c679ab38b 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
@@ -132,6 +132,15 @@ enum { | |||
132 | MLX4_MAILBOX_SIZE = 4096 | 132 | MLX4_MAILBOX_SIZE = 4096 |
133 | }; | 133 | }; |
134 | 134 | ||
135 | enum { | ||
136 | /* set port opcode modifiers */ | ||
137 | MLX4_SET_PORT_GENERAL = 0x0, | ||
138 | MLX4_SET_PORT_RQP_CALC = 0x1, | ||
139 | MLX4_SET_PORT_MAC_TABLE = 0x2, | ||
140 | MLX4_SET_PORT_VLAN_TABLE = 0x3, | ||
141 | MLX4_SET_PORT_PRIO_MAP = 0x4, | ||
142 | }; | ||
143 | |||
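These modifiers select which per-port table a SET_PORT mailbox describes; they are placed in bits 8-15 of the command's input modifier, with the port number in the low byte, exactly as the table-update helpers in port.c above compose it:

	/* From mlx4_set_port_vlan_table() above; op_mod 1 accompanies the
	 * table modifiers, though the patch does not spell out its meaning. */
	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);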
135 | struct mlx4_dev; | 144 | struct mlx4_dev; |
136 | 145 | ||
137 | struct mlx4_cmd_mailbox { | 146 | struct mlx4_cmd_mailbox { |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index b2f944468313..bd9977b89490 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -60,6 +60,7 @@ enum { | |||
60 | MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7, | 60 | MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7, |
61 | MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, | 61 | MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, |
62 | MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, | 62 | MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, |
63 | MLX4_DEV_CAP_FLAG_DPDP = 1 << 12, | ||
63 | MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, | 64 | MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, |
64 | MLX4_DEV_CAP_FLAG_APM = 1 << 17, | 65 | MLX4_DEV_CAP_FLAG_APM = 1 << 17, |
65 | MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, | 66 | MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, |
@@ -145,6 +146,29 @@ enum { | |||
145 | MLX4_MTT_FLAG_PRESENT = 1 | 146 | MLX4_MTT_FLAG_PRESENT = 1 |
146 | }; | 147 | }; |
147 | 148 | ||
149 | enum mlx4_qp_region { | ||
150 | MLX4_QP_REGION_FW = 0, | ||
151 | MLX4_QP_REGION_ETH_ADDR, | ||
152 | MLX4_QP_REGION_FC_ADDR, | ||
153 | MLX4_QP_REGION_FC_EXCH, | ||
154 | MLX4_NUM_QP_REGION | ||
155 | }; | ||
156 | |||
157 | enum mlx4_port_type { | ||
158 | MLX4_PORT_TYPE_IB = 1 << 0, | ||
159 | MLX4_PORT_TYPE_ETH = 1 << 1, | ||
160 | }; | ||
161 | |||
162 | enum mlx4_special_vlan_idx { | ||
163 | MLX4_NO_VLAN_IDX = 0, | ||
164 | MLX4_VLAN_MISS_IDX, | ||
165 | MLX4_VLAN_REGULAR | ||
166 | }; | ||
167 | |||
168 | enum { | ||
169 | MLX4_NUM_FEXCH = 64 * 1024, | ||
170 | }; | ||
171 | |||
148 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) | 172 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) |
149 | { | 173 | { |
150 | return (major << 32) | (minor << 16) | subminor; | 174 | return (major << 32) | (minor << 16) | subminor; |
@@ -154,7 +178,9 @@ struct mlx4_caps { | |||
154 | u64 fw_ver; | 178 | u64 fw_ver; |
155 | int num_ports; | 179 | int num_ports; |
156 | int vl_cap[MLX4_MAX_PORTS + 1]; | 180 | int vl_cap[MLX4_MAX_PORTS + 1]; |
157 | int mtu_cap[MLX4_MAX_PORTS + 1]; | 181 | int ib_mtu_cap[MLX4_MAX_PORTS + 1]; |
182 | u64 def_mac[MLX4_MAX_PORTS + 1]; | ||
183 | int eth_mtu_cap[MLX4_MAX_PORTS + 1]; | ||
158 | int gid_table_len[MLX4_MAX_PORTS + 1]; | 184 | int gid_table_len[MLX4_MAX_PORTS + 1]; |
159 | int pkey_table_len[MLX4_MAX_PORTS + 1]; | 185 | int pkey_table_len[MLX4_MAX_PORTS + 1]; |
160 | int local_ca_ack_delay; | 186 | int local_ca_ack_delay; |
@@ -169,7 +195,6 @@ struct mlx4_caps { | |||
169 | int max_rq_desc_sz; | 195 | int max_rq_desc_sz; |
170 | int max_qp_init_rdma; | 196 | int max_qp_init_rdma; |
171 | int max_qp_dest_rdma; | 197 | int max_qp_dest_rdma; |
172 | int reserved_qps; | ||
173 | int sqp_start; | 198 | int sqp_start; |
174 | int num_srqs; | 199 | int num_srqs; |
175 | int max_srq_wqes; | 200 | int max_srq_wqes; |
@@ -201,6 +226,15 @@ struct mlx4_caps { | |||
201 | u16 stat_rate_support; | 226 | u16 stat_rate_support; |
202 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; | 227 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; |
203 | int max_gso_sz; | 228 | int max_gso_sz; |
229 | int reserved_qps_cnt[MLX4_NUM_QP_REGION]; | ||
230 | int reserved_qps; | ||
231 | int reserved_qps_base[MLX4_NUM_QP_REGION]; | ||
232 | int log_num_macs; | ||
233 | int log_num_vlans; | ||
234 | int log_num_prios; | ||
235 | enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; | ||
236 | u8 supported_type[MLX4_MAX_PORTS + 1]; | ||
237 | u32 port_mask; | ||
204 | }; | 238 | }; |
205 | 239 | ||
206 | struct mlx4_buf_list { | 240 | struct mlx4_buf_list { |
@@ -355,6 +389,11 @@ struct mlx4_init_port_param { | |||
355 | u64 si_guid; | 389 | u64 si_guid; |
356 | }; | 390 | }; |
357 | 391 | ||
392 | #define mlx4_foreach_port(port, dev, type) \ | ||
393 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ | ||
394 | if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \ | ||
395 | ~(dev)->caps.port_mask) & 1 << ((port) - 1)) | ||
396 | |||
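mlx4_foreach_port() walks ports 1..num_ports and filters on caps.port_mask (a set bit apparently marking an IB port, given the conditional). Because the macro expands to a for followed by a bare if, an else after a call site would bind to that hidden if, so bodies are best braced. A hedged usage sketch:

	/* Sketch: visit only the Ethernet ports of a device.
	 * example_setup_eth() is a hypothetical helper.
	 */
	static void example_setup_all_eth(struct mlx4_dev *dev)
	{
		int port;

		mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH) {
			example_setup_eth(dev, port);
		}
	}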
358 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | 397 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, |
359 | struct mlx4_buf *buf); | 398 | struct mlx4_buf *buf); |
360 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); | 399 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); |
@@ -400,7 +439,10 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | |||
400 | int collapsed); | 439 | int collapsed); |
401 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); | 440 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); |
402 | 441 | ||
403 | int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp); | 442 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); |
443 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); | ||
444 | |||
445 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); | ||
404 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); | 446 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); |
405 | 447 | ||
406 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, | 448 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, |
@@ -416,6 +458,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
416 | int block_mcast_loopback); | 458 | int block_mcast_loopback); |
417 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); | 459 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); |
418 | 460 | ||
461 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index); | ||
462 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index); | ||
463 | |||
464 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); | ||
465 | void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); | ||
466 | |||
419 | int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, | 467 | int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, |
420 | int npages, u64 iova, u32 *lkey, u32 *rkey); | 468 | int npages, u64 iova, u32 *lkey, u32 *rkey); |
421 | int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, | 469 | int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, |