author     Matan Barak <matanb@mellanox.com>        2013-11-07 08:25:17 -0500
committer  Roland Dreier <roland@purestorage.com>   2014-01-14 17:06:50 -0500
commit     c1c98501121eefa0888a42566ec7233a1626f678 (patch)
tree       d719e212ee2075957270ea42646675cc4fc82d21 /drivers/infiniband/hw/mlx4
parent     a37a1a428431d3e7e9f53530b5c56ff7867bd487 (diff)
IB/mlx4: Add support for steerable IB UD QPs
This patch adds support for steerable (NETIF) QP creation. When we
create the device, we allocate a range of steerable QPs.
Afterwards, when a QP is created with the NETIF flag, it is allocated
from this range. Allocation is managed by a bitmap allocator.
Internal steering rules for those QPs are automatically generated on
their creation.
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
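For illustration only (not part of this patch): a kernel consumer that already
holds a protection domain and completion queues would ask for one of these
steerable UD QPs simply by setting IB_QP_CREATE_NETIF_QP in the create flags.
A minimal sketch, assuming pd, send_cq and recv_cq were created elsewhere:

        /* Hypothetical consumer-side sketch; pd, send_cq and recv_cq are
         * assumed to exist.  The flag is honored only for IB_QPT_UD QPs on
         * devices using device-managed flow steering.
         */
        struct ib_qp_init_attr init_attr = {
                .qp_type      = IB_QPT_UD,
                .create_flags = IB_QP_CREATE_NETIF_QP,
                .send_cq      = send_cq,
                .recv_cq      = recv_cq,
                .cap = {
                        .max_send_wr  = 64,
                        .max_recv_wr  = 64,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
        };
        struct ib_qp *qp = ib_create_qp(pd, &init_attr);

        if (IS_ERR(qp))
                return PTR_ERR(qp);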
Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c    | 106
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h |  11
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c      |  53
3 files changed, 163 insertions, 7 deletions
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 6b7f227ca9e4..ea5844e89b2a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1850,8 +1850,35 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
         spin_lock_init(&ibdev->sm_lock);
         mutex_init(&ibdev->cap_mask_mutex);
 
+        if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+                ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
+                err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
+                            MLX4_IB_UC_STEER_QPN_ALIGN,
+                            &ibdev->steer_qpn_base);
+                if (err)
+                        goto err_counter;
+
+                ibdev->ib_uc_qpns_bitmap =
+                            kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
+                                    sizeof(long),
+                                    GFP_KERNEL);
+                if (!ibdev->ib_uc_qpns_bitmap) {
+                        dev_err(&dev->pdev->dev, "bit map alloc failed\n");
+                        goto err_steer_qp_release;
+                }
+
+                bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
+
+                err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+                            dev, ibdev->steer_qpn_base,
+                            ibdev->steer_qpn_base +
+                            ibdev->steer_qpn_count - 1);
+                if (err)
+                        goto err_steer_free_bitmap;
+        }
+
         if (ib_register_device(&ibdev->ib_dev, NULL))
-                goto err_counter;
+                goto err_steer_free_bitmap;
 
         if (mlx4_ib_mad_init(ibdev))
                 goto err_reg;
@@ -1902,6 +1929,13 @@ err_mad:
 err_reg:
         ib_unregister_device(&ibdev->ib_dev);
 
+err_steer_free_bitmap:
+        kfree(ibdev->ib_uc_qpns_bitmap);
+
+err_steer_qp_release:
+        if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+                mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+                            ibdev->steer_qpn_count);
 err_counter:
         for (; i; --i)
                 if (ibdev->counters[i - 1] != -1)
@@ -1922,6 +1956,69 @@ err_dealloc:
         return NULL;
 }
 
+int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
+{
+        int offset;
+
+        WARN_ON(!dev->ib_uc_qpns_bitmap);
+
+        offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
+                    dev->steer_qpn_count,
+                    get_count_order(count));
+        if (offset < 0)
+                return offset;
+
+        *qpn = dev->steer_qpn_base + offset;
+        return 0;
+}
+
+void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
+{
+        if (!qpn ||
+            dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
+                return;
+
+        BUG_ON(qpn < dev->steer_qpn_base);
+
+        bitmap_release_region(dev->ib_uc_qpns_bitmap,
+                    qpn - dev->steer_qpn_base,
+                    get_count_order(count));
+}
+
+int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+                    int is_attach)
+{
+        int err;
+        size_t flow_size;
+        struct ib_flow_attr *flow = NULL;
+        struct ib_flow_spec_ib *ib_spec;
+
+        if (is_attach) {
+                flow_size = sizeof(struct ib_flow_attr) +
+                            sizeof(struct ib_flow_spec_ib);
+                flow = kzalloc(flow_size, GFP_KERNEL);
+                if (!flow)
+                        return -ENOMEM;
+                flow->port = mqp->port;
+                flow->num_of_specs = 1;
+                flow->size = flow_size;
+                ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
+                ib_spec->type = IB_FLOW_SPEC_IB;
+                ib_spec->size = sizeof(struct ib_flow_spec_ib);
+                /* Add an empty rule for IB L2 */
+                memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
+
+                err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
+                            IB_FLOW_DOMAIN_NIC,
+                            MLX4_FS_REGULAR,
+                            &mqp->reg_id);
+        } else {
+                err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
+        }
+        kfree(flow);
+        return err;
+}
+
 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 {
         struct mlx4_ib_dev *ibdev = ibdev_ptr;
@@ -1935,6 +2032,13 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
                         pr_warn("failure unregistering notifier\n");
                 ibdev->iboe.nb.notifier_call = NULL;
         }
+
+        if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+                mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+                            ibdev->steer_qpn_count);
+                kfree(ibdev->ib_uc_qpns_bitmap);
+        }
+
         iounmap(ibdev->uar_map);
         for (p = 0; p < ibdev->num_ports; ++p)
                 if (ibdev->counters[p] != -1)
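A note on the allocator introduced above (illustrative, not part of the patch):
bitmap_find_free_region() and bitmap_release_region() operate on naturally
aligned, power-of-two sized regions, which is why mlx4_ib_steer_qp_alloc()
converts the requested count with get_count_order(). For example, asking for
3 QPNs reserves an order-2 region of 4 bits:

        /* Sketch of the rounding behaviour of the region allocator. */
        int order  = get_count_order(3);        /* order 2, i.e. 4 QPNs */
        int offset = bitmap_find_free_region(ibdev->ib_uc_qpns_bitmap,
                                             ibdev->steer_qpn_count, order);
        if (offset >= 0) {
                u32 first_qpn = ibdev->steer_qpn_base + offset;
                /* QPNs first_qpn .. first_qpn + 3 are now reserved */
                bitmap_release_region(ibdev->ib_uc_qpns_bitmap, offset, order);
        }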
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e9fb39603ee9..837f9aa3d2a2 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -68,6 +68,8 @@ enum {
 /*module param to indicate if SM assigns the alias_GUID*/
 extern int mlx4_ib_sm_guid_assign;
 
+#define MLX4_IB_UC_STEER_QPN_ALIGN 1
+#define MLX4_IB_UC_MAX_NUM_QPS 256
 struct mlx4_ib_ucontext {
         struct ib_ucontext ibucontext;
         struct mlx4_uar uar;
@@ -153,6 +155,7 @@ struct mlx4_ib_wq {
 enum mlx4_ib_qp_flags {
         MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
         MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+        MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
         MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
         MLX4_IB_SRIOV_SQP = 1 << 31,
 };
@@ -270,6 +273,7 @@ struct mlx4_ib_qp {
         struct list_head gid_list;
         struct list_head steering_rules;
         struct mlx4_ib_buf *sqp_proxy_rcv;
+        u64 reg_id;
 
 };
 
@@ -494,6 +498,9 @@ struct mlx4_ib_dev {
         struct kobject *dev_ports_parent[MLX4_MFUNC_MAX];
         struct mlx4_ib_iov_port iov_ports[MLX4_MAX_PORTS];
         struct pkey_mgt pkeys;
+        unsigned long *ib_uc_qpns_bitmap;
+        int steer_qpn_count;
+        int steer_qpn_base;
         int steering_support;
 };
 
@@ -753,5 +760,9 @@ void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);
 
 __be64 mlx4_ib_gen_node_guid(void);
 
+int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
+void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
+int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+                    int is_attach);
 
 #endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4f10af2905b5..387fbf274151 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -716,6 +716,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                 if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
                         qp->flags |= MLX4_IB_QP_LSO;
 
+                if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
+                        if (dev->steering_support ==
+                            MLX4_STEERING_MODE_DEVICE_MANAGED)
+                                qp->flags |= MLX4_IB_QP_NETIF;
+                        else
+                                goto err;
+                }
+
                 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
                 if (err)
                         goto err;
@@ -765,7 +773,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                 if (init_attr->qp_type == IB_QPT_RAW_PACKET)
                         err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
                 else
-                        err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+                        if (qp->flags & MLX4_IB_QP_NETIF)
+                                err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
+                        else
+                                err = mlx4_qp_reserve_range(dev->dev, 1, 1,
+                                            &qpn);
                 if (err)
                         goto err_proxy;
         }
@@ -790,8 +802,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
         return 0;
 
 err_qpn:
-        if (!sqpn)
-                mlx4_qp_release_range(dev->dev, qpn, 1);
+        if (!sqpn) {
+                if (qp->flags & MLX4_IB_QP_NETIF)
+                        mlx4_ib_steer_qp_free(dev, qpn, 1);
+                else
+                        mlx4_qp_release_range(dev->dev, qpn, 1);
+        }
 err_proxy:
         if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
                 free_proxy_bufs(pd->device, qp);
@@ -932,8 +948,12 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 
         mlx4_qp_free(dev->dev, &qp->mqp);
 
-        if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp))
-                mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+        if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
+                if (qp->flags & MLX4_IB_QP_NETIF)
+                        mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
+                else
+                        mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+        }
 
         mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
@@ -987,9 +1007,16 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
          */
         if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
                     MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
-                    MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP))
+                    MLX4_IB_SRIOV_TUNNEL_QP |
+                    MLX4_IB_SRIOV_SQP |
+                    MLX4_IB_QP_NETIF))
                 return ERR_PTR(-EINVAL);
 
+        if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
+                if (init_attr->qp_type != IB_QPT_UD)
+                        return ERR_PTR(-EINVAL);
+        }
+
         if (init_attr->create_flags &&
             (udata ||
              ((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) &&
@@ -1235,6 +1262,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
         struct mlx4_qp_context *context;
         enum mlx4_qp_optpar optpar = 0;
         int sqd_event;
+        int steer_qp = 0;
         int err = -EINVAL;
 
         context = kzalloc(sizeof *context, GFP_KERNEL);
@@ -1319,6 +1347,11 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                         optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
                 } else
                         context->pri_path.counter_index = 0xff;
+
+                if (qp->flags & MLX4_IB_QP_NETIF) {
+                        mlx4_ib_steer_qp_reg(dev, qp, 1);
+                        steer_qp = 1;
+                }
         }
 
         if (attr_mask & IB_QP_PKEY_INDEX) {
@@ -1547,9 +1580,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                 qp->sq_next_wqe = 0;
                 if (qp->rq.wqe_cnt)
                         *qp->db.db = 0;
+
+                if (qp->flags & MLX4_IB_QP_NETIF)
+                        mlx4_ib_steer_qp_reg(dev, qp, 0);
         }
 
 out:
+        if (err && steer_qp)
+                mlx4_ib_steer_qp_reg(dev, qp, 0);
         kfree(context);
         return err;
 }
@@ -2762,6 +2800,9 @@ done:
         if (qp->flags & MLX4_IB_QP_LSO)
                 qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
 
+        if (qp->flags & MLX4_IB_QP_NETIF)
+                qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;
+
         qp_init_attr->sq_sig_type =
                 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
                 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;