authorJason Gunthorpe <jgg@mellanox.com>2018-06-04 10:48:11 -0400
committerJason Gunthorpe <jgg@mellanox.com>2018-06-04 10:48:11 -0400
commit0f45e69d625a423d225968c3b59da7f31c5d70b4 (patch)
treeb03055e4874a81e4f2276170ec22b188ecb982fe
parent27d036e33237e49801780eb703ea38dad5449e12 (diff)
parent1a1e03dc15cfa94b7e878a32a979705df614d9c4 (diff)
Merge tag 'verbs_flow_counters' of git://git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma.git into for-next
Pull verbs counters series from Leon Romanovsky:

====================
Verbs flow counters support

This series allows user space applications to monitor real-time traffic
activity and events of the verbs objects they manage, e.g. ibv_qp, ibv_wq
and ibv_flow.

The API enables creation of generic counters and defines how they are
associated with a verbs object; the mlx5 driver currently uses this API for
flow counters. With this API, an application can monitor the entire life
cycle of an object's activity (a static counters attachment), and it also
allows dynamic monitoring of measurement points for only part of the verbs
object's life cycle.

In addition, the series implements flow counters on top of the generic
counters interface. Flow creation is extended with a new flow count
specification type, which lets the user associate counters previously
created through the generic verbs counters interface with the new flow.
Once associated, the user can read statistics via the read function of the
generic counters interface.

The API includes:
1. Create and destroy of the new counters object.
2. Reading the counters values from HW.

Note: the attach API, which lets the application define the measurement
points per object, is a user space only API; this data is passed to the
kernel when the counted object (e.g. flow) is created with the counters
object.
====================

* tag 'verbs_flow_counters':
  IB/mlx5: Add counters read support
  IB/mlx5: Add flow counters read support
  IB/mlx5: Add flow counters binding support
  IB/mlx5: Add counters create and destroy support
  IB/uverbs: Add support for flow counters
  IB/core: Add support for flow counters
  IB/core: Support passing uhw for create_flow
  IB/uverbs: Add read counters support
  IB/core: Introduce counters read verb
  IB/uverbs: Add create/destroy counters support
  IB/core: Introduce counters object and its create/destroy
  IB/uverbs: Add an ib_uobject getter to ioctl() infrastructure
  net/mlx5: Export flow counter related API
  net/mlx5: Use flow counter pointer as input to the query function
-rw-r--r-- drivers/infiniband/core/Makefile                        |   2
-rw-r--r-- drivers/infiniband/core/uverbs.h                        |   2
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c                    |  88
-rw-r--r-- drivers/infiniband/core/uverbs_std_types.c              |   3
-rw-r--r-- drivers/infiniband/core/uverbs_std_types_counters.c     | 157
-rw-r--r-- drivers/infiniband/core/uverbs_std_types_cq.c           |  23
-rw-r--r-- drivers/infiniband/core/uverbs_std_types_flow_action.c  |   4
-rw-r--r-- drivers/infiniband/core/verbs.c                         |   2
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c                       |   6
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c                       | 305
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h                    |  36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c       |  15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h       |   2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c   |   7
-rw-r--r-- include/linux/mlx5/fs.h                                 |   4
-rw-r--r-- include/rdma/ib_verbs.h                                 |  43
-rw-r--r-- include/rdma/uverbs_ioctl.h                             |  11
-rw-r--r-- include/uapi/rdma/ib_user_ioctl_cmds.h                  |  21
-rw-r--r-- include/uapi/rdma/ib_user_verbs.h                       |  13
-rw-r--r-- include/uapi/rdma/mlx5-abi.h                            |  24
20 files changed, 712 insertions, 56 deletions
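
For context, a minimal user-space sketch of the flow the pull message above describes, written against the rdma-core ibv_counters API that pairs with this kernel series (ibv_create_counters(), ibv_attach_counters_point_flow(), ibv_read_counters()). The exact signatures, the NULL-flow static-attach convention and the count spec assumed to be inside flow_attr are illustrative assumptions, not something defined by this kernel patch itself.

/*
 * Hedged sketch: create a counters object, define packets/bytes measurement
 * points, bind it to a flow at flow-creation time, then read the values.
 * flow_attr is assumed to already carry an IBV_FLOW_SPEC_ACTION_COUNT spec
 * that points at the counters object.
 */
#include <inttypes.h>
#include <stdio.h>
#include <infiniband/verbs.h>

static int monitor_flow(struct ibv_context *ctx, struct ibv_qp *qp,
			struct ibv_flow_attr *flow_attr)
{
	struct ibv_counters_init_attr init_attr = {};
	struct ibv_counter_attach_attr attach = {};
	struct ibv_counters *counters;
	struct ibv_flow *flow;
	uint64_t vals[2] = {};
	int ret = -1;

	counters = ibv_create_counters(ctx, &init_attr);
	if (!counters)
		return -1;

	/* User-space-only attach step: packets land in vals[0], bytes in
	 * vals[1]; flow == NULL means the binding takes effect when the flow
	 * carrying the count spec is created below. */
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	if (ibv_attach_counters_point_flow(counters, &attach, NULL))
		goto out_counters;
	attach.counter_desc = IBV_COUNTER_BYTES;
	attach.index = 1;
	if (ibv_attach_counters_point_flow(counters, &attach, NULL))
		goto out_counters;

	flow = ibv_create_flow(qp, flow_attr);
	if (!flow)
		goto out_counters;

	/* Read both values; the flag prefers driver-cached data if available. */
	if (!ibv_read_counters(counters, vals, 2,
			       IBV_READ_COUNTERS_ATTR_PREFER_CACHED)) {
		printf("packets=%" PRIu64 " bytes=%" PRIu64 "\n",
		       vals[0], vals[1]);
		ret = 0;
	}

	ibv_destroy_flow(flow);
out_counters:
	ibv_destroy_counters(counters);
	return ret;
}
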
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 8d42373a2d8a..61667705d746 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -37,4 +37,4 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
37 rdma_core.o uverbs_std_types.o uverbs_ioctl.o \ 37 rdma_core.o uverbs_std_types.o uverbs_ioctl.o \
38 uverbs_ioctl_merge.o uverbs_std_types_cq.o \ 38 uverbs_ioctl_merge.o uverbs_std_types_cq.o \
39 uverbs_std_types_flow_action.o uverbs_std_types_dm.o \ 39 uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
40 uverbs_std_types_mr.o 40 uverbs_std_types_mr.o uverbs_std_types_counters.o
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index cfb51618ab7a..c0d40fc3a53a 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -263,6 +263,7 @@ struct ib_uverbs_flow_spec {
263 struct ib_uverbs_flow_spec_action_tag flow_tag; 263 struct ib_uverbs_flow_spec_action_tag flow_tag;
264 struct ib_uverbs_flow_spec_action_drop drop; 264 struct ib_uverbs_flow_spec_action_drop drop;
265 struct ib_uverbs_flow_spec_action_handle action; 265 struct ib_uverbs_flow_spec_action_handle action;
266 struct ib_uverbs_flow_spec_action_count flow_count;
266 }; 267 };
267}; 268};
268 269
@@ -287,6 +288,7 @@ extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL);
287extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_XRCD); 288extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_XRCD);
288extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION); 289extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION);
289extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_DM); 290extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_DM);
291extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS);
290 292
291#define IB_UVERBS_DECLARE_CMD(name) \ 293#define IB_UVERBS_DECLARE_CMD(name) \
292 ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \ 294 ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e74262ee104c..3179a95c6f5e 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2748,43 +2748,82 @@ out_put:
2748struct ib_uflow_resources { 2748struct ib_uflow_resources {
2749 size_t max; 2749 size_t max;
2750 size_t num; 2750 size_t num;
2751 struct ib_flow_action *collection[0]; 2751 size_t collection_num;
2752 size_t counters_num;
2753 struct ib_counters **counters;
2754 struct ib_flow_action **collection;
2752}; 2755};
2753 2756
2754static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs) 2757static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
2755{ 2758{
2756 struct ib_uflow_resources *resources; 2759 struct ib_uflow_resources *resources;
2757 2760
2758 resources = 2761 resources = kzalloc(sizeof(*resources), GFP_KERNEL);
2759 kmalloc(sizeof(*resources) +
2760 num_specs * sizeof(*resources->collection), GFP_KERNEL);
2761 2762
2762 if (!resources) 2763 if (!resources)
2763 return NULL; 2764 goto err_res;
2765
2766 resources->counters =
2767 kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
2768
2769 if (!resources->counters)
2770 goto err_cnt;
2771
2772 resources->collection =
2773 kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
2774
2775 if (!resources->collection)
2776 goto err_collection;
2764 2777
2765 resources->num = 0;
2766 resources->max = num_specs; 2778 resources->max = num_specs;
2767 2779
2768 return resources; 2780 return resources;
2781
2782err_collection:
2783 kfree(resources->counters);
2784err_cnt:
2785 kfree(resources);
2786err_res:
2787 return NULL;
2769} 2788}
2770 2789
2771void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res) 2790void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
2772{ 2791{
2773 unsigned int i; 2792 unsigned int i;
2774 2793
2775 for (i = 0; i < uflow_res->num; i++) 2794 for (i = 0; i < uflow_res->collection_num; i++)
2776 atomic_dec(&uflow_res->collection[i]->usecnt); 2795 atomic_dec(&uflow_res->collection[i]->usecnt);
2777 2796
2797 for (i = 0; i < uflow_res->counters_num; i++)
2798 atomic_dec(&uflow_res->counters[i]->usecnt);
2799
2800 kfree(uflow_res->collection);
2801 kfree(uflow_res->counters);
2778 kfree(uflow_res); 2802 kfree(uflow_res);
2779} 2803}
2780 2804
2781static void flow_resources_add(struct ib_uflow_resources *uflow_res, 2805static void flow_resources_add(struct ib_uflow_resources *uflow_res,
2782 struct ib_flow_action *action) 2806 enum ib_flow_spec_type type,
2807 void *ibobj)
2783{ 2808{
2784 WARN_ON(uflow_res->num >= uflow_res->max); 2809 WARN_ON(uflow_res->num >= uflow_res->max);
2785 2810
2786 atomic_inc(&action->usecnt); 2811 switch (type) {
2787 uflow_res->collection[uflow_res->num++] = action; 2812 case IB_FLOW_SPEC_ACTION_HANDLE:
2813 atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
2814 uflow_res->collection[uflow_res->collection_num++] =
2815 (struct ib_flow_action *)ibobj;
2816 break;
2817 case IB_FLOW_SPEC_ACTION_COUNT:
2818 atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
2819 uflow_res->counters[uflow_res->counters_num++] =
2820 (struct ib_counters *)ibobj;
2821 break;
2822 default:
2823 WARN_ON(1);
2824 }
2825
2826 uflow_res->num++;
2788} 2827}
2789 2828
2790static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext, 2829static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
@@ -2821,9 +2860,29 @@ static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
2821 return -EINVAL; 2860 return -EINVAL;
2822 ib_spec->action.size = 2861 ib_spec->action.size =
2823 sizeof(struct ib_flow_spec_action_handle); 2862 sizeof(struct ib_flow_spec_action_handle);
2824 flow_resources_add(uflow_res, ib_spec->action.act); 2863 flow_resources_add(uflow_res,
2864 IB_FLOW_SPEC_ACTION_HANDLE,
2865 ib_spec->action.act);
2825 uobj_put_obj_read(ib_spec->action.act); 2866 uobj_put_obj_read(ib_spec->action.act);
2826 break; 2867 break;
2868 case IB_FLOW_SPEC_ACTION_COUNT:
2869 if (kern_spec->flow_count.size !=
2870 sizeof(struct ib_uverbs_flow_spec_action_count))
2871 return -EINVAL;
2872 ib_spec->flow_count.counters =
2873 uobj_get_obj_read(counters,
2874 UVERBS_OBJECT_COUNTERS,
2875 kern_spec->flow_count.handle,
2876 ucontext);
2877 if (!ib_spec->flow_count.counters)
2878 return -EINVAL;
2879 ib_spec->flow_count.size =
2880 sizeof(struct ib_flow_spec_action_count);
2881 flow_resources_add(uflow_res,
2882 IB_FLOW_SPEC_ACTION_COUNT,
2883 ib_spec->flow_count.counters);
2884 uobj_put_obj_read(ib_spec->flow_count.counters);
2885 break;
2827 default: 2886 default:
2828 return -EINVAL; 2887 return -EINVAL;
2829 } 2888 }
@@ -3542,11 +3601,16 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3542 err = -EINVAL; 3601 err = -EINVAL;
3543 goto err_free; 3602 goto err_free;
3544 } 3603 }
3545 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); 3604
3605 flow_id = qp->device->create_flow(qp, flow_attr,
3606 IB_FLOW_DOMAIN_USER, uhw);
3607
3546 if (IS_ERR(flow_id)) { 3608 if (IS_ERR(flow_id)) {
3547 err = PTR_ERR(flow_id); 3609 err = PTR_ERR(flow_id);
3548 goto err_free; 3610 goto err_free;
3549 } 3611 }
3612 atomic_inc(&qp->usecnt);
3613 flow_id->qp = qp;
3550 flow_id->uobject = uobj; 3614 flow_id->uobject = uobj;
3551 uobj->object = flow_id; 3615 uobj->object = flow_id;
3552 uflow = container_of(uobj, typeof(*uflow), uobject); 3616 uflow = container_of(uobj, typeof(*uflow), uobject);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 569f48bd821e..b570acbd94af 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -302,7 +302,8 @@ static DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
302 &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL), 302 &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL),
303 &UVERBS_OBJECT(UVERBS_OBJECT_XRCD), 303 &UVERBS_OBJECT(UVERBS_OBJECT_XRCD),
304 &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION), 304 &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION),
305 &UVERBS_OBJECT(UVERBS_OBJECT_DM)); 305 &UVERBS_OBJECT(UVERBS_OBJECT_DM),
306 &UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS));
306 307
307const struct uverbs_object_tree_def *uverbs_default_get_objects(void) 308const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
308{ 309{
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
new file mode 100644
index 000000000000..03b182a684a6
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -0,0 +1,157 @@
1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include "uverbs.h"
35#include <rdma/uverbs_std_types.h>
36
37static int uverbs_free_counters(struct ib_uobject *uobject,
38 enum rdma_remove_reason why)
39{
40 struct ib_counters *counters = uobject->object;
41
42 if (why == RDMA_REMOVE_DESTROY &&
43 atomic_read(&counters->usecnt))
44 return -EBUSY;
45
46 return counters->device->destroy_counters(counters);
47}
48
49static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(struct ib_device *ib_dev,
50 struct ib_uverbs_file *file,
51 struct uverbs_attr_bundle *attrs)
52{
53 struct ib_counters *counters;
54 struct ib_uobject *uobj;
55 int ret;
56
57 /*
58 * This check should be removed once the infrastructure
59 * have the ability to remove methods from parse tree once
60 * such condition is met.
61 */
62 if (!ib_dev->create_counters)
63 return -EOPNOTSUPP;
64
65 uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
66 counters = ib_dev->create_counters(ib_dev, attrs);
67 if (IS_ERR(counters)) {
68 ret = PTR_ERR(counters);
69 goto err_create_counters;
70 }
71
72 counters->device = ib_dev;
73 counters->uobject = uobj;
74 uobj->object = counters;
75 atomic_set(&counters->usecnt, 0);
76
77 return 0;
78
79err_create_counters:
80 return ret;
81}
82
83static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(struct ib_device *ib_dev,
84 struct ib_uverbs_file *file,
85 struct uverbs_attr_bundle *attrs)
86{
87 struct ib_counters_read_attr read_attr = {};
88 const struct uverbs_attr *uattr;
89 struct ib_counters *counters =
90 uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE);
91 int ret;
92
93 if (!ib_dev->read_counters)
94 return -EOPNOTSUPP;
95
96 if (!atomic_read(&counters->usecnt))
97 return -EINVAL;
98
99 ret = uverbs_copy_from(&read_attr.flags, attrs,
100 UVERBS_ATTR_READ_COUNTERS_FLAGS);
101 if (ret)
102 return ret;
103
104 uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
105 read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
106 read_attr.counters_buff = kcalloc(read_attr.ncounters,
107 sizeof(u64), GFP_KERNEL);
108 if (!read_attr.counters_buff)
109 return -ENOMEM;
110
111 ret = ib_dev->read_counters(counters,
112 &read_attr,
113 attrs);
114 if (ret)
115 goto err_read;
116
117 ret = uverbs_copy_to(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF,
118 read_attr.counters_buff,
119 read_attr.ncounters * sizeof(u64));
120
121err_read:
122 kfree(read_attr.counters_buff);
123 return ret;
124}
125
126static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_CREATE,
127 &UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
128 UVERBS_OBJECT_COUNTERS,
129 UVERBS_ACCESS_NEW,
130 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
131
132static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_COUNTERS_DESTROY,
133 uverbs_destroy_def_handler,
134 &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
135 UVERBS_OBJECT_COUNTERS,
136 UVERBS_ACCESS_DESTROY,
137 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
138
139#define MAX_COUNTERS_BUFF_SIZE USHRT_MAX
140static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_READ,
141 &UVERBS_ATTR_IDR(UVERBS_ATTR_READ_COUNTERS_HANDLE,
142 UVERBS_OBJECT_COUNTERS,
143 UVERBS_ACCESS_READ,
144 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
145 &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_READ_COUNTERS_BUFF,
146 UVERBS_ATTR_SIZE(0, MAX_COUNTERS_BUFF_SIZE),
147 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
148 &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_READ_COUNTERS_FLAGS,
149 UVERBS_ATTR_TYPE(__u32),
150 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
151
152DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COUNTERS,
153 &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_counters),
154 &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_CREATE),
155 &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_DESTROY),
156 &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_READ));
157
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index b0dbae9dd0d7..3d293d01afea 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -65,7 +65,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
65 struct ib_cq_init_attr attr = {}; 65 struct ib_cq_init_attr attr = {};
66 struct ib_cq *cq; 66 struct ib_cq *cq;
67 struct ib_uverbs_completion_event_file *ev_file = NULL; 67 struct ib_uverbs_completion_event_file *ev_file = NULL;
68 const struct uverbs_attr *ev_file_attr;
69 struct ib_uobject *ev_file_uobj; 68 struct ib_uobject *ev_file_uobj;
70 69
71 if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ)) 70 if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ))
@@ -87,10 +86,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
87 UVERBS_ATTR_CREATE_CQ_FLAGS))) 86 UVERBS_ATTR_CREATE_CQ_FLAGS)))
88 return -EFAULT; 87 return -EFAULT;
89 88
90 ev_file_attr = uverbs_attr_get(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL); 89 ev_file_uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL);
91 if (!IS_ERR(ev_file_attr)) { 90 if (!IS_ERR(ev_file_uobj)) {
92 ev_file_uobj = ev_file_attr->obj_attr.uobject;
93
94 ev_file = container_of(ev_file_uobj, 91 ev_file = container_of(ev_file_uobj,
95 struct ib_uverbs_completion_event_file, 92 struct ib_uverbs_completion_event_file,
96 uobj_file.uobj); 93 uobj_file.uobj);
@@ -102,8 +99,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
102 goto err_event_file; 99 goto err_event_file;
103 } 100 }
104 101
105 obj = container_of(uverbs_attr_get(attrs, 102 obj = container_of(uverbs_attr_get_uobject(attrs,
106 UVERBS_ATTR_CREATE_CQ_HANDLE)->obj_attr.uobject, 103 UVERBS_ATTR_CREATE_CQ_HANDLE),
107 typeof(*obj), uobject); 104 typeof(*obj), uobject);
108 obj->uverbs_file = ucontext->ufile; 105 obj->uverbs_file = ucontext->ufile;
109 obj->comp_events_reported = 0; 106 obj->comp_events_reported = 0;
@@ -170,13 +167,17 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(struct ib_device *ib_dev,
170 struct ib_uverbs_file *file, 167 struct ib_uverbs_file *file,
171 struct uverbs_attr_bundle *attrs) 168 struct uverbs_attr_bundle *attrs)
172{ 169{
173 struct ib_uverbs_destroy_cq_resp resp;
174 struct ib_uobject *uobj = 170 struct ib_uobject *uobj =
175 uverbs_attr_get(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE)->obj_attr.uobject; 171 uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE);
176 struct ib_ucq_object *obj = container_of(uobj, struct ib_ucq_object, 172 struct ib_uverbs_destroy_cq_resp resp;
177 uobject); 173 struct ib_ucq_object *obj;
178 int ret; 174 int ret;
179 175
176 if (IS_ERR(uobj))
177 return PTR_ERR(uobj);
178
179 obj = container_of(uobj, struct ib_ucq_object, uobject);
180
180 if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ)) 181 if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ))
181 return -EOPNOTSUPP; 182 return -EOPNOTSUPP;
182 183
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index b4f016dfa23d..a7be51cf2e42 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -320,7 +320,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device
320 return ret; 320 return ret;
321 321
322 /* No need to check as this attribute is marked as MANDATORY */ 322 /* No need to check as this attribute is marked as MANDATORY */
323 uobj = uverbs_attr_get(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE)->obj_attr.uobject; 323 uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE);
324 action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs); 324 action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs);
325 if (IS_ERR(action)) 325 if (IS_ERR(action))
326 return PTR_ERR(action); 326 return PTR_ERR(action);
@@ -350,7 +350,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device
350 if (ret) 350 if (ret)
351 return ret; 351 return ret;
352 352
353 uobj = uverbs_attr_get(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE)->obj_attr.uobject; 353 uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE);
354 action = uobj->object; 354 action = uobj->object;
355 355
356 if (action->type != IB_FLOW_ACTION_ESP) 356 if (action->type != IB_FLOW_ACTION_ESP)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 6ddfb1fade79..0b56828c1319 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1983,7 +1983,7 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
1983 if (!qp->device->create_flow) 1983 if (!qp->device->create_flow)
1984 return ERR_PTR(-EOPNOTSUPP); 1984 return ERR_PTR(-EOPNOTSUPP);
1985 1985
1986 flow_id = qp->device->create_flow(qp, flow_attr, domain); 1986 flow_id = qp->device->create_flow(qp, flow_attr, domain, NULL);
1987 if (!IS_ERR(flow_id)) { 1987 if (!IS_ERR(flow_id)) {
1988 atomic_inc(&qp->usecnt); 1988 atomic_inc(&qp->usecnt);
1989 flow_id->qp = qp; 1989 flow_id->qp = qp;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 722c825e3e71..f839bf3b1497 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1808,7 +1808,7 @@ static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1808 1808
1809static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, 1809static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1810 struct ib_flow_attr *flow_attr, 1810 struct ib_flow_attr *flow_attr,
1811 int domain) 1811 int domain, struct ib_udata *udata)
1812{ 1812{
1813 int err = 0, i = 0, j = 0; 1813 int err = 0, i = 0, j = 0;
1814 struct mlx4_ib_flow *mflow; 1814 struct mlx4_ib_flow *mflow;
@@ -1826,6 +1826,10 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1826 (flow_attr->type != IB_FLOW_ATTR_NORMAL)) 1826 (flow_attr->type != IB_FLOW_ATTR_NORMAL))
1827 return ERR_PTR(-EOPNOTSUPP); 1827 return ERR_PTR(-EOPNOTSUPP);
1828 1828
1829 if (udata &&
1830 udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
1831 return ERR_PTR(-EOPNOTSUPP);
1832
1829 memset(type, 0, sizeof(type)); 1833 memset(type, 0, sizeof(type));
1830 1834
1831 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL); 1835 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 0541581c5d84..3544150f3469 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2443,7 +2443,7 @@ static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
2443#define LAST_TUNNEL_FIELD tunnel_id 2443#define LAST_TUNNEL_FIELD tunnel_id
2444#define LAST_FLOW_TAG_FIELD tag_id 2444#define LAST_FLOW_TAG_FIELD tag_id
2445#define LAST_DROP_FIELD size 2445#define LAST_DROP_FIELD size
2446#define LAST_DROP_FIELD size 2446#define LAST_COUNTERS_FIELD counters
2447 2447
2448/* Field is the last supported field */ 2448/* Field is the last supported field */
2449#define FIELDS_NOT_SUPPORTED(filter, field)\ 2449#define FIELDS_NOT_SUPPORTED(filter, field)\
@@ -2807,6 +2807,18 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2807 if (ret) 2807 if (ret)
2808 return ret; 2808 return ret;
2809 break; 2809 break;
2810 case IB_FLOW_SPEC_ACTION_COUNT:
2811 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
2812 LAST_COUNTERS_FIELD))
2813 return -EOPNOTSUPP;
2814
2815 /* for now support only one counters spec per flow */
2816 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
2817 return -EINVAL;
2818
2819 action->counters = ib_spec->flow_count.counters;
2820 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
2821 break;
2810 default: 2822 default:
2811 return -EINVAL; 2823 return -EINVAL;
2812 } 2824 }
@@ -2954,6 +2966,17 @@ static void put_flow_table(struct mlx5_ib_dev *dev,
2954 } 2966 }
2955} 2967}
2956 2968
2969static void counters_clear_description(struct ib_counters *counters)
2970{
2971 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
2972
2973 mutex_lock(&mcounters->mcntrs_mutex);
2974 kfree(mcounters->counters_data);
2975 mcounters->counters_data = NULL;
2976 mcounters->cntrs_max_index = 0;
2977 mutex_unlock(&mcounters->mcntrs_mutex);
2978}
2979
2957static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) 2980static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
2958{ 2981{
2959 struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device); 2982 struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
@@ -2973,8 +2996,11 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
2973 2996
2974 mlx5_del_flow_rules(handler->rule); 2997 mlx5_del_flow_rules(handler->rule);
2975 put_flow_table(dev, handler->prio, true); 2998 put_flow_table(dev, handler->prio, true);
2976 mutex_unlock(&dev->flow_db->lock); 2999 if (handler->ibcounters &&
3000 atomic_read(&handler->ibcounters->usecnt) == 1)
3001 counters_clear_description(handler->ibcounters);
2977 3002
3003 mutex_unlock(&dev->flow_db->lock);
2978 kfree(handler); 3004 kfree(handler);
2979 3005
2980 return 0; 3006 return 0;
@@ -3094,22 +3120,143 @@ static void set_underlay_qp(struct mlx5_ib_dev *dev,
3094 } 3120 }
3095} 3121}
3096 3122
3123static int read_flow_counters(struct ib_device *ibdev,
3124 struct mlx5_read_counters_attr *read_attr)
3125{
3126 struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
3127 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3128
3129 return mlx5_fc_query(dev->mdev, fc,
3130 &read_attr->out[IB_COUNTER_PACKETS],
3131 &read_attr->out[IB_COUNTER_BYTES]);
3132}
3133
3134/* flow counters currently expose two counters packets and bytes */
3135#define FLOW_COUNTERS_NUM 2
3136static int counters_set_description(struct ib_counters *counters,
3137 enum mlx5_ib_counters_type counters_type,
3138 struct mlx5_ib_flow_counters_desc *desc_data,
3139 u32 ncounters)
3140{
3141 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3142 u32 cntrs_max_index = 0;
3143 int i;
3144
3145 if (counters_type != MLX5_IB_COUNTERS_FLOW)
3146 return -EINVAL;
3147
3148 /* init the fields for the object */
3149 mcounters->type = counters_type;
3150 mcounters->read_counters = read_flow_counters;
3151 mcounters->counters_num = FLOW_COUNTERS_NUM;
3152 mcounters->ncounters = ncounters;
3153 /* each counter entry have both description and index pair */
3154 for (i = 0; i < ncounters; i++) {
3155 if (desc_data[i].description > IB_COUNTER_BYTES)
3156 return -EINVAL;
3157
3158 if (cntrs_max_index <= desc_data[i].index)
3159 cntrs_max_index = desc_data[i].index + 1;
3160 }
3161
3162 mutex_lock(&mcounters->mcntrs_mutex);
3163 mcounters->counters_data = desc_data;
3164 mcounters->cntrs_max_index = cntrs_max_index;
3165 mutex_unlock(&mcounters->mcntrs_mutex);
3166
3167 return 0;
3168}
3169
3170#define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
3171static int flow_counters_set_data(struct ib_counters *ibcounters,
3172 struct mlx5_ib_create_flow *ucmd)
3173{
3174 struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
3175 struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
3176 struct mlx5_ib_flow_counters_desc *desc_data = NULL;
3177 bool hw_hndl = false;
3178 int ret = 0;
3179
3180 if (ucmd && ucmd->ncounters_data != 0) {
3181 cntrs_data = ucmd->data;
3182 if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
3183 return -EINVAL;
3184
3185 desc_data = kcalloc(cntrs_data->ncounters,
3186 sizeof(*desc_data),
3187 GFP_KERNEL);
3188 if (!desc_data)
3189 return -ENOMEM;
3190
3191 if (copy_from_user(desc_data,
3192 u64_to_user_ptr(cntrs_data->counters_data),
3193 sizeof(*desc_data) * cntrs_data->ncounters)) {
3194 ret = -EFAULT;
3195 goto free;
3196 }
3197 }
3198
3199 if (!mcounters->hw_cntrs_hndl) {
3200 mcounters->hw_cntrs_hndl = mlx5_fc_create(
3201 to_mdev(ibcounters->device)->mdev, false);
3202 if (!mcounters->hw_cntrs_hndl) {
3203 ret = -ENOMEM;
3204 goto free;
3205 }
3206 hw_hndl = true;
3207 }
3208
3209 if (desc_data) {
3210 /* counters already bound to at least one flow */
3211 if (mcounters->cntrs_max_index) {
3212 ret = -EINVAL;
3213 goto free_hndl;
3214 }
3215
3216 ret = counters_set_description(ibcounters,
3217 MLX5_IB_COUNTERS_FLOW,
3218 desc_data,
3219 cntrs_data->ncounters);
3220 if (ret)
3221 goto free_hndl;
3222
3223 } else if (!mcounters->cntrs_max_index) {
3224 /* counters not bound yet, must have udata passed */
3225 ret = -EINVAL;
3226 goto free_hndl;
3227 }
3228
3229 return 0;
3230
3231free_hndl:
3232 if (hw_hndl) {
3233 mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
3234 mcounters->hw_cntrs_hndl);
3235 mcounters->hw_cntrs_hndl = NULL;
3236 }
3237free:
3238 kfree(desc_data);
3239 return ret;
3240}
3241
3097static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, 3242static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3098 struct mlx5_ib_flow_prio *ft_prio, 3243 struct mlx5_ib_flow_prio *ft_prio,
3099 const struct ib_flow_attr *flow_attr, 3244 const struct ib_flow_attr *flow_attr,
3100 struct mlx5_flow_destination *dst, 3245 struct mlx5_flow_destination *dst,
3101 u32 underlay_qpn) 3246 u32 underlay_qpn,
3247 struct mlx5_ib_create_flow *ucmd)
3102{ 3248{
3103 struct mlx5_flow_table *ft = ft_prio->flow_table; 3249 struct mlx5_flow_table *ft = ft_prio->flow_table;
3104 struct mlx5_ib_flow_handler *handler; 3250 struct mlx5_ib_flow_handler *handler;
3105 struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG}; 3251 struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
3106 struct mlx5_flow_spec *spec; 3252 struct mlx5_flow_spec *spec;
3107 struct mlx5_flow_destination *rule_dst = dst; 3253 struct mlx5_flow_destination dest_arr[2] = {};
3254 struct mlx5_flow_destination *rule_dst = dest_arr;
3108 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); 3255 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
3109 unsigned int spec_index; 3256 unsigned int spec_index;
3110 u32 prev_type = 0; 3257 u32 prev_type = 0;
3111 int err = 0; 3258 int err = 0;
3112 int dest_num = 1; 3259 int dest_num = 0;
3113 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 3260 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3114 3261
3115 if (!is_valid_attr(dev->mdev, flow_attr)) 3262 if (!is_valid_attr(dev->mdev, flow_attr))
@@ -3123,6 +3270,10 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3123 } 3270 }
3124 3271
3125 INIT_LIST_HEAD(&handler->list); 3272 INIT_LIST_HEAD(&handler->list);
3273 if (dst) {
3274 memcpy(&dest_arr[0], dst, sizeof(*dst));
3275 dest_num++;
3276 }
3126 3277
3127 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 3278 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3128 err = parse_flow_attr(dev->mdev, spec->match_criteria, 3279 err = parse_flow_attr(dev->mdev, spec->match_criteria,
@@ -3159,15 +3310,30 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3159 goto free; 3310 goto free;
3160 } 3311 }
3161 3312
3313 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3314 err = flow_counters_set_data(flow_act.counters, ucmd);
3315 if (err)
3316 goto free;
3317
3318 handler->ibcounters = flow_act.counters;
3319 dest_arr[dest_num].type =
3320 MLX5_FLOW_DESTINATION_TYPE_COUNTER;
3321 dest_arr[dest_num].counter =
3322 to_mcounters(flow_act.counters)->hw_cntrs_hndl;
3323 dest_num++;
3324 }
3325
3162 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { 3326 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
3163 rule_dst = NULL; 3327 if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
3164 dest_num = 0; 3328 rule_dst = NULL;
3329 dest_num = 0;
3330 }
3165 } else { 3331 } else {
3166 if (is_egress) 3332 if (is_egress)
3167 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 3333 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3168 else 3334 else
3169 flow_act.action |= 3335 flow_act.action |=
3170 dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : 3336 dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
3171 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; 3337 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
3172 } 3338 }
3173 3339
@@ -3193,8 +3359,12 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3193 3359
3194 ft_prio->flow_table = ft; 3360 ft_prio->flow_table = ft;
3195free: 3361free:
3196 if (err) 3362 if (err && handler) {
3363 if (handler->ibcounters &&
3364 atomic_read(&handler->ibcounters->usecnt) == 1)
3365 counters_clear_description(handler->ibcounters);
3197 kfree(handler); 3366 kfree(handler);
3367 }
3198 kvfree(spec); 3368 kvfree(spec);
3199 return err ? ERR_PTR(err) : handler; 3369 return err ? ERR_PTR(err) : handler;
3200} 3370}
@@ -3204,7 +3374,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
3204 const struct ib_flow_attr *flow_attr, 3374 const struct ib_flow_attr *flow_attr,
3205 struct mlx5_flow_destination *dst) 3375 struct mlx5_flow_destination *dst)
3206{ 3376{
3207 return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0); 3377 return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
3208} 3378}
3209 3379
3210static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, 3380static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
@@ -3334,7 +3504,8 @@ err:
3334 3504
3335static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 3505static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3336 struct ib_flow_attr *flow_attr, 3506 struct ib_flow_attr *flow_attr,
3337 int domain) 3507 int domain,
3508 struct ib_udata *udata)
3338{ 3509{
3339 struct mlx5_ib_dev *dev = to_mdev(qp->device); 3510 struct mlx5_ib_dev *dev = to_mdev(qp->device);
3340 struct mlx5_ib_qp *mqp = to_mqp(qp); 3511 struct mlx5_ib_qp *mqp = to_mqp(qp);
@@ -3343,9 +3514,44 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3343 struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 3514 struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
3344 struct mlx5_ib_flow_prio *ft_prio; 3515 struct mlx5_ib_flow_prio *ft_prio;
3345 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 3516 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3517 struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
3518 size_t min_ucmd_sz, required_ucmd_sz;
3346 int err; 3519 int err;
3347 int underlay_qpn; 3520 int underlay_qpn;
3348 3521
3522 if (udata && udata->inlen) {
3523 min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
3524 sizeof(ucmd_hdr.reserved);
3525 if (udata->inlen < min_ucmd_sz)
3526 return ERR_PTR(-EOPNOTSUPP);
3527
3528 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
3529 if (err)
3530 return ERR_PTR(err);
3531
3532 /* currently supports only one counters data */
3533 if (ucmd_hdr.ncounters_data > 1)
3534 return ERR_PTR(-EINVAL);
3535
3536 required_ucmd_sz = min_ucmd_sz +
3537 sizeof(struct mlx5_ib_flow_counters_data) *
3538 ucmd_hdr.ncounters_data;
3539 if (udata->inlen > required_ucmd_sz &&
3540 !ib_is_udata_cleared(udata, required_ucmd_sz,
3541 udata->inlen - required_ucmd_sz))
3542 return ERR_PTR(-EOPNOTSUPP);
3543
3544 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
3545 if (!ucmd)
3546 return ERR_PTR(-ENOMEM);
3547
3548 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
3549 if (err) {
3550 kfree(ucmd);
3551 return ERR_PTR(err);
3552 }
3553 }
3554
3349 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) 3555 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
3350 return ERR_PTR(-ENOMEM); 3556 return ERR_PTR(-ENOMEM);
3351 3557
@@ -3399,7 +3605,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3399 underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ? 3605 underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
3400 mqp->underlay_qpn : 0; 3606 mqp->underlay_qpn : 0;
3401 handler = _create_flow_rule(dev, ft_prio, flow_attr, 3607 handler = _create_flow_rule(dev, ft_prio, flow_attr,
3402 dst, underlay_qpn); 3608 dst, underlay_qpn, ucmd);
3403 } 3609 }
3404 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3610 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3405 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 3611 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
@@ -3420,6 +3626,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3420 3626
3421 mutex_unlock(&dev->flow_db->lock); 3627 mutex_unlock(&dev->flow_db->lock);
3422 kfree(dst); 3628 kfree(dst);
3629 kfree(ucmd);
3423 3630
3424 return &handler->ibflow; 3631 return &handler->ibflow;
3425 3632
@@ -3430,6 +3637,7 @@ destroy_ft:
3430unlock: 3637unlock:
3431 mutex_unlock(&dev->flow_db->lock); 3638 mutex_unlock(&dev->flow_db->lock);
3432 kfree(dst); 3639 kfree(dst);
3640 kfree(ucmd);
3433 kfree(handler); 3641 kfree(handler);
3434 return ERR_PTR(err); 3642 return ERR_PTR(err);
3435} 3643}
@@ -5090,6 +5298,76 @@ static void depopulate_specs_root(struct mlx5_ib_dev *dev)
5090 uverbs_free_spec_tree(dev->ib_dev.specs_root); 5298 uverbs_free_spec_tree(dev->ib_dev.specs_root);
5091} 5299}
5092 5300
5301static int mlx5_ib_read_counters(struct ib_counters *counters,
5302 struct ib_counters_read_attr *read_attr,
5303 struct uverbs_attr_bundle *attrs)
5304{
5305 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
5306 struct mlx5_read_counters_attr mread_attr = {};
5307 struct mlx5_ib_flow_counters_desc *desc;
5308 int ret, i;
5309
5310 mutex_lock(&mcounters->mcntrs_mutex);
5311 if (mcounters->cntrs_max_index > read_attr->ncounters) {
5312 ret = -EINVAL;
5313 goto err_bound;
5314 }
5315
5316 mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
5317 GFP_KERNEL);
5318 if (!mread_attr.out) {
5319 ret = -ENOMEM;
5320 goto err_bound;
5321 }
5322
5323 mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
5324 mread_attr.flags = read_attr->flags;
5325 ret = mcounters->read_counters(counters->device, &mread_attr);
5326 if (ret)
5327 goto err_read;
5328
5329 /* do the pass over the counters data array to assign according to the
5330 * descriptions and indexing pairs
5331 */
5332 desc = mcounters->counters_data;
5333 for (i = 0; i < mcounters->ncounters; i++)
5334 read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
5335
5336err_read:
5337 kfree(mread_attr.out);
5338err_bound:
5339 mutex_unlock(&mcounters->mcntrs_mutex);
5340 return ret;
5341}
5342
5343static int mlx5_ib_destroy_counters(struct ib_counters *counters)
5344{
5345 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
5346
5347 counters_clear_description(counters);
5348 if (mcounters->hw_cntrs_hndl)
5349 mlx5_fc_destroy(to_mdev(counters->device)->mdev,
5350 mcounters->hw_cntrs_hndl);
5351
5352 kfree(mcounters);
5353
5354 return 0;
5355}
5356
5357static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
5358 struct uverbs_attr_bundle *attrs)
5359{
5360 struct mlx5_ib_mcounters *mcounters;
5361
5362 mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
5363 if (!mcounters)
5364 return ERR_PTR(-ENOMEM);
5365
5366 mutex_init(&mcounters->mcntrs_mutex);
5367
5368 return &mcounters->ibcntrs;
5369}
5370
5093void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) 5371void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
5094{ 5372{
5095 mlx5_ib_cleanup_multiport_master(dev); 5373 mlx5_ib_cleanup_multiport_master(dev);
@@ -5333,6 +5611,9 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
5333 dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action; 5611 dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action;
5334 dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp; 5612 dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp;
5335 dev->ib_dev.driver_id = RDMA_DRIVER_MLX5; 5613 dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
5614 dev->ib_dev.create_counters = mlx5_ib_create_counters;
5615 dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters;
5616 dev->ib_dev.read_counters = mlx5_ib_read_counters;
5336 5617
5337 err = init_node_data(dev); 5618 err = init_node_data(dev);
5338 if (err) 5619 if (err)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 49a1aa0ff429..d89c8fe626f6 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -175,6 +175,7 @@ struct mlx5_ib_flow_handler {
175 struct ib_flow ibflow; 175 struct ib_flow ibflow;
176 struct mlx5_ib_flow_prio *prio; 176 struct mlx5_ib_flow_prio *prio;
177 struct mlx5_flow_handle *rule; 177 struct mlx5_flow_handle *rule;
178 struct ib_counters *ibcounters;
178}; 179};
179 180
180struct mlx5_ib_flow_db { 181struct mlx5_ib_flow_db {
@@ -813,6 +814,41 @@ struct mlx5_memic {
813 DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES); 814 DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
814}; 815};
815 816
817struct mlx5_read_counters_attr {
818 struct mlx5_fc *hw_cntrs_hndl;
819 u64 *out;
820 u32 flags;
821};
822
823enum mlx5_ib_counters_type {
824 MLX5_IB_COUNTERS_FLOW,
825};
826
827struct mlx5_ib_mcounters {
828 struct ib_counters ibcntrs;
829 enum mlx5_ib_counters_type type;
830 /* number of counters supported for this counters type */
831 u32 counters_num;
832 struct mlx5_fc *hw_cntrs_hndl;
833 /* read function for this counters type */
834 int (*read_counters)(struct ib_device *ibdev,
835 struct mlx5_read_counters_attr *read_attr);
836 /* max index set as part of create_flow */
837 u32 cntrs_max_index;
838 /* number of counters data entries (<description,index> pair) */
839 u32 ncounters;
840 /* counters data array for descriptions and indexes */
841 struct mlx5_ib_flow_counters_desc *counters_data;
842 /* protects access to mcounters internal data */
843 struct mutex mcntrs_mutex;
844};
845
846static inline struct mlx5_ib_mcounters *
847to_mcounters(struct ib_counters *ibcntrs)
848{
849 return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
850}
851
816struct mlx5_ib_dev { 852struct mlx5_ib_dev {
817 struct ib_device ib_dev; 853 struct ib_device ib_dev;
818 struct mlx5_core_dev *mdev; 854 struct mlx5_core_dev *mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 9a24314b817a..bb9665b7e8e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2104,21 +2104,18 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
2104 struct mlx5_vport *vport = &esw->vports[vport_idx]; 2104 struct mlx5_vport *vport = &esw->vports[vport_idx];
2105 u64 rx_discard_vport_down, tx_discard_vport_down; 2105 u64 rx_discard_vport_down, tx_discard_vport_down;
2106 u64 bytes = 0; 2106 u64 bytes = 0;
2107 u16 idx = 0;
2108 int err = 0; 2107 int err = 0;
2109 2108
2110 if (!vport->enabled || esw->mode != SRIOV_LEGACY) 2109 if (!vport->enabled || esw->mode != SRIOV_LEGACY)
2111 return 0; 2110 return 0;
2112 2111
2113 if (vport->egress.drop_counter) { 2112 if (vport->egress.drop_counter)
2114 idx = vport->egress.drop_counter->id; 2113 mlx5_fc_query(dev, vport->egress.drop_counter,
2115 mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes); 2114 &stats->rx_dropped, &bytes);
2116 }
2117 2115
2118 if (vport->ingress.drop_counter) { 2116 if (vport->ingress.drop_counter)
2119 idx = vport->ingress.drop_counter->id; 2117 mlx5_fc_query(dev, vport->ingress.drop_counter,
2120 mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes); 2118 &stats->tx_dropped, &bytes);
2121 }
2122 2119
2123 if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && 2120 if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
2124 !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) 2121 !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index b6da322a8016..32070e5d993d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -233,8 +233,6 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
233 unsigned long delay); 233 unsigned long delay);
234void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, 234void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
235 unsigned long interval); 235 unsigned long interval);
236int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
237 u64 *packets, u64 *bytes);
238 236
239int mlx5_init_fs(struct mlx5_core_dev *dev); 237int mlx5_init_fs(struct mlx5_core_dev *dev);
240void mlx5_cleanup_fs(struct mlx5_core_dev *dev); 238void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index b7ab929d5f8e..58af6be13dfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -243,6 +243,7 @@ err_out:
243 243
244 return ERR_PTR(err); 244 return ERR_PTR(err);
245} 245}
246EXPORT_SYMBOL(mlx5_fc_create);
246 247
247void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) 248void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
248{ 249{
@@ -260,6 +261,7 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
260 mlx5_cmd_fc_free(dev, counter->id); 261 mlx5_cmd_fc_free(dev, counter->id);
261 kfree(counter); 262 kfree(counter);
262} 263}
264EXPORT_SYMBOL(mlx5_fc_destroy);
263 265
264int mlx5_init_fc_stats(struct mlx5_core_dev *dev) 266int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
265{ 267{
@@ -312,11 +314,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
312 } 314 }
313} 315}
314 316
315int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id, 317int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
316 u64 *packets, u64 *bytes) 318 u64 *packets, u64 *bytes)
317{ 319{
318 return mlx5_cmd_fc_query(dev, id, packets, bytes); 320 return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
319} 321}
322EXPORT_SYMBOL(mlx5_fc_query);
320 323
321void mlx5_fc_query_cached(struct mlx5_fc *counter, 324void mlx5_fc_query_cached(struct mlx5_fc *counter,
322 u64 *bytes, u64 *packets, u64 *lastuse) 325 u64 *bytes, u64 *packets, u64 *lastuse)
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 9f4d32e41c06..757b4a30281e 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -160,6 +160,7 @@ struct mlx5_flow_act {
160 u32 modify_id; 160 u32 modify_id;
161 uintptr_t esp_id; 161 uintptr_t esp_id;
162 struct mlx5_fs_vlan vlan; 162 struct mlx5_fs_vlan vlan;
163 struct ib_counters *counters;
163}; 164};
164 165
165#define MLX5_DECLARE_FLOW_ACT(name) \ 166#define MLX5_DECLARE_FLOW_ACT(name) \
@@ -186,6 +187,9 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
186void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); 187void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
187void mlx5_fc_query_cached(struct mlx5_fc *counter, 188void mlx5_fc_query_cached(struct mlx5_fc *counter,
188 u64 *bytes, u64 *packets, u64 *lastuse); 189 u64 *bytes, u64 *packets, u64 *lastuse);
190int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
191 u64 *packets, u64 *bytes);
192
189int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); 193int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
190int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); 194int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
191 195
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 406c98d7a09a..2cc04abb6df8 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1859,9 +1859,10 @@ enum ib_flow_spec_type {
1859 IB_FLOW_SPEC_ACTION_TAG = 0x1000, 1859 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
1860 IB_FLOW_SPEC_ACTION_DROP = 0x1001, 1860 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
1861 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002, 1861 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
1862 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
1862}; 1863};
1863#define IB_FLOW_SPEC_LAYER_MASK 0xF0 1864#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1864#define IB_FLOW_SPEC_SUPPORT_LAYERS 8 1865#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1865 1866
1866/* Flow steering rule priority is set according to it's domain. 1867/* Flow steering rule priority is set according to it's domain.
1867 * Lower domain value means higher priority. 1868 * Lower domain value means higher priority.
@@ -2041,6 +2042,17 @@ struct ib_flow_spec_action_handle {
2041 struct ib_flow_action *act; 2042 struct ib_flow_action *act;
2042}; 2043};
2043 2044
2045enum ib_counters_description {
2046 IB_COUNTER_PACKETS,
2047 IB_COUNTER_BYTES,
2048};
2049
2050struct ib_flow_spec_action_count {
2051 enum ib_flow_spec_type type;
2052 u16 size;
2053 struct ib_counters *counters;
2054};
2055
2044union ib_flow_spec { 2056union ib_flow_spec {
2045 struct { 2057 struct {
2046 u32 type; 2058 u32 type;
@@ -2058,6 +2070,7 @@ union ib_flow_spec {
2058 struct ib_flow_spec_action_tag flow_tag; 2070 struct ib_flow_spec_action_tag flow_tag;
2059 struct ib_flow_spec_action_drop drop; 2071 struct ib_flow_spec_action_drop drop;
2060 struct ib_flow_spec_action_handle action; 2072 struct ib_flow_spec_action_handle action;
2073 struct ib_flow_spec_action_count flow_count;
2061}; 2074};
2062 2075
2063struct ib_flow_attr { 2076struct ib_flow_attr {
@@ -2212,6 +2225,24 @@ struct ib_port_pkey_list {
2212 struct list_head pkey_list; 2225 struct list_head pkey_list;
2213}; 2226};
2214 2227
2228struct ib_counters {
2229 struct ib_device *device;
2230 struct ib_uobject *uobject;
2231 /* num of objects attached */
2232 atomic_t usecnt;
2233};
2234
2235enum ib_read_counters_flags {
2236 /* prefer read values from driver cache */
2237 IB_READ_COUNTERS_ATTR_PREFER_CACHED = 1 << 0,
2238};
2239
2240struct ib_counters_read_attr {
2241 u64 *counters_buff;
2242 u32 ncounters;
2243 u32 flags; /* use enum ib_read_counters_flags */
2244};
2245
2215struct uverbs_attr_bundle; 2246struct uverbs_attr_bundle;
2216 2247
2217struct ib_device { 2248struct ib_device {
@@ -2441,7 +2472,8 @@ struct ib_device {
2441 struct ib_flow * (*create_flow)(struct ib_qp *qp, 2472 struct ib_flow * (*create_flow)(struct ib_qp *qp,
2442 struct ib_flow_attr 2473 struct ib_flow_attr
2443 *flow_attr, 2474 *flow_attr,
2444 int domain); 2475 int domain,
2476 struct ib_udata *udata);
2445 int (*destroy_flow)(struct ib_flow *flow_id); 2477 int (*destroy_flow)(struct ib_flow *flow_id);
2446 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2478 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2447 struct ib_mr_status *mr_status); 2479 struct ib_mr_status *mr_status);
@@ -2483,6 +2515,13 @@ struct ib_device {
2483 struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, 2515 struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2484 struct ib_dm_mr_attr *attr, 2516 struct ib_dm_mr_attr *attr,
2485 struct uverbs_attr_bundle *attrs); 2517 struct uverbs_attr_bundle *attrs);
2518 struct ib_counters * (*create_counters)(struct ib_device *device,
2519 struct uverbs_attr_bundle *attrs);
2520 int (*destroy_counters)(struct ib_counters *counters);
2521 int (*read_counters)(struct ib_counters *counters,
2522 struct ib_counters_read_attr *counters_read_attr,
2523 struct uverbs_attr_bundle *attrs);
2524
2486 /** 2525 /**
2487 * rdma netdev operation 2526 * rdma netdev operation
2488 * 2527 *
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 095383a4bd1a..bd6bba3a6e04 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -420,6 +420,17 @@ static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_b
420 return attr->obj_attr.uobject->object; 420 return attr->obj_attr.uobject->object;
421} 421}
422 422
423static inline struct ib_uobject *uverbs_attr_get_uobject(const struct uverbs_attr_bundle *attrs_bundle,
424 u16 idx)
425{
426 const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
427
428 if (IS_ERR(attr))
429 return ERR_CAST(attr);
430
431 return attr->obj_attr.uobject;
432}
433
423static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, 434static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
424 size_t idx, const void *from, size_t size) 435 size_t idx, const void *from, size_t size)
425{ 436{
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 83e3890eef20..888ac5975a6c 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -55,6 +55,7 @@ enum uverbs_default_objects {
55 UVERBS_OBJECT_WQ, 55 UVERBS_OBJECT_WQ,
56 UVERBS_OBJECT_FLOW_ACTION, 56 UVERBS_OBJECT_FLOW_ACTION,
57 UVERBS_OBJECT_DM, 57 UVERBS_OBJECT_DM,
58 UVERBS_OBJECT_COUNTERS,
58}; 59};
59 60
60enum { 61enum {
@@ -131,4 +132,24 @@ enum uverbs_methods_mr {
131 UVERBS_METHOD_DM_MR_REG, 132 UVERBS_METHOD_DM_MR_REG,
132}; 133};
133 134
135enum uverbs_attrs_create_counters_cmd_attr_ids {
136 UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
137};
138
139enum uverbs_attrs_destroy_counters_cmd_attr_ids {
140 UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
141};
142
143enum uverbs_attrs_read_counters_cmd_attr_ids {
144 UVERBS_ATTR_READ_COUNTERS_HANDLE,
145 UVERBS_ATTR_READ_COUNTERS_BUFF,
146 UVERBS_ATTR_READ_COUNTERS_FLAGS,
147};
148
149enum uverbs_methods_actions_counters_ops {
150 UVERBS_METHOD_COUNTERS_CREATE,
151 UVERBS_METHOD_COUNTERS_DESTROY,
152 UVERBS_METHOD_COUNTERS_READ,
153};
154
134#endif 155#endif
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 409507f83b91..4f9991de8e3a 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -998,6 +998,19 @@ struct ib_uverbs_flow_spec_action_handle {
998 __u32 reserved1; 998 __u32 reserved1;
999}; 999};
1000 1000
1001struct ib_uverbs_flow_spec_action_count {
1002 union {
1003 struct ib_uverbs_flow_spec_hdr hdr;
1004 struct {
1005 __u32 type;
1006 __u16 size;
1007 __u16 reserved;
1008 };
1009 };
1010 __u32 handle;
1011 __u32 reserved1;
1012};
1013
1001struct ib_uverbs_flow_tunnel_filter { 1014struct ib_uverbs_flow_tunnel_filter {
1002 __be32 tunnel_id; 1015 __be32 tunnel_id;
1003}; 1016};
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 729b18f8c046..8daec1fa49cf 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -36,6 +36,7 @@
36 36
37#include <linux/types.h> 37#include <linux/types.h>
38#include <linux/if_ether.h> /* For ETH_ALEN. */ 38#include <linux/if_ether.h> /* For ETH_ALEN. */
39#include <rdma/ib_user_ioctl_verbs.h>
39 40
40enum { 41enum {
41 MLX5_QP_FLAG_SIGNATURE = 1 << 0, 42 MLX5_QP_FLAG_SIGNATURE = 1 << 0,
@@ -443,4 +444,27 @@ enum {
443enum { 444enum {
444 MLX5_IB_CLOCK_INFO_V1 = 0, 445 MLX5_IB_CLOCK_INFO_V1 = 0,
445}; 446};
447
448struct mlx5_ib_flow_counters_desc {
449 __u32 description;
450 __u32 index;
451};
452
453struct mlx5_ib_flow_counters_data {
454 RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
455 __u32 ncounters;
456 __u32 reserved;
457};
458
459struct mlx5_ib_create_flow {
460 __u32 ncounters_data;
461 __u32 reserved;
462 /*
463 * Following are counters data based on ncounters_data, each
464 * entry in the data[] should match a corresponding counter object
465 * that was pointed by a counters spec upon the flow creation
466 */
467 struct mlx5_ib_flow_counters_data data[];
468};
469
446#endif /* MLX5_ABI_USER_H */ 470#endif /* MLX5_ABI_USER_H */
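
For reference, a hedged sketch of how a user-space provider might fill the mlx5_ib_create_flow driver-private command defined just above; the struct names are mirrored locally, the field layout is assumed to match the ABI, and the kernel consumes this data in flow_counters_set_data() earlier in the patch.

/*
 * Hedged sketch, not the actual rdma-core mlx5 provider code: local mirrors
 * of the ABI structs above (layout assumed identical), filled with one
 * counters-data entry describing packets at user index 0 and bytes at 1.
 */
#include <stdint.h>
#include <string.h>

struct flow_counters_desc { uint32_t description; uint32_t index; };
struct flow_counters_data {
	uint64_t counters_data;		/* RDMA_UAPI_PTR: user pointer to the desc array */
	uint32_t ncounters;
	uint32_t reserved;
};
struct create_flow_ucmd {
	uint32_t ncounters_data;
	uint32_t reserved;
	struct flow_counters_data data[1];	/* flexible array in the real ABI */
};

static void fill_flow_ucmd(struct create_flow_ucmd *ucmd,
			   struct flow_counters_desc descs[2])
{
	descs[0].description = 0;	/* packets (IB_COUNTER_PACKETS) */
	descs[0].index = 0;
	descs[1].description = 1;	/* bytes (IB_COUNTER_BYTES) */
	descs[1].index = 1;

	memset(ucmd, 0, sizeof(*ucmd));
	ucmd->ncounters_data = 1;	/* the kernel currently accepts at most one entry */
	ucmd->data[0].counters_data = (uintptr_t)descs;	/* read back via copy_from_user() */
	ucmd->data[0].ncounters = 2;
}
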