aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/hw/usnic/Kconfig10
-rw-r--r--drivers/infiniband/hw/usnic/Makefile15
-rw-r--r--drivers/infiniband/hw/usnic/usnic.h29
-rw-r--r--drivers/infiniband/hw/usnic/usnic_abi.h56
-rw-r--r--drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h27
-rw-r--r--drivers/infiniband/hw/usnic/usnic_common_util.h51
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.c71
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.h25
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.c243
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.h58
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib.h115
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c598
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c541
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h95
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c351
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.h29
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c736
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h72
-rw-r--r--drivers/infiniband/hw/usnic/usnic_log.h58
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.c125
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.h28
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c603
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h80
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c237
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h74
-rw-r--r--drivers/infiniband/hw/usnic/usnic_vnic.c467
-rw-r--r--drivers/infiniband/hw/usnic/usnic_vnic.h103
29 files changed, 4899 insertions, 0 deletions
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 5ceda710f516..10219ee92191 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -53,6 +53,7 @@ source "drivers/infiniband/hw/mlx4/Kconfig"
53source "drivers/infiniband/hw/mlx5/Kconfig" 53source "drivers/infiniband/hw/mlx5/Kconfig"
54source "drivers/infiniband/hw/nes/Kconfig" 54source "drivers/infiniband/hw/nes/Kconfig"
55source "drivers/infiniband/hw/ocrdma/Kconfig" 55source "drivers/infiniband/hw/ocrdma/Kconfig"
56source "drivers/infiniband/hw/usnic/Kconfig"
56 57
57source "drivers/infiniband/ulp/ipoib/Kconfig" 58source "drivers/infiniband/ulp/ipoib/Kconfig"
58 59
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 1fe69888515f..bf508b5550c4 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
10obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/ 10obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/
11obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ 11obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
12obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/ 12obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/
13obj-$(CONFIG_INFINIBAND_USNIC) += hw/usnic/
13obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ 14obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
14obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ 15obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
15obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/ 16obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
diff --git a/drivers/infiniband/hw/usnic/Kconfig b/drivers/infiniband/hw/usnic/Kconfig
new file mode 100644
index 000000000000..2cc8ba00b34b
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/Kconfig
@@ -0,0 +1,10 @@
1config INFINIBAND_USNIC
2 tristate "Verbs support for Cisco VIC"
3 depends on NETDEVICES && ETHERNET && PCI && INTEL_IOMMU
4 select ENIC
5 select NET_VENDOR_CISCO
6 select PCI_IOV
7 select INFINIBAND_USER_ACCESS
8 ---help---
9 This is a low-level driver for Cisco's Virtual Interface
10 Cards (VICs), including the VIC 1240 and 1280 cards.
diff --git a/drivers/infiniband/hw/usnic/Makefile b/drivers/infiniband/hw/usnic/Makefile
new file mode 100644
index 000000000000..99fb2db47cd5
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/Makefile
@@ -0,0 +1,15 @@
1ccflags-y := -Idrivers/net/ethernet/cisco/enic
2
3obj-$(CONFIG_INFINIBAND_USNIC)+= usnic_verbs.o
4
5usnic_verbs-y=\
6usnic_fwd.o \
7usnic_transport.o \
8usnic_uiom.o \
9usnic_uiom_interval_tree.o \
10usnic_vnic.o \
11usnic_ib_main.o \
12usnic_ib_qp_grp.o \
13usnic_ib_sysfs.o \
14usnic_ib_verbs.o \
15usnic_debugfs.o \
diff --git a/drivers/infiniband/hw/usnic/usnic.h b/drivers/infiniband/hw/usnic/usnic.h
new file mode 100644
index 000000000000..d741c76bc4be
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_H_
20#define USNIC_H_
21
22#define DRV_NAME "usnic_verbs"
23
24#define PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC 0x00cf /* User space NIC */
25
26#define DRV_VERSION "1.0.2"
27#define DRV_RELDATE "September 09, 2013"
28
29#endif /* USNIC_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h
new file mode 100644
index 000000000000..510b7d702465
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_abi.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19
20#ifndef USNIC_ABI_H
21#define USNIC_ABI_H
22
23/* ABI between userspace and kernel */
24#define USNIC_UVERBS_ABI_VERSION 2
25
26#define USNIC_QP_GRP_MAX_WQS 8
27#define USNIC_QP_GRP_MAX_RQS 8
28#define USNIC_QP_GRP_MAX_CQS 16
29
/*
 * Transport value reported to userspace in usnic_ib_create_qp_resp.transport.
 * Part of the user/kernel ABI: existing values must never be renumbered.
 */
30enum usnic_transport_type {
31	USNIC_TRANSPORT_UNKNOWN = 0,
32	USNIC_TRANSPORT_ROCE_CUSTOM = 1,
33	USNIC_TRANSPORT_MAX = 2,	/* sentinel: count of valid transports */
34};
35
36/*TODO: Future - usnic_modify_qp needs to pass in generic filters */
37struct usnic_ib_create_qp_resp {
38 u32 vfid;
39 u32 qp_grp_id;
40 u64 bar_bus_addr;
41 u32 bar_len;
42/*
43 * WQ, RQ, CQ are explicity specified bc exposing a generic resources inteface
44 * expands the scope of ABI to many files.
45 */
46 u32 wq_cnt;
47 u32 rq_cnt;
48 u32 cq_cnt;
49 u32 wq_idx[USNIC_QP_GRP_MAX_WQS];
50 u32 rq_idx[USNIC_QP_GRP_MAX_RQS];
51 u32 cq_idx[USNIC_QP_GRP_MAX_CQS];
52 u32 transport;
53 u32 reserved[9];
54};
55
56#endif /* USNIC_ABI_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
new file mode 100644
index 000000000000..393567266142
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_CMN_PKT_HDR_H
20#define USNIC_CMN_PKT_HDR_H
21
22#define USNIC_ROCE_ETHERTYPE (0x8915)
23#define USNIC_ROCE_GRH_VER (8)
24#define USNIC_PROTO_VER (1)
25#define USNIC_ROCE_GRH_VER_SHIFT (4)
26
27#endif /* USNIC_CMN_PKT_HDR_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_common_util.h b/drivers/infiniband/hw/usnic/usnic_common_util.h
new file mode 100644
index 000000000000..128550a4f9e2
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_common_util.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_CMN_UTIL_H
20#define USNIC_CMN_UTIL_H
21
/*
 * Build a 16-byte link-local GID from a 6-byte MAC address: the fe80::/64
 * prefix followed by the EUI-64 interface id (MAC with the universal/local
 * bit flipped and ff:fe inserted in the middle).
 */
static inline void
usnic_mac_to_gid(const char *const mac, char *raw_gid)
{
	int i;

	raw_gid[0] = 0xfe;
	raw_gid[1] = 0x80;
	for (i = 2; i < 8; i++)
		raw_gid[i] = 0;
	raw_gid[8] = mac[0] ^ 2;	/* flip universal/local bit */
	raw_gid[9] = mac[1];
	raw_gid[10] = mac[2];
	raw_gid[11] = 0xff;
	raw_gid[12] = 0xfe;
	raw_gid[13] = mac[3];
	raw_gid[14] = mac[4];
	raw_gid[15] = mac[5];
}
37
/*
 * Overwrite only the interface-id half (bytes 8-15) of an existing 16-byte
 * GID with the EUI-64 derived from @mac; the 8-byte prefix is untouched.
 *
 * Fix: @mac is read-only here, so it is const-qualified for consistency
 * with usnic_mac_to_gid() above (backward compatible for all callers).
 */
static inline void
usnic_write_gid_if_id_from_mac(const char *mac, char *raw_gid)
{
	raw_gid[8] = mac[0] ^ 2;	/* flip universal/local bit */
	raw_gid[9] = mac[1];
	raw_gid[10] = mac[2];
	raw_gid[11] = 0xff;
	raw_gid[12] = 0xfe;
	raw_gid[13] = mac[3];
	raw_gid[14] = mac[4];
	raw_gid[15] = mac[5];
}
50
51#endif /* USNIC_CMN_UTIL_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c
new file mode 100644
index 000000000000..91386df025ae
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/debugfs.h>
20#include <linux/module.h>
21
22#include "usnic.h"
23#include "usnic_log.h"
24#include "usnic_debugfs.h"
25
26static struct dentry *debugfs_root;
27
28static ssize_t usnic_debugfs_buildinfo_read(struct file *f, char __user *data,
29 size_t count, loff_t *ppos)
30{
31 char buf[500];
32 int res;
33
34 if (*ppos > 0)
35 return 0;
36
37 res = scnprintf(buf, sizeof(buf),
38 "version: %s\n"
39 "build date: %s\n",
40 DRV_VERSION, DRV_RELDATE);
41
42 return simple_read_from_buffer(data, count, ppos, buf, res);
43}
44
45static const struct file_operations usnic_debugfs_buildinfo_ops = {
46 .owner = THIS_MODULE,
47 .open = simple_open,
48 .read = usnic_debugfs_buildinfo_read
49};
50
51void usnic_debugfs_init(void)
52{
53 debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
54 if (IS_ERR(debugfs_root)) {
55 usnic_err("Failed to create debugfs root dir, check if debugfs is enabled in kernel configuration\n");
56 debugfs_root = NULL;
57 return;
58 }
59
60 debugfs_create_file("build-info", S_IRUGO, debugfs_root,
61 NULL, &usnic_debugfs_buildinfo_ops);
62}
63
64void usnic_debugfs_exit(void)
65{
66 if (!debugfs_root)
67 return;
68
69 debugfs_remove_recursive(debugfs_root);
70 debugfs_root = NULL;
71}
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.h b/drivers/infiniband/hw/usnic/usnic_debugfs.h
new file mode 100644
index 000000000000..914a330dd5e7
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18#ifndef USNIC_DEBUGFS_H_
19#define USNIC_DEBUGFS_H_
20
21void usnic_debugfs_init(void);
22
23void usnic_debugfs_exit(void);
24
25#endif /*!USNIC_DEBUGFS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
new file mode 100644
index 000000000000..8e42216362e7
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18#include <linux/netdevice.h>
19#include <linux/pci.h>
20
21#include "enic_api.h"
22#include "usnic_common_pkt_hdr.h"
23#include "usnic_fwd.h"
24#include "usnic_log.h"
25
26struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
27{
28 struct usnic_fwd_dev *ufdev;
29
30 ufdev = kzalloc(sizeof(*ufdev), GFP_KERNEL);
31 if (!ufdev)
32 return NULL;
33
34 ufdev->pdev = pdev;
35 ufdev->netdev = pci_get_drvdata(pdev);
36 spin_lock_init(&ufdev->lock);
37
38 return ufdev;
39}
40
/* Release a forwarding device obtained from usnic_fwd_dev_alloc(). */
void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev)
{
	kfree(ufdev);
}
45
46static int usnic_fwd_devcmd(struct usnic_fwd_dev *ufdev, int vnic_idx,
47 enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1)
48{
49 int status;
50 struct net_device *netdev = ufdev->netdev;
51
52 spin_lock(&ufdev->lock);
53 status = enic_api_devcmd_proxy_by_index(netdev,
54 vnic_idx,
55 cmd,
56 a0, a1,
57 1000);
58 spin_unlock(&ufdev->lock);
59 if (status) {
60 if (status == ERR_EINVAL && cmd == CMD_DEL_FILTER) {
61 usnic_dbg("Dev %s vnic idx %u cmd %u already deleted",
62 netdev_name(netdev), vnic_idx, cmd);
63 } else {
64 usnic_err("Dev %s vnic idx %u cmd %u failed with status %d\n",
65 netdev_name(netdev), vnic_idx, cmd,
66 status);
67 }
68 } else {
69 usnic_dbg("Dev %s vnic idx %u cmd %u success",
70 netdev_name(netdev), vnic_idx,
71 cmd);
72 }
73
74 return status;
75}
76
77int usnic_fwd_add_usnic_filter(struct usnic_fwd_dev *ufdev, int vnic_idx,
78 int rq_idx, struct usnic_fwd_filter *fwd_filter,
79 struct usnic_fwd_filter_hndl **filter_hndl)
80{
81 struct filter_tlv *tlv, *tlv_va;
82 struct filter *filter;
83 struct filter_action *action;
84 struct pci_dev *pdev;
85 struct usnic_fwd_filter_hndl *usnic_filter_hndl;
86 int status;
87 u64 a0, a1;
88 u64 tlv_size;
89 dma_addr_t tlv_pa;
90
91 pdev = ufdev->pdev;
92 tlv_size = (2*sizeof(struct filter_tlv) +
93 sizeof(struct filter) +
94 sizeof(struct filter_action));
95 tlv = pci_alloc_consistent(pdev, tlv_size, &tlv_pa);
96 if (!tlv) {
97 usnic_err("Failed to allocate memory\n");
98 return -ENOMEM;
99 }
100
101 usnic_filter_hndl = kzalloc(sizeof(*usnic_filter_hndl), GFP_ATOMIC);
102 if (!usnic_filter_hndl) {
103 usnic_err("Failed to allocate memory for hndl\n");
104 pci_free_consistent(pdev, tlv_size, tlv, tlv_pa);
105 return -ENOMEM;
106 }
107
108 tlv_va = tlv;
109 a0 = tlv_pa;
110 a1 = tlv_size;
111 memset(tlv, 0, tlv_size);
112 tlv->type = CLSF_TLV_FILTER;
113 tlv->length = sizeof(struct filter);
114 filter = (struct filter *)&tlv->val;
115 filter->type = FILTER_USNIC_ID;
116 filter->u.usnic.ethtype = USNIC_ROCE_ETHERTYPE;
117 filter->u.usnic.flags = FILTER_FIELD_USNIC_ETHTYPE |
118 FILTER_FIELD_USNIC_ID |
119 FILTER_FIELD_USNIC_PROTO;
120 filter->u.usnic.proto_version = (USNIC_ROCE_GRH_VER <<
121 USNIC_ROCE_GRH_VER_SHIFT)
122 | USNIC_PROTO_VER;
123 filter->u.usnic.usnic_id = fwd_filter->port_num;
124 tlv = (struct filter_tlv *)((char *)tlv + sizeof(struct filter_tlv) +
125 sizeof(struct filter));
126 tlv->type = CLSF_TLV_ACTION;
127 tlv->length = sizeof(struct filter_action);
128 action = (struct filter_action *)&tlv->val;
129 action->type = FILTER_ACTION_RQ_STEERING;
130 action->u.rq_idx = rq_idx;
131
132 status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_ADD_FILTER, &a0, &a1);
133 pci_free_consistent(pdev, tlv_size, tlv_va, tlv_pa);
134 if (status) {
135 usnic_err("VF %s Filter add failed with status:%d",
136 pci_name(pdev),
137 status);
138 kfree(usnic_filter_hndl);
139 return status;
140 } else {
141 usnic_dbg("VF %s FILTER ID:%u",
142 pci_name(pdev),
143 (u32)a0);
144 }
145
146 usnic_filter_hndl->type = FILTER_USNIC_ID;
147 usnic_filter_hndl->id = (u32)a0;
148 usnic_filter_hndl->vnic_idx = vnic_idx;
149 usnic_filter_hndl->ufdev = ufdev;
150 usnic_filter_hndl->filter = fwd_filter;
151 *filter_hndl = usnic_filter_hndl;
152
153 return status;
154}
155
156int usnic_fwd_del_filter(struct usnic_fwd_filter_hndl *filter_hndl)
157{
158 int status;
159 u64 a0, a1;
160 struct net_device *netdev;
161
162 netdev = filter_hndl->ufdev->netdev;
163 a0 = filter_hndl->id;
164
165 status = usnic_fwd_devcmd(filter_hndl->ufdev, filter_hndl->vnic_idx,
166 CMD_DEL_FILTER, &a0, &a1);
167 if (status) {
168 if (status == ERR_EINVAL) {
169 usnic_dbg("Filter %u already deleted for VF Idx %u pf: %s status: %d",
170 filter_hndl->id, filter_hndl->vnic_idx,
171 netdev_name(netdev), status);
172 status = 0;
173 kfree(filter_hndl);
174 } else {
175 usnic_err("PF %s VF Idx %u Filter: %u FILTER DELETE failed with status %d",
176 netdev_name(netdev),
177 filter_hndl->vnic_idx, filter_hndl->id,
178 status);
179 }
180 } else {
181 usnic_dbg("PF %s VF Idx %u Filter: %u FILTER DELETED",
182 netdev_name(netdev), filter_hndl->vnic_idx,
183 filter_hndl->id);
184 kfree(filter_hndl);
185 }
186
187 return status;
188}
189
190int usnic_fwd_enable_rq(struct usnic_fwd_dev *ufdev, int vnic_idx, int rq_idx)
191{
192 int status;
193 struct net_device *pf_netdev;
194 u64 a0, a1;
195
196 pf_netdev = ufdev->netdev;
197 a0 = rq_idx;
198 a1 = CMD_QP_RQWQ;
199
200 status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_ENABLE, &a0, &a1);
201
202 if (status) {
203 usnic_err("PF %s VNIC Index %u RQ Index: %u ENABLE Failed with status %d",
204 netdev_name(pf_netdev),
205 vnic_idx,
206 rq_idx,
207 status);
208 } else {
209 usnic_dbg("PF %s VNIC Index %u RQ Index: %u ENABLED",
210 netdev_name(pf_netdev),
211 vnic_idx, rq_idx);
212 }
213
214 return status;
215}
216
217int usnic_fwd_disable_rq(struct usnic_fwd_dev *ufdev, int vnic_idx, int rq_idx)
218{
219 int status;
220 u64 a0, a1;
221 struct net_device *pf_netdev;
222
223 pf_netdev = ufdev->netdev;
224 a0 = rq_idx;
225 a1 = CMD_QP_RQWQ;
226
227 status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_DISABLE, &a0, &a1);
228
229 if (status) {
230 usnic_err("PF %s VNIC Index %u RQ Index: %u DISABLE Failed with status %d",
231 netdev_name(pf_netdev),
232 vnic_idx,
233 rq_idx,
234 status);
235 } else {
236 usnic_dbg("PF %s VNIC Index %u RQ Index: %u DISABLED",
237 netdev_name(pf_netdev),
238 vnic_idx,
239 rq_idx);
240 }
241
242 return status;
243}
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
new file mode 100644
index 000000000000..6973901da8af
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_FWD_H_
20#define USNIC_FWD_H_
21
22#include <linux/if.h>
23#include <linux/pci.h>
24#include <linux/spinlock.h>
25
26#include "usnic_abi.h"
27#include "vnic_devcmd.h"
28
/*
 * Per-PF forwarding state: the enic PCI function, its netdev, and the lock
 * that serializes proxied devcmds (see usnic_fwd_devcmd()).
 */
29struct usnic_fwd_dev {
30	struct pci_dev *pdev;
31	struct net_device *netdev;	/* taken from pci_get_drvdata() at alloc */
32	spinlock_t lock;		/* serializes enic devcmd proxy calls */
33};
34
/* Caller-owned filter spec: transport plus the usnic id/port to match. */
35struct usnic_fwd_filter {
36	enum usnic_transport_type transport;
37	u16 port_num;		/* becomes filter->u.usnic.usnic_id */
38};
39
/*
 * Handle for an installed firmware filter, returned by
 * usnic_fwd_add_usnic_filter() and freed by usnic_fwd_del_filter().
 */
40struct usnic_fwd_filter_hndl {
41	enum filter_type type;
42	u32 id;			/* firmware-assigned filter id */
43	u32 vnic_idx;
44	struct usnic_fwd_dev *ufdev;
45	struct list_head link;
46	struct usnic_fwd_filter *filter;	/* caller-owned spec; not freed here */
47};
48
49struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
50void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
51int usnic_fwd_add_usnic_filter(struct usnic_fwd_dev *ufdev, int vnic_idx,
52 int rq_idx, struct usnic_fwd_filter *filter,
53 struct usnic_fwd_filter_hndl **filter_hndl);
54int usnic_fwd_del_filter(struct usnic_fwd_filter_hndl *filter_hndl);
55int usnic_fwd_enable_rq(struct usnic_fwd_dev *ufdev, int vnic_idx, int rq_idx);
56int usnic_fwd_disable_rq(struct usnic_fwd_dev *ufdev, int vnic_idx, int rq_idx);
57
58#endif /* !USNIC_FWD_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h
new file mode 100644
index 000000000000..3511c8521f30
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib.h
@@ -0,0 +1,115 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_IB_H_
20#define USNIC_IB_H_
21
22#include <linux/iommu.h>
23#include <linux/netdevice.h>
24#include <linux/version.h>
25
26#include <rdma/ib_verbs.h>
27
28
29#include "usnic.h"
30#include "usnic_abi.h"
31#include "usnic_vnic.h"
32
33#define USNIC_IB_PORT_CNT 1
34#define USNIC_IB_NUM_COMP_VECTORS 1
35
36extern unsigned int usnic_ib_share_vf;
37
/* Per-user-context state: the QP groups created under this context. */
38struct usnic_ib_ucontext {
39	struct ib_ucontext ibucontext;
40	/* Protected by usnic_ib_dev->usdev_lock */
41	struct list_head qp_grp_list;
42	struct list_head link;		/* entry on usnic_ib_dev->ctx_list */
43};
44
/* Protection domain: wraps the uiom PD used for userspace memory mapping. */
45struct usnic_ib_pd {
46	struct ib_pd ibpd;
47	struct usnic_uiom_pd *umem_pd;
48};
49
/* Memory region: wraps the uiom registration of the user's buffer. */
50struct usnic_ib_mr {
51	struct ib_mr ibmr;
52	struct usnic_uiom_reg *umem;
53};
54
/*
 * Per-PF IB device state.  One instance per enic physical function;
 * usdev_lock guards the lists and, per the comment in usnic_ib_ucontext,
 * the per-context QP-group lists too.
 */
55struct usnic_ib_dev {
56	struct ib_device ib_dev;
57	struct pci_dev *pdev;
58	struct net_device *netdev;
59	struct usnic_fwd_dev *ufdev;
60	bool link_up;
61	struct list_head ib_dev_link;	/* entry on the global ibdev list */
62	struct list_head vf_dev_list;	/* usnic_ib_vf.link entries */
63	struct list_head ctx_list;	/* usnic_ib_ucontext.link entries */
64	struct mutex usdev_lock;
65	char mac[ETH_ALEN];
66	unsigned int mtu;
67
68	/* provisioning information */
69	struct kref vf_cnt;
70	unsigned int vf_res_cnt[USNIC_VNIC_RES_TYPE_MAX];
71
72	/* sysfs vars for QPN reporting */
73	struct kobject *qpn_kobj;
74};
75
/* Per-VF state: the vnic backing it and its current PD/QP-group usage. */
76struct usnic_ib_vf {
77	struct usnic_ib_dev *pf;	/* owning physical function */
78	spinlock_t lock;
79	struct usnic_vnic *vnic;
80	unsigned int qp_grp_ref_cnt;	/* QP groups currently on this VF */
81	struct usnic_ib_pd *pd;		/* PD this VF is bound to, if any */
82	struct list_head link;		/* entry on pf->vf_dev_list */
83};
84
85static inline
86struct usnic_ib_dev *to_usdev(struct ib_device *ibdev)
87{
88 return container_of(ibdev, struct usnic_ib_dev, ib_dev);
89}
90
91static inline
92struct usnic_ib_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
93{
94 return container_of(ibucontext, struct usnic_ib_ucontext, ibucontext);
95}
96
97static inline
98struct usnic_ib_pd *to_upd(struct ib_pd *ibpd)
99{
100 return container_of(ibpd, struct usnic_ib_pd, ibpd);
101}
102
/*
 * NOTE(review): byte-for-byte duplicate of to_ucontext() above; one of the
 * two is redundant.  Kept because callers elsewhere may use either name.
 */
103static inline
104struct usnic_ib_ucontext *to_uucontext(struct ib_ucontext *ibucontext)
105{
106	return container_of(ibucontext, struct usnic_ib_ucontext, ibucontext);
107}
108
109static inline
110struct usnic_ib_mr *to_umr(struct ib_mr *ibmr)
111{
112 return container_of(ibmr, struct usnic_ib_mr, ibmr);
113}
114void usnic_ib_log_vf(struct usnic_ib_vf *vf);
115#endif /* USNIC_IB_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
new file mode 100644
index 000000000000..dc09c12435b9
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -0,0 +1,598 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 * Author: Upinder Malhi <umalhi@cisco.com>
18 * Author: Anant Deepak <anadeepa@cisco.com>
19 * Author: Cesare Cantu' <cantuc@cisco.com>
20 * Author: Jeff Squyres <jsquyres@cisco.com>
21 * Author: Kiran Thirumalai <kithirum@cisco.com>
22 * Author: Xuyang Wang <xuywang@cisco.com>
23 * Author: Reese Faucette <rfaucett@cisco.com>
24 *
25 */
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/slab.h>
30#include <linux/errno.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33
34#include <rdma/ib_user_verbs.h>
35#include <rdma/ib_addr.h>
36
37#include "usnic_abi.h"
38#include "usnic_common_util.h"
39#include "usnic_ib.h"
40#include "usnic_ib_qp_grp.h"
41#include "usnic_log.h"
42#include "usnic_fwd.h"
43#include "usnic_debugfs.h"
44#include "usnic_ib_verbs.h"
45#include "usnic_transport.h"
46#include "usnic_uiom.h"
47#include "usnic_ib_sysfs.h"
48
/* Module parameters; see MODULE_PARM_DESC at the bottom of this file */
unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

/* Protects usnic_ib_ibdev_list (one entry per discovered PF) */
static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);
58
59/* Callback dump funcs */
60static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
61{
62 struct usnic_ib_vf *vf = obj;
63 return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
64}
65/* End callback dump funcs */
66
/* Render one VF (header + per-QP-group rows) into buf for logging */
static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}
73
/* Dump a VF's state to the debug log */
void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char dump_buf[1000];	/* NOTE(review): large on-stack buffer */

	usnic_ib_dump_vf(vf, dump_buf, sizeof(dump_buf));
	usnic_dbg("%s\n", dump_buf);
}
80
81/* Start of netdev section */
/* Map a NETDEV_* notifier event number to a printable name */
static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
{
	/*
	 * static const: build the table once at compile time instead of
	 * re-initializing an array of pointers on the stack at every call.
	 */
	static const char * const event2str[] = {"NETDEV_NONE", "NETDEV_UP",
		"NETDEV_DOWN", "NETDEV_REBOOT", "NETDEV_CHANGE",
		"NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU",
		"NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE",
		"NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP",
		"NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE",
		"NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE",
		"NETDEV_NOTIFY_PEERS", "NETDEV_JOIN"
	};

	if (event >= ARRAY_SIZE(event2str))
		return "UNKNOWN_NETDEV_EVENT";
	else
		return event2str[event];
}
99
100static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
101{
102 struct usnic_ib_ucontext *ctx;
103 struct usnic_ib_qp_grp *qp_grp;
104 enum ib_qp_state cur_state;
105 int status;
106
107 BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));
108
109 list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
110 list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
111 cur_state = qp_grp->state;
112 if (cur_state == IB_QPS_INIT ||
113 cur_state == IB_QPS_RTR ||
114 cur_state == IB_QPS_RTS) {
115 status = usnic_ib_qp_grp_modify(qp_grp,
116 IB_QPS_ERR,
117 NULL);
118 if (status) {
119 usnic_err("Failed to transistion qp grp %u from %s to %s\n",
120 qp_grp->grp_id,
121 usnic_ib_qp_grp_state_to_string
122 (cur_state),
123 usnic_ib_qp_grp_state_to_string
124 (IB_QPS_ERR));
125 }
126 }
127 }
128 }
129}
130
131static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
132 unsigned long event)
133{
134 struct net_device *netdev;
135 struct ib_event ib_event;
136
137 memset(&ib_event, 0, sizeof(ib_event));
138
139 mutex_lock(&us_ibdev->usdev_lock);
140 netdev = us_ibdev->netdev;
141 switch (event) {
142 case NETDEV_REBOOT:
143 usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
144 usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
145 ib_event.event = IB_EVENT_PORT_ERR;
146 ib_event.device = &us_ibdev->ib_dev;
147 ib_event.element.port_num = 1;
148 ib_dispatch_event(&ib_event);
149 break;
150 case NETDEV_UP:
151 case NETDEV_DOWN:
152 case NETDEV_CHANGE:
153 if (!us_ibdev->link_up && netif_carrier_ok(netdev)) {
154 us_ibdev->link_up = true;
155 usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
156 ib_event.event = IB_EVENT_PORT_ACTIVE;
157 ib_event.device = &us_ibdev->ib_dev;
158 ib_event.element.port_num = 1;
159 ib_dispatch_event(&ib_event);
160 } else if (us_ibdev->link_up && !netif_carrier_ok(netdev)) {
161 us_ibdev->link_up = false;
162 usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
163 usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
164 ib_event.event = IB_EVENT_PORT_ERR;
165 ib_event.device = &us_ibdev->ib_dev;
166 ib_event.element.port_num = 1;
167 ib_dispatch_event(&ib_event);
168 } else {
169 usnic_dbg("Ignorning %s on %s\n",
170 usnic_ib_netdev_event_to_string(event),
171 us_ibdev->ib_dev.name);
172 }
173 break;
174 case NETDEV_CHANGEADDR:
175 if (!memcmp(us_ibdev->mac, netdev->dev_addr,
176 sizeof(us_ibdev->mac))) {
177 usnic_dbg("Ignorning addr change on %s\n",
178 us_ibdev->ib_dev.name);
179 } else {
180 usnic_info(" %s old mac: %pM new mac: %pM\n",
181 us_ibdev->ib_dev.name,
182 us_ibdev->mac,
183 netdev->dev_addr);
184 memcpy(us_ibdev->mac, netdev->dev_addr,
185 sizeof(us_ibdev->mac));
186 usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
187 ib_event.event = IB_EVENT_GID_CHANGE;
188 ib_event.device = &us_ibdev->ib_dev;
189 ib_event.element.port_num = 1;
190 ib_dispatch_event(&ib_event);
191 }
192
193 break;
194 case NETDEV_CHANGEMTU:
195 if (us_ibdev->mtu != netdev->mtu) {
196 usnic_info("MTU Change on %s old: %u new: %u\n",
197 us_ibdev->ib_dev.name,
198 us_ibdev->mtu, netdev->mtu);
199 us_ibdev->mtu = netdev->mtu;
200 usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
201 } else {
202 usnic_dbg("Ignoring MTU change on %s\n",
203 us_ibdev->ib_dev.name);
204 }
205 break;
206 default:
207 usnic_dbg("Ignorning event %s on %s",
208 usnic_ib_netdev_event_to_string(event),
209 us_ibdev->ib_dev.name);
210 }
211 mutex_unlock(&us_ibdev->usdev_lock);
212}
213
214static int usnic_ib_netdevice_event(struct notifier_block *notifier,
215 unsigned long event, void *ptr)
216{
217 struct usnic_ib_dev *us_ibdev;
218
219 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
220
221 mutex_lock(&usnic_ib_ibdev_list_lock);
222 list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
223 if (us_ibdev->netdev == netdev) {
224 usnic_ib_handle_usdev_event(us_ibdev, event);
225 break;
226 }
227 }
228 mutex_unlock(&usnic_ib_ibdev_list_lock);
229
230 return NOTIFY_DONE;
231}
232
/* Sees events for all netdevs; non-usnic ones are filtered in the callback */
static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
236/* End of netdev section */
237
238/* Start of PF discovery section */
/*
 * Allocate, initialize and register the IB device for PF "dev".
 * Called on first VF probe for a given PF, with
 * usnic_ib_ibdev_list_lock held by the caller.
 * Returns the new usnic_ib_dev, or NULL / ERR_PTR on failure
 * (callers test with IS_ERR_OR_NULL).
 */
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;

	usnic_dbg("\n");

	us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
	if (IS_ERR_OR_NULL(us_ibdev)) {
		usnic_err("Device %s context alloc failed\n",
				netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(us_ibdev ? PTR_ERR(us_ibdev) : -EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (IS_ERR_OR_NULL(us_ibdev->ufdev)) {
		usnic_err("Failed to alloc ufdev for %s with err %ld\n",
				pci_name(dev), PTR_ERR(us_ibdev->ufdev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	/* the PF's netdev was stashed in drvdata by the enic driver */
	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.owner = THIS_MODULE;
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dma_device = &dev->dev;
	us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
	strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);

	us_ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	us_ibdev->ib_dev.query_device = usnic_ib_query_device;
	us_ibdev->ib_dev.query_port = usnic_ib_query_port;
	us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
	us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
	us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
	us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
	us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
	us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
	us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
	us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
	us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
	us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
	us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
	us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
	us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
	us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
	us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
	us_ibdev->ib_dev.mmap = usnic_ib_mmap;
	us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
	us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
	us_ibdev->ib_dev.post_send = usnic_ib_post_send;
	us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
	us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
	us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
	us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;


	if (ib_register_device(&us_ibdev->ib_dev, NULL))
		goto err_fwd_dealloc;

	/* Seed cached netdev state; kept in sync by the netdev notifier */
	us_ibdev->link_up = netif_carrier_ok(us_ibdev->netdev);
	us_ibdev->mtu = us_ibdev->netdev->mtu;
	memcpy(&us_ibdev->mac, us_ibdev->netdev->dev_addr,
		sizeof(us_ibdev->mac));
	/* node GUID is derived from the permanent MAC address */
	usnic_mac_to_gid(us_ibdev->netdev->perm_addr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
			us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
			us_ibdev->mac, us_ibdev->link_up, us_ibdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocing device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}
344
/*
 * Unregister and free a PF's IB device; inverse of usnic_ib_device_add()
 * plus the sysfs registration done by usnic_ib_discover_pf().
 */
static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}
353
354static void usnic_ib_undiscover_pf(struct kref *kref)
355{
356 struct usnic_ib_dev *us_ibdev, *tmp;
357 struct pci_dev *dev;
358 bool found = false;
359
360 dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
361 mutex_lock(&usnic_ib_ibdev_list_lock);
362 list_for_each_entry_safe(us_ibdev, tmp,
363 &usnic_ib_ibdev_list, ib_dev_link) {
364 if (us_ibdev->pdev == dev) {
365 list_del(&us_ibdev->ib_dev_link);
366 usnic_ib_device_remove(us_ibdev);
367 found = true;
368 break;
369 }
370 }
371
372 WARN(!found, "Failed to remove PF %s\n", pci_name(dev));
373
374 mutex_unlock(&usnic_ib_ibdev_list_lock);
375}
376
/*
 * Find (or create on first use) the usnic_ib_dev for the PF backing
 * "vnic", taking one vf_cnt reference on it.  Returns the PF device or
 * an ERR_PTR; drop the reference with kref_put(.., usnic_ib_undiscover_pf).
 */
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			/* PF already known: just take another reference */
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		/* normalize NULL to an ERR_PTR for our caller */
		us_ibdev = (us_ibdev) ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
414/* End of PF discovery section */
415
416/* Start of PCI section */
417
418static DEFINE_PCI_DEVICE_TABLE(usnic_ib_pci_ids) = {
419 {PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
420 {0,}
421};
422
/*
 * Probe a usNIC VF PCI function: enable it, wrap it in a vnic, and
 * attach it to its PF's IB device (created on the first VF).
 * Unwinds fully on any failure via the goto chain.
 */
static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = (vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM);
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	/* takes a vf_cnt reference on the PF; dropped in pci_remove */
	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err%ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = (pf ? PTR_ERR(pf) : -EFAULT);
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (will be same for each VF, easier to re-write than
	 * to say "if (!set) { set_values(); set=1; }
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
			pf->ib_dev.name);
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}
502
/*
 * Remove a VF; inverse of usnic_ib_pci_probe.  Dropping the vf_cnt
 * reference tears down the PF's IB device when this was its last VF.
 */
static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}
522
523/* PCI driver entry points */
/* Binds to usNIC-capable VF functions; PF lifetime is kref-managed */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
530/* End of PCI section */
531
532/* Start of module section */
533static int __init usnic_ib_init(void)
534{
535 int err;
536
537 printk_once(KERN_INFO "%s", usnic_version);
538
539 err = usnic_uiom_init(DRV_NAME);
540 if (err) {
541 usnic_err("Unable to initalize umem with err %d\n", err);
542 return err;
543 }
544
545 if (pci_register_driver(&usnic_ib_pci_driver)) {
546 usnic_err("Unable to register with PCI\n");
547 goto out_umem_fini;
548 }
549
550 err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
551 if (err) {
552 usnic_err("Failed to register netdev notifier\n");
553 goto out_pci_unreg;
554 }
555
556 err = usnic_transport_init();
557 if (err) {
558 usnic_err("Failed to initialize transport\n");
559 goto out_unreg_netdev_notifier;
560 }
561
562 usnic_debugfs_init();
563
564 return 0;
565
566out_unreg_netdev_notifier:
567 unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
568out_pci_unreg:
569 pci_unregister_driver(&usnic_ib_pci_driver);
570out_umem_fini:
571 usnic_uiom_fini();
572
573 return err;
574}
575
/* Module exit: tear down in reverse order of usnic_ib_init() */
static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
	usnic_uiom_fini();
}
585
586MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
587MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
588MODULE_LICENSE("Dual BSD/GPL");
589MODULE_VERSION(DRV_VERSION);
590module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
591module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
592MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
593MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
594MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);
595
596module_init(usnic_ib_init);
597module_exit(usnic_ib_destroy);
598/* End of module section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
new file mode 100644
index 000000000000..ca5fa6ad59ac
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -0,0 +1,541 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18#include <linux/errno.h>
19#include <linux/module.h>
20#include <linux/spinlock.h>
21
22#include "usnic_log.h"
23#include "usnic_vnic.h"
24#include "usnic_fwd.h"
25#include "usnic_uiom.h"
26#include "usnic_ib_qp_grp.h"
27#include "usnic_ib_sysfs.h"
28#include "usnic_transport.h"
29
30const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
31{
32 switch (state) {
33 case IB_QPS_RESET:
34 return "Rst";
35 case IB_QPS_INIT:
36 return "Init";
37 case IB_QPS_RTR:
38 return "RTR";
39 case IB_QPS_RTS:
40 return "RTS";
41 case IB_QPS_SQD:
42 return "SQD";
43 case IB_QPS_SQE:
44 return "SQE";
45 case IB_QPS_ERR:
46 return "ERR";
47 default:
48 return "UNKOWN STATE";
49
50 }
51}
52
/* Column headers matching usnic_ib_qp_grp_dump_rows() */
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}
57
/* Render one dump row per QP group; obj may be NULL for an empty row */
int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_fwd_filter_hndl *default_filter_hndl;
	if (obj) {
		/* first handle on the list is the default filter */
		default_filter_hndl = list_first_entry(&qp_grp->filter_hndls,
					struct usnic_fwd_filter_hndl, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
						qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_filter_hndl->id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}
76
/*
 * Install a forwarding filter steering fwd_filter's traffic to this QP
 * group's first RQ and record the handle on qp_grp->filter_hndls.
 * Caller must hold qp_grp->lock.
 */
static int add_fwd_filter(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_fwd_filter *fwd_filter)
{
	struct usnic_fwd_filter_hndl *filter_hndl;
	int status;
	struct usnic_vnic_res_chunk *chunk;
	int rq_idx;

	WARN_ON(!spin_is_locked(&qp_grp->lock));

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(chunk) || chunk->cnt < 1) {
		usnic_err("Failed to get RQ info for qp_grp %u\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	/* steer into the group's first RQ */
	rq_idx = chunk->res[0]->vnic_idx;

	switch (qp_grp->transport) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		status = usnic_fwd_add_usnic_filter(qp_grp->ufdev,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					rq_idx,
					fwd_filter,
					&filter_hndl);
		break;
	default:
		usnic_err("Unable to install filter for qp_grp %u for transport %d",
				qp_grp->grp_id, qp_grp->transport);
		status = -EINVAL;
	}

	if (status)
		return status;

	list_add_tail(&filter_hndl->link, &qp_grp->filter_hndls);
	return 0;
}
116
/*
 * Remove and delete every installed forwarding filter.  Continues past
 * individual failures; returns 0 only if all deletions succeeded.
 * NOTE(review): status is a bitwise OR of negative errnos, so a nonzero
 * result means "something failed" but is not a meaningful error code.
 * Caller must hold qp_grp->lock.
 */
static int del_all_filters(struct usnic_ib_qp_grp *qp_grp)
{
	int err, status;
	struct usnic_fwd_filter_hndl *filter_hndl, *tmp;

	WARN_ON(!spin_is_locked(&qp_grp->lock));

	status = 0;

	list_for_each_entry_safe(filter_hndl, tmp,
					&qp_grp->filter_hndls, link) {
		list_del(&filter_hndl->link);
		err = usnic_fwd_del_filter(filter_hndl);
		if (err) {
			usnic_err("Failed to delete filter %u of qp_grp %d\n",
					filter_hndl->id, qp_grp->grp_id);
		}
		status |= err;
	}

	return status;
}
139
140static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
141{
142
143 int status;
144 int i, vnic_idx;
145 struct usnic_vnic_res_chunk *res_chunk;
146 struct usnic_vnic_res *res;
147
148 WARN_ON(!spin_is_locked(&qp_grp->lock));
149
150 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
151
152 res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
153 if (IS_ERR_OR_NULL(res_chunk)) {
154 usnic_err("Unable to get %s with err %ld\n",
155 usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
156 PTR_ERR(res_chunk));
157 return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
158 }
159
160 for (i = 0; i < res_chunk->cnt; i++) {
161 res = res_chunk->res[i];
162 status = usnic_fwd_enable_rq(qp_grp->ufdev, vnic_idx,
163 res->vnic_idx);
164 if (status) {
165 usnic_err("Failed to enable rq %d of %s:%d\n with err %d\n",
166 res->vnic_idx,
167 netdev_name(qp_grp->ufdev->netdev),
168 vnic_idx, status);
169 goto out_err;
170 }
171 }
172
173 return 0;
174
175out_err:
176 for (i--; i >= 0; i--) {
177 res = res_chunk->res[i];
178 usnic_fwd_disable_rq(qp_grp->ufdev, vnic_idx,
179 res->vnic_idx);
180 }
181
182 return status;
183}
184
185static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
186{
187 int i, vnic_idx;
188 struct usnic_vnic_res_chunk *res_chunk;
189 struct usnic_vnic_res *res;
190 int status = 0;
191
192 WARN_ON(!spin_is_locked(&qp_grp->lock));
193 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
194
195 res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
196 if (IS_ERR_OR_NULL(res_chunk)) {
197 usnic_err("Unable to get %s with err %ld\n",
198 usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
199 PTR_ERR(res_chunk));
200 return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
201 }
202
203 for (i = 0; i < res_chunk->cnt; i++) {
204 res = res_chunk->res[i];
205 status = usnic_fwd_disable_rq(qp_grp->ufdev, vnic_idx,
206 res->vnic_idx);
207 if (status) {
208 usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n",
209 res->vnic_idx,
210 netdev_name(qp_grp->ufdev->netdev),
211 vnic_idx, status);
212 }
213 }
214
215 return status;
216
217}
218
219int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
220 enum ib_qp_state new_state,
221 struct usnic_fwd_filter *fwd_filter)
222{
223 int status = 0;
224 int vnic_idx;
225 struct ib_event ib_event;
226 enum ib_qp_state old_state;
227
228 old_state = qp_grp->state;
229 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
230
231 spin_lock(&qp_grp->lock);
232 switch (new_state) {
233 case IB_QPS_RESET:
234 switch (old_state) {
235 case IB_QPS_RESET:
236 /* NO-OP */
237 break;
238 case IB_QPS_INIT:
239 status = del_all_filters(qp_grp);
240 break;
241 case IB_QPS_RTR:
242 case IB_QPS_RTS:
243 case IB_QPS_ERR:
244 status = disable_qp_grp(qp_grp);
245 status &= del_all_filters(qp_grp);
246 break;
247 default:
248 status = -EINVAL;
249 }
250 break;
251 case IB_QPS_INIT:
252 switch (old_state) {
253 case IB_QPS_RESET:
254 status = add_fwd_filter(qp_grp, fwd_filter);
255 break;
256 case IB_QPS_INIT:
257 status = add_fwd_filter(qp_grp, fwd_filter);
258 break;
259 case IB_QPS_RTR:
260 status = disable_qp_grp(qp_grp);
261 break;
262 case IB_QPS_RTS:
263 status = disable_qp_grp(qp_grp);
264 break;
265 default:
266 status = -EINVAL;
267 }
268 break;
269 case IB_QPS_RTR:
270 switch (old_state) {
271 case IB_QPS_INIT:
272 status = enable_qp_grp(qp_grp);
273 break;
274 default:
275 status = -EINVAL;
276 }
277 break;
278 case IB_QPS_RTS:
279 switch (old_state) {
280 case IB_QPS_RTR:
281 /* NO-OP FOR NOW */
282 break;
283 default:
284 status = -EINVAL;
285 }
286 break;
287 case IB_QPS_ERR:
288 ib_event.device = &qp_grp->vf->pf->ib_dev;
289 ib_event.element.qp = &qp_grp->ibqp;
290 ib_event.event = IB_EVENT_QP_FATAL;
291
292 switch (old_state) {
293 case IB_QPS_RESET:
294 qp_grp->ibqp.event_handler(&ib_event,
295 qp_grp->ibqp.qp_context);
296 break;
297 case IB_QPS_INIT:
298 status = del_all_filters(qp_grp);
299 qp_grp->ibqp.event_handler(&ib_event,
300 qp_grp->ibqp.qp_context);
301 break;
302 case IB_QPS_RTR:
303 case IB_QPS_RTS:
304 status = disable_qp_grp(qp_grp);
305 status &= del_all_filters(qp_grp);
306 qp_grp->ibqp.event_handler(&ib_event,
307 qp_grp->ibqp.qp_context);
308 break;
309 default:
310 status = -EINVAL;
311 }
312 break;
313 default:
314 status = -EINVAL;
315 }
316 spin_unlock(&qp_grp->lock);
317
318 if (!status) {
319 qp_grp->state = new_state;
320 usnic_info("Transistioned %u from %s to %s",
321 qp_grp->grp_id,
322 usnic_ib_qp_grp_state_to_string(old_state),
323 usnic_ib_qp_grp_state_to_string(new_state));
324 } else {
325 usnic_err("Failed to transistion %u from %s to %s",
326 qp_grp->grp_id,
327 usnic_ib_qp_grp_state_to_string(old_state),
328 usnic_ib_qp_grp_state_to_string(new_state));
329 }
330
331 return status;
332}
333
334static struct usnic_vnic_res_chunk**
335alloc_res_chunk_list(struct usnic_vnic *vnic,
336 struct usnic_vnic_res_spec *res_spec, void *owner_obj)
337{
338 enum usnic_vnic_res_type res_type;
339 struct usnic_vnic_res_chunk **res_chunk_list;
340 int err, i, res_cnt, res_lst_sz;
341
342 for (res_lst_sz = 0;
343 res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
344 res_lst_sz++) {
345 /* Do Nothing */
346 }
347
348 res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
349 GFP_ATOMIC);
350 if (!res_chunk_list)
351 return ERR_PTR(-ENOMEM);
352
353 for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
354 i++) {
355 res_type = res_spec->resources[i].type;
356 res_cnt = res_spec->resources[i].cnt;
357
358 res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
359 res_cnt, owner_obj);
360 if (IS_ERR_OR_NULL(res_chunk_list[i])) {
361 err = (res_chunk_list[i] ?
362 PTR_ERR(res_chunk_list[i]) : -ENOMEM);
363 usnic_err("Failed to get %s from %s with err %d\n",
364 usnic_vnic_res_type_to_str(res_type),
365 usnic_vnic_pci_name(vnic),
366 err);
367 goto out_free_res;
368 }
369 }
370
371 return res_chunk_list;
372
373out_free_res:
374 for (i--; i > 0; i--)
375 usnic_vnic_put_resources(res_chunk_list[i]);
376 kfree(res_chunk_list);
377 return ERR_PTR(err);
378}
379
/* Release every chunk of a NULL-terminated list, then the list itself */
static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	struct usnic_vnic_res_chunk **chunk;

	for (chunk = res_chunk_list; *chunk; chunk++)
		usnic_vnic_put_resources(*chunk);
	kfree(res_chunk_list);
}
387
/*
 * Bind a QP group to a VF.  The first group on a VF attaches the VF's
 * PCI device to the PD's IOMMU domain; all later groups must use the
 * same PD.  Caller must hold vf->lock.
 */
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	WARN_ON(!spin_is_locked(&vf->lock));

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	/* all QP groups on one VF must share the same PD */
	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}
414
/*
 * Undo qp_grp_and_vf_bind(); the last group on a VF detaches the VF's
 * PCI device from the PD's IOMMU domain.  Caller must hold vf->lock.
 */
static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	WARN_ON(!spin_is_locked(&qp_grp->vf->lock));

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}
430
/* Dump a resource spec to the debug log */
static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char spec_buf[512];

	usnic_vnic_spec_dump(spec_buf, sizeof(spec_buf), res_spec);
	usnic_dbg("%s\n", spec_buf);
}
437
438struct usnic_ib_qp_grp *
439usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev,
440 struct usnic_ib_vf *vf,
441 struct usnic_ib_pd *pd,
442 struct usnic_vnic_res_spec *res_spec,
443 enum usnic_transport_type transport)
444{
445 struct usnic_ib_qp_grp *qp_grp;
446 u16 port_num;
447 int err;
448
449 WARN_ON(!spin_is_locked(&vf->lock));
450
451 err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
452 res_spec);
453 if (err) {
454 usnic_err("Spec does not meet miniumum req for transport %d\n",
455 transport);
456 log_spec(res_spec);
457 return ERR_PTR(err);
458 }
459
460 port_num = usnic_transport_rsrv_port(transport, 0);
461 if (!port_num) {
462 usnic_err("Unable to allocate port for %s\n",
463 netdev_name(ufdev->netdev));
464 return ERR_PTR(-EINVAL);
465 }
466
467 qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
468 if (!qp_grp) {
469 usnic_err("Unable to alloc qp_grp - Out of memory\n");
470 return NULL;
471 }
472
473 qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
474 qp_grp);
475 if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
476 err = qp_grp->res_chunk_list ?
477 PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
478 usnic_err("Unable to alloc res for %d with err %d\n",
479 qp_grp->grp_id, err);
480 goto out_free_port;
481 }
482
483 INIT_LIST_HEAD(&qp_grp->filter_hndls);
484 spin_lock_init(&qp_grp->lock);
485 qp_grp->ufdev = ufdev;
486 qp_grp->transport = transport;
487 qp_grp->filters[DFLT_FILTER_IDX].transport = transport;
488 qp_grp->filters[DFLT_FILTER_IDX].port_num = port_num;
489 qp_grp->state = IB_QPS_RESET;
490 qp_grp->owner_pid = current->pid;
491
492 /* qp_num is same as default filter port_num */
493 qp_grp->ibqp.qp_num = qp_grp->filters[DFLT_FILTER_IDX].port_num;
494 qp_grp->grp_id = qp_grp->ibqp.qp_num;
495
496 err = qp_grp_and_vf_bind(vf, pd, qp_grp);
497 if (err)
498 goto out_free_port;
499
500 usnic_ib_sysfs_qpn_add(qp_grp);
501
502 return qp_grp;
503
504out_free_port:
505 kfree(qp_grp);
506 usnic_transport_unrsrv_port(transport, port_num);
507
508 return ERR_PTR(err);
509}
510
/*
 * Free a QP group made by usnic_ib_qp_grp_create().  The group must be
 * back in RESET (filters removed, RQs disabled) and the owning VF's
 * lock must be held.
 */
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	u16 default_port_num;
	enum usnic_transport_type transport;

	WARN_ON(qp_grp->state != IB_QPS_RESET);
	WARN_ON(!spin_is_locked(&qp_grp->vf->lock));

	transport = qp_grp->filters[DFLT_FILTER_IDX].transport;
	default_port_num = qp_grp->filters[DFLT_FILTER_IDX].port_num;

	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
	/* release the transport port that doubled as the QP number */
	usnic_transport_unrsrv_port(transport, default_port_num);
}
528
529struct usnic_vnic_res_chunk*
530usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
531 enum usnic_vnic_res_type res_type)
532{
533 int i;
534
535 for (i = 0; qp_grp->res_chunk_list[i]; i++) {
536 if (qp_grp->res_chunk_list[i]->type == res_type)
537 return qp_grp->res_chunk_list[i];
538 }
539
540 return ERR_PTR(-EINVAL);
541}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
new file mode 100644
index 000000000000..6416a956dc4a
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_IB_QP_GRP_H_
20#define USNIC_IB_QP_GRP_H_
21
22#include <rdma/ib_verbs.h>
23
24#include "usnic_ib.h"
25#include "usnic_abi.h"
26#include "usnic_fwd.h"
27#include "usnic_vnic.h"
28
29#define MAX_QP_GRP_FILTERS 10
30#define DFLT_FILTER_IDX 0
31
/*
 * The qp group struct represents all the hw resources needed to present a ib_qp
 */
struct usnic_ib_qp_grp {
	struct ib_qp ibqp;		/* verbs QP; see to_uqp_grp() */
	enum ib_qp_state state;		/* current verbs state of the group */
	int grp_id;			/* equals ibqp.qp_num */

	struct usnic_fwd_dev *ufdev;	/* forwarding dev of the owning PF */
	short unsigned filter_cnt;	/* active entries in filters[] */
	struct usnic_fwd_filter filters[MAX_QP_GRP_FILTERS];
	struct list_head filter_hndls;	/* usnic_fwd_filter_hndl list */
	enum usnic_transport_type transport;
	struct usnic_ib_ucontext *ctx;	/* owning user context */

	/* NULL-terminated array of vnic resource chunks (WQ/RQ/CQ) */
	struct usnic_vnic_res_chunk **res_chunk_list;

	pid_t owner_pid;		/* pid that created the QP */
	struct usnic_ib_vf *vf;		/* VF the resources come from */
	struct list_head link;		/* entry in ctx->qp_grp_list */

	spinlock_t lock;

	struct kobject kobj;		/* sysfs .../qpn/<grp_id> entry */
};
57
/*
 * Minimum vnic resources required per transport type; each spec is an
 * EOL-terminated list of {type, count} pairs, indexed by transport.
 */
static const struct
usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
	{ /*USNIC_TRANSPORT_UNKNOWN*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
	{ /*USNIC_TRANSPORT_ROCE_CUSTOM*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_WQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_RQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_CQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
};
74
75const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state);
76int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz);
77int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz);
78struct usnic_ib_qp_grp *
79usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
80 struct usnic_ib_pd *pd,
81 struct usnic_vnic_res_spec *res_spec,
82 enum usnic_transport_type transport);
83void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp);
84int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
85 enum ib_qp_state new_state,
86 struct usnic_fwd_filter *fwd_filter);
87struct usnic_vnic_res_chunk
88*usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
89 enum usnic_vnic_res_type type);
/* Convert a core ib_qp back to its containing usnic qp group. */
static inline
struct usnic_ib_qp_grp *to_uqp_grp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct usnic_ib_qp_grp, ibqp);
}
95#endif /* USNIC_IB_QP_GRP_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
new file mode 100644
index 000000000000..bad985e9df08
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -0,0 +1,351 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/errno.h>
22
23#include <rdma/ib_user_verbs.h>
24#include <rdma/ib_addr.h>
25
26#include "usnic_common_util.h"
27#include "usnic_ib.h"
28#include "usnic_ib_qp_grp.h"
29#include "usnic_vnic.h"
30#include "usnic_ib_verbs.h"
31#include "usnic_log.h"
32
/* Advance write pointer P by N bytes and shrink remaining space L. */
#define UPDATE_PTR_LEFT(N, P, L)			\
do {							\
	L -= (N);					\
	P += (N);					\
} while (0)
38
39static ssize_t usnic_ib_show_fw_ver(struct device *device,
40 struct device_attribute *attr,
41 char *buf)
42{
43 struct usnic_ib_dev *us_ibdev =
44 container_of(device, struct usnic_ib_dev, ib_dev.dev);
45 struct ethtool_drvinfo info;
46
47 mutex_lock(&us_ibdev->usdev_lock);
48 us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
49 mutex_unlock(&us_ibdev->usdev_lock);
50
51 return scnprintf(buf, PAGE_SIZE, "%s\n", info.fw_version);
52}
53
54static ssize_t usnic_ib_show_board(struct device *device,
55 struct device_attribute *attr,
56 char *buf)
57{
58 struct usnic_ib_dev *us_ibdev =
59 container_of(device, struct usnic_ib_dev, ib_dev.dev);
60 unsigned short subsystem_device_id;
61
62 mutex_lock(&us_ibdev->usdev_lock);
63 subsystem_device_id = us_ibdev->pdev->subsystem_device;
64 mutex_unlock(&us_ibdev->usdev_lock);
65
66 return scnprintf(buf, PAGE_SIZE, "%hu\n", subsystem_device_id);
67}
68
/*
 * Report the configuration for this PF
 */
static ssize_t
usnic_ib_show_config(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct usnic_ib_dev *us_ibdev;
	char *ptr;		/* current write position in buf */
	unsigned left;		/* bytes remaining in buf */
	unsigned n;
	enum usnic_vnic_res_type res_type;

	us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);

	/* Buffer space limit is 1 page */
	ptr = buf;
	left = PAGE_SIZE;

	mutex_lock(&us_ibdev->usdev_lock);
	if (atomic_read(&us_ibdev->vf_cnt.refcount) > 0) {
		char *busname;

		/*
		 * bus name seems to come with annoying prefix.
		 * Remove it if it is predictable
		 */
		busname = us_ibdev->pdev->bus->name;
		if (strncmp(busname, "PCI Bus ", 8) == 0)
			busname += 8;

		n = scnprintf(ptr, left,
			"%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
			us_ibdev->ib_dev.name,
			busname,
			PCI_SLOT(us_ibdev->pdev->devfn),
			PCI_FUNC(us_ibdev->pdev->devfn),
			netdev_name(us_ibdev->netdev),
			us_ibdev->mac,
			atomic_read(&us_ibdev->vf_cnt.refcount));
		UPDATE_PTR_LEFT(n, ptr, left);

		/*
		 * NOTE(review): loop starts at USNIC_VNIC_RES_TYPE_EOL -
		 * presumably EOL is the first enum value so all real types
		 * are visited; confirm against the enum definition.
		 */
		for (res_type = USNIC_VNIC_RES_TYPE_EOL;
				res_type < USNIC_VNIC_RES_TYPE_MAX;
				res_type++) {
			if (us_ibdev->vf_res_cnt[res_type] == 0)
				continue;
			n = scnprintf(ptr, left, " %d %s%s",
				us_ibdev->vf_res_cnt[res_type],
				usnic_vnic_res_type_to_str(res_type),
				(res_type < (USNIC_VNIC_RES_TYPE_MAX - 1)) ?
				 "," : "");
			UPDATE_PTR_LEFT(n, ptr, left);
		}
		n = scnprintf(ptr, left, "\n");
		UPDATE_PTR_LEFT(n, ptr, left);
	} else {
		n = scnprintf(ptr, left, "%s: no VFs\n",
				us_ibdev->ib_dev.name);
		UPDATE_PTR_LEFT(n, ptr, left);
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return ptr - buf;
}
134
135static ssize_t
136usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
137 char *buf)
138{
139 struct usnic_ib_dev *us_ibdev;
140
141 us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
142
143 return scnprintf(buf, PAGE_SIZE, "%s\n",
144 netdev_name(us_ibdev->netdev));
145}
146
147static ssize_t
148usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
149 char *buf)
150{
151 struct usnic_ib_dev *us_ibdev;
152
153 us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
154
155 return scnprintf(buf, PAGE_SIZE, "%u\n",
156 atomic_read(&us_ibdev->vf_cnt.refcount));
157}
158
159static ssize_t
160usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
161 char *buf)
162{
163 struct usnic_ib_dev *us_ibdev;
164 int qp_per_vf;
165
166 us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
167 qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
168 us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
169
170 return scnprintf(buf, PAGE_SIZE,
171 "%d\n", qp_per_vf);
172}
173
174static ssize_t
175usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
176 char *buf)
177{
178 struct usnic_ib_dev *us_ibdev;
179
180 us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
181
182 return scnprintf(buf, PAGE_SIZE, "%d\n",
183 us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
184}
185
186static DEVICE_ATTR(fw_ver, S_IRUGO, usnic_ib_show_fw_ver, NULL);
187static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL);
188static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL);
189static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);
190static DEVICE_ATTR(max_vf, S_IRUGO, usnic_ib_show_max_vf, NULL);
191static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL);
192static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL);
193
194static struct device_attribute *usnic_class_attributes[] = {
195 &dev_attr_fw_ver,
196 &dev_attr_board_id,
197 &dev_attr_config,
198 &dev_attr_iface,
199 &dev_attr_max_vf,
200 &dev_attr_qp_per_vf,
201 &dev_attr_cq_per_vf,
202};
203
204struct qpn_attribute {
205 struct attribute attr;
206 ssize_t (*show)(struct usnic_ib_qp_grp *, char *buf);
207};
208
209/*
210 * Definitions for supporting QPN entries in sysfs
211 */
212static ssize_t
213usnic_ib_qpn_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
214{
215 struct usnic_ib_qp_grp *qp_grp;
216 struct qpn_attribute *qpn_attr;
217
218 qp_grp = container_of(kobj, struct usnic_ib_qp_grp, kobj);
219 qpn_attr = container_of(attr, struct qpn_attribute, attr);
220
221 return qpn_attr->show(qp_grp, buf);
222}
223
224static const struct sysfs_ops usnic_ib_qpn_sysfs_ops = {
225 .show = usnic_ib_qpn_attr_show
226};
227
228#define QPN_ATTR_RO(NAME) \
229struct qpn_attribute qpn_attr_##NAME = __ATTR_RO(NAME)
230
/* Per-QPN "context" attribute: the user context owning this group. */
static ssize_t context_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%p\n", qp_grp->ctx);
}
235
/*
 * Per-QPN "summary" attribute: one line with qp number, state, owner
 * pid, VF index, default filter id, and every vnic resource (type[idx])
 * the group holds.
 */
static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
{
	int i, j, n;
	int left;
	char *ptr;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_fwd_filter_hndl *default_filter_hndl;
	struct usnic_vnic_res *vnic_res;

	left = PAGE_SIZE;
	ptr = buf;
	/* first handle on the list is treated as the default filter */
	default_filter_hndl = list_first_entry(&qp_grp->filter_hndls,
					struct usnic_fwd_filter_hndl, link);

	n = scnprintf(ptr, left,
			"QPN: %d State: (%s) PID: %u VF Idx: %hu Filter ID: 0x%x ",
			qp_grp->ibqp.qp_num,
			usnic_ib_qp_grp_state_to_string(qp_grp->state),
			qp_grp->owner_pid,
			usnic_vnic_get_index(qp_grp->vf->vnic),
			default_filter_hndl->id);
	UPDATE_PTR_LEFT(n, ptr, left);

	/* res_chunk_list is NULL-terminated */
	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		res_chunk = qp_grp->res_chunk_list[i];
		for (j = 0; j < res_chunk->cnt; j++) {
			vnic_res = res_chunk->res[j];
			n = scnprintf(ptr, left, "%s[%d] ",
				usnic_vnic_res_type_to_str(vnic_res->type),
				vnic_res->vnic_idx);
			UPDATE_PTR_LEFT(n, ptr, left);
		}
	}

	n = scnprintf(ptr, left, "\n");
	UPDATE_PTR_LEFT(n, ptr, left);

	return ptr - buf;
}
275
276static QPN_ATTR_RO(context);
277static QPN_ATTR_RO(summary);
278
279static struct attribute *usnic_ib_qpn_default_attrs[] = {
280 &qpn_attr_context.attr,
281 &qpn_attr_summary.attr,
282 NULL
283};
284
285static struct kobj_type usnic_ib_qpn_type = {
286 .sysfs_ops = &usnic_ib_qpn_sysfs_ops,
287 .default_attrs = usnic_ib_qpn_default_attrs
288};
289
290int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
291{
292 int i;
293 int err;
294 for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
295 err = device_create_file(&us_ibdev->ib_dev.dev,
296 usnic_class_attributes[i]);
297 if (err) {
298 usnic_err("Failed to create device file %d for %s eith err %d",
299 i, us_ibdev->ib_dev.name, err);
300 return -EINVAL;
301 }
302 }
303
304 /* create kernel object for looking at individual QPs */
305 kobject_get(&us_ibdev->ib_dev.dev.kobj);
306 us_ibdev->qpn_kobj = kobject_create_and_add("qpn",
307 &us_ibdev->ib_dev.dev.kobj);
308 if (us_ibdev->qpn_kobj == NULL) {
309 kobject_put(&us_ibdev->ib_dev.dev.kobj);
310 return -ENOMEM;
311 }
312
313 return 0;
314}
315
316void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev)
317{
318 int i;
319 for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
320 device_remove_file(&us_ibdev->ib_dev.dev,
321 usnic_class_attributes[i]);
322 }
323
324 kobject_put(us_ibdev->qpn_kobj);
325}
326
/*
 * Add a sysfs entry named after the group id under the device's "qpn"
 * kobject.  Takes an extra reference on qpn_kobj which is dropped here
 * on failure or in usnic_ib_sysfs_qpn_remove() on success.
 */
void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_dev *us_ibdev;
	int err;

	us_ibdev = qp_grp->vf->pf;

	err = kobject_init_and_add(&qp_grp->kobj, &usnic_ib_qpn_type,
			kobject_get(us_ibdev->qpn_kobj),
			"%d", qp_grp->grp_id);
	if (err) {
		/* NOTE(review): failure is silent - the caller cannot tell
		 * the sysfs entry is missing. */
		kobject_put(us_ibdev->qpn_kobj);
		return;
	}
}
342
343void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp)
344{
345 struct usnic_ib_dev *us_ibdev;
346
347 us_ibdev = qp_grp->vf->pf;
348
349 kobject_put(&qp_grp->kobj);
350 kobject_put(us_ibdev->qpn_kobj);
351}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
new file mode 100644
index 000000000000..0d09b493cd02
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_IB_SYSFS_H_
20#define USNIC_IB_SYSFS_H_
21
22#include "usnic_ib.h"
23
24int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev);
25void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev);
26void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp);
27void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp);
28
29#endif /* !USNIC_IB_SYSFS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
new file mode 100644
index 000000000000..8f8dfa2672b0
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -0,0 +1,736 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/errno.h>
22
23#include <rdma/ib_user_verbs.h>
24#include <rdma/ib_addr.h>
25
26#include "usnic_abi.h"
27#include "usnic_ib.h"
28#include "usnic_common_util.h"
29#include "usnic_ib_qp_grp.h"
30#include "usnic_fwd.h"
31#include "usnic_log.h"
32#include "usnic_uiom.h"
33#include "usnic_transport.h"
34
35#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
36
/*
 * NOTE(review): this stores only the FIRST character of the firmware
 * version string into *fw_ver - presumably a placeholder until a real
 * string-to-u64 conversion exists; confirm before relying on fw_ver.
 */
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	*fw_ver = (u64) *fw_ver_str;
}
41
/*
 * Build and copy to userspace the create-QP response: VF index, BAR0
 * bus address/length, transport type, and the vnic indices of the
 * RQ/WQ/CQ resources the qp group owns.  Returns 0 or negative errno.
 */
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	/* userspace maps BAR0 to drive the queues directly */
	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;
	resp.transport = qp_grp->transport;

	/* receive queue indices */
	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	/* work queue indices */
	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	/* completion queue indices */
	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}
124
/*
 * Pick a VF with room for @res_spec and create a qp group on it.
 * Prefers a VF already in use by @pd (when usnic_ib_share_vf), then
 * falls back to an unused VF.
 *
 * Returns the new group, an ERR_PTR, or NULL when there are no VFs or
 * the PF link is down (callers test with IS_ERR_OR_NULL).
 * Caller must hold us_ibdev->usdev_lock.
 */
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				enum usnic_transport_type transport,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i, found = 0;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (!us_ibdev->link_up) {
		usnic_info("Cannot allocate qp b/c PF link is down\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resouces on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
									vnic)));
				/* break with vf->lock still held */
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);

		}
		usnic_uiom_free_dev_list(dev_list);
	}

	if (!found) {
		/* Try to find resources on an unused vf */
		list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (vf->qp_grp_ref_cnt == 0 &&
				usnic_vnic_check_room(vnic, res_spec) == 0) {
				/* break with vf->lock still held */
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}
	}

	if (!found) {
		usnic_info("No free qp grp found on %s\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-ENOMEM);
	}

	/* vf->lock is held from the successful break above */
	qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
						transport);
	spin_unlock(&vf->lock);
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}

	return qp_grp;
}
201
202static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
203{
204 struct usnic_ib_vf *vf = qp_grp->vf;
205
206 WARN_ON(qp_grp->state != IB_QPS_RESET);
207
208 spin_lock(&vf->lock);
209 usnic_ib_qp_grp_destroy(qp_grp);
210 spin_unlock(&vf->lock);
211}
212
213static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
214 u8 *active_width)
215{
216 if (speed <= 10000) {
217 *active_width = IB_WIDTH_1X;
218 *active_speed = IB_SPEED_FDR10;
219 } else if (speed <= 20000) {
220 *active_width = IB_WIDTH_4X;
221 *active_speed = IB_SPEED_DDR;
222 } else if (speed <= 30000) {
223 *active_width = IB_WIDTH_4X;
224 *active_speed = IB_SPEED_QDR;
225 } else if (speed <= 40000) {
226 *active_width = IB_WIDTH_4X;
227 *active_speed = IB_SPEED_FDR10;
228 } else {
229 *active_width = IB_WIDTH_4X;
230 *active_speed = IB_SPEED_EDR;
231 }
232}
233
234/* Start of ib callback functions */
235
/* usNIC ports always run over Ethernet. */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
241
/*
 * Report device capabilities to the IB core.  Most limits are derived
 * from the per-VF resource counts times the number of VFs; fields the
 * device does not support are zeroed by the memset.
 */
int usnic_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	struct ethtool_cmd cmd;
	int qp_per_vf;

	usnic_dbg("\n");
	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));
	/* system image GUID is derived from the PF MAC address */
	usnic_mac_to_gid(us_ibdev->mac, &gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	/* each QP needs one WQ and one RQ, so a VF hosts max(WQ,RQ) QPs */
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
296
/*
 * Report port attributes.  State tracks the PF netdev link; speed and
 * width are mapped from the ethtool link speed.
 */
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	struct ethtool_cmd cmd;

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));

	/* Ethernet device: no LIDs or SM */
	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	if (us_ibdev->link_up) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;	/* LinkUp */
	} else {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;	/* Disabled */
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
				&props->active_width);
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
338
339int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
340 int qp_attr_mask,
341 struct ib_qp_init_attr *qp_init_attr)
342{
343 struct usnic_ib_qp_grp *qp_grp;
344 struct usnic_ib_vf *vf;
345 int err;
346
347 usnic_dbg("\n");
348
349 memset(qp_attr, 0, sizeof(*qp_attr));
350 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
351
352 qp_grp = to_uqp_grp(qp);
353 vf = qp_grp->vf;
354 mutex_lock(&vf->pf->usdev_lock);
355 usnic_dbg("\n");
356 qp_attr->qp_state = qp_grp->state;
357 qp_attr->cur_qp_state = qp_grp->state;
358
359 switch (qp_grp->ibqp.qp_type) {
360 case IB_QPT_UD:
361 qp_attr->qkey = 0;
362 break;
363 default:
364 usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
365 err = -EINVAL;
366 goto err_out;
367 }
368
369 mutex_unlock(&vf->pf->usdev_lock);
370 return 0;
371
372err_out:
373 mutex_unlock(&vf->pf->usdev_lock);
374 return err;
375}
376
377int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
378 union ib_gid *gid)
379{
380
381 struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
382 usnic_dbg("\n");
383
384 if (index > 1)
385 return -EINVAL;
386
387 mutex_lock(&us_ibdev->usdev_lock);
388 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
389 usnic_mac_to_gid(us_ibdev->mac, &gid->raw[0]);
390 mutex_unlock(&us_ibdev->usdev_lock);
391
392 return 0;
393}
394
395int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
396 u16 *pkey)
397{
398 if (index > 1)
399 return -EINVAL;
400
401 *pkey = 0xffff;
402 return 0;
403}
404
405struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
406 struct ib_ucontext *context,
407 struct ib_udata *udata)
408{
409 struct usnic_ib_pd *pd;
410 void *umem_pd;
411
412 usnic_dbg("\n");
413
414 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
415 if (!pd)
416 return ERR_PTR(-ENOMEM);
417
418 umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
419 if (IS_ERR_OR_NULL(umem_pd)) {
420 kfree(pd);
421 return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
422 }
423
424 usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
425 pd, context, ibdev->name);
426 return &pd->ibpd;
427}
428
429int usnic_ib_dealloc_pd(struct ib_pd *pd)
430{
431 usnic_info("freeing domain 0x%p\n", pd);
432
433 usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
434 kfree(pd);
435 return 0;
436}
437
438struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
439 struct ib_qp_init_attr *init_attr,
440 struct ib_udata *udata)
441{
442 int err;
443 struct usnic_ib_dev *us_ibdev;
444 struct usnic_ib_qp_grp *qp_grp;
445 struct usnic_ib_ucontext *ucontext;
446 int cq_cnt;
447 struct usnic_vnic_res_spec res_spec;
448
449 usnic_dbg("\n");
450
451 ucontext = to_uucontext(pd->uobject->context);
452 us_ibdev = to_usdev(pd->device);
453
454 if (init_attr->qp_type != IB_QPT_UD) {
455 usnic_err("%s asked to make a non-UD QP: %d\n",
456 us_ibdev->ib_dev.name, init_attr->qp_type);
457 return ERR_PTR(-EINVAL);
458 }
459
460 mutex_lock(&us_ibdev->usdev_lock);
461 cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2,
462 res_spec = min_transport_spec[USNIC_DEFAULT_TRANSPORT];
463 usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
464 qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
465 USNIC_DEFAULT_TRANSPORT,
466 &res_spec);
467 if (IS_ERR_OR_NULL(qp_grp)) {
468 err = (qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
469 goto out_release_mutex;
470 }
471
472 err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
473 if (err) {
474 err = -EBUSY;
475 goto out_release_qp_grp;
476 }
477
478 qp_grp->ctx = ucontext;
479 list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
480 usnic_ib_log_vf(qp_grp->vf);
481 mutex_unlock(&us_ibdev->usdev_lock);
482 return &qp_grp->ibqp;
483
484out_release_qp_grp:
485 qp_grp_destroy(qp_grp);
486out_release_mutex:
487 mutex_unlock(&us_ibdev->usdev_lock);
488 return ERR_PTR(err);
489}
490
491int usnic_ib_destroy_qp(struct ib_qp *qp)
492{
493 struct usnic_ib_qp_grp *qp_grp;
494 struct usnic_ib_vf *vf;
495
496 usnic_dbg("\n");
497
498 qp_grp = to_uqp_grp(qp);
499 vf = qp_grp->vf;
500 mutex_lock(&vf->pf->usdev_lock);
501 if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
502 usnic_err("Failed to move qp grp %u to reset\n",
503 qp_grp->grp_id);
504 }
505
506 list_del(&qp_grp->link);
507 qp_grp_destroy(qp_grp);
508 mutex_unlock(&vf->pf->usdev_lock);
509
510 return 0;
511}
512
513int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
514 int attr_mask, struct ib_udata *udata)
515{
516 struct usnic_ib_qp_grp *qp_grp;
517 int status;
518 usnic_dbg("\n");
519
520 qp_grp = to_uqp_grp(ibqp);
521
522 /* TODO: Future Support All States */
523 mutex_lock(&qp_grp->vf->pf->usdev_lock);
524 if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
525 status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT,
526 &qp_grp->filters[DFLT_FILTER_IDX]);
527 } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
528 status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
529 } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
530 status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
531 } else {
532 usnic_err("Unexpected combination mask: %u state: %u\n",
533 attr_mask & IB_QP_STATE, attr->qp_state);
534 status = -EINVAL;
535 }
536
537 mutex_unlock(&qp_grp->vf->pf->usdev_lock);
538 return status;
539}
540
541struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
542 int vector, struct ib_ucontext *context,
543 struct ib_udata *udata)
544{
545 struct ib_cq *cq;
546
547 usnic_dbg("\n");
548 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
549 if (!cq)
550 return ERR_PTR(-EBUSY);
551
552 return cq;
553}
554
/* Free the placeholder CQ object; nothing else to tear down. */
int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}
561
562struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
563 u64 virt_addr, int access_flags,
564 struct ib_udata *udata)
565{
566 struct usnic_ib_mr *mr;
567 int err;
568
569 usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
570 virt_addr, length);
571
572 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
573 if (IS_ERR_OR_NULL(mr))
574 return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM);
575
576 mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
577 access_flags, 0);
578 if (IS_ERR_OR_NULL(mr->umem)) {
579 err = (mr->umem) ? PTR_ERR(mr->umem) : -EFAULT;
580 goto err_free;
581 }
582
583 mr->ibmr.lkey = mr->ibmr.rkey = 0;
584 return &mr->ibmr;
585
586err_free:
587 kfree(mr);
588 return ERR_PTR(err);
589}
590
/*
 * Deregister a memory region; the closing flag tells uiom whether the
 * owning context is being torn down.
 */
int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
	kfree(mr);
	return 0;
}
601
602struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
603 struct ib_udata *udata)
604{
605 struct usnic_ib_ucontext *context;
606 struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
607 usnic_dbg("\n");
608
609 context = kmalloc(sizeof(*context), GFP_KERNEL);
610 if (!context)
611 return ERR_PTR(-ENOMEM);
612
613 INIT_LIST_HEAD(&context->qp_grp_list);
614 mutex_lock(&us_ibdev->usdev_lock);
615 list_add_tail(&context->link, &us_ibdev->ctx_list);
616 mutex_unlock(&us_ibdev->usdev_lock);
617
618 return &context->ibucontext;
619}
620
621int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
622{
623 struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
624 struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
625 usnic_dbg("\n");
626
627 mutex_lock(&us_ibdev->usdev_lock);
628 BUG_ON(!list_empty(&context->qp_grp_list));
629 list_del(&context->link);
630 mutex_unlock(&us_ibdev->usdev_lock);
631 kfree(context);
632 return 0;
633}
634
/*
 * mmap handler: map BAR0 of the VF whose index equals vm_pgoff into
 * userspace, but only if a QP group of this context lives on that VF
 * and the requested length exactly matches the BAR length.
 */
int usnic_ib_mmap(struct ib_ucontext *context,
			struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* page offset encodes the VF index to map */
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);

			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}
685
686/* In ib callbacks section - Start of stub funcs */
/* Stub: address handles are not supported; always -EPERM. */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
					struct ib_ah_attr *ah_attr)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}
693
/* Stub: no AHs can exist, so there is nothing to destroy. */
int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}
699
/* Stub: data path is entirely in userspace; kernel post is invalid. */
int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}
706
/* Stub: data path is entirely in userspace; kernel post is invalid. */
int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}
713
/* Stub: CQs are polled from userspace, never through the kernel. */
int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}
720
/* Stub: CQ notification is not supported. */
int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}
727
/* Stub: DMA MRs are not supported. */
struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}
733
734
735/* In ib callbacks section - End of stub funcs */
736/* End of ib callbacks section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
new file mode 100644
index 000000000000..bb864f5aed70
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_IB_VERBS_H_
20#define USNIC_IB_VERBS_H_
21
22#include "usnic_ib.h"
23
24enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
25 u8 port_num);
26int usnic_ib_query_device(struct ib_device *ibdev,
27 struct ib_device_attr *props);
28int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
29 struct ib_port_attr *props);
30int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
31 int qp_attr_mask,
32 struct ib_qp_init_attr *qp_init_attr);
33int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
34 union ib_gid *gid);
35int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
36 u16 *pkey);
37struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
38 struct ib_ucontext *context,
39 struct ib_udata *udata);
40int usnic_ib_dealloc_pd(struct ib_pd *pd);
41struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
42 struct ib_qp_init_attr *init_attr,
43 struct ib_udata *udata);
44int usnic_ib_destroy_qp(struct ib_qp *qp);
45int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
46 int attr_mask, struct ib_udata *udata);
47struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
48 int vector, struct ib_ucontext *context,
49 struct ib_udata *udata);
50int usnic_ib_destroy_cq(struct ib_cq *cq);
51struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
52 u64 virt_addr, int access_flags,
53 struct ib_udata *udata);
54int usnic_ib_dereg_mr(struct ib_mr *ibmr);
55struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
56 struct ib_udata *udata);
57int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
58int usnic_ib_mmap(struct ib_ucontext *context,
59 struct vm_area_struct *vma);
60struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
61 struct ib_ah_attr *ah_attr);
62int usnic_ib_destroy_ah(struct ib_ah *ah);
63int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
64 struct ib_send_wr **bad_wr);
65int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
66 struct ib_recv_wr **bad_wr);
67int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
68 struct ib_wc *wc);
69int usnic_ib_req_notify_cq(struct ib_cq *cq,
70 enum ib_cq_notify_flags flags);
71struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc);
#endif /* !USNIC_IB_VERBS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_log.h b/drivers/infiniband/hw/usnic/usnic_log.h
new file mode 100644
index 000000000000..75777a66c684
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_log.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_LOG_H_
20#define USNIC_LOG_H_
21
22#include "usnic.h"
23
24extern unsigned int usnic_log_lvl;
25
26#define USNIC_LOG_LVL_NONE (0)
27#define USNIC_LOG_LVL_ERR (1)
28#define USNIC_LOG_LVL_INFO (2)
29#define USNIC_LOG_LVL_DBG (3)
30
/*
 * usnic_printk - emit one log line prefixed with driver name, function
 * name, and line number.  The prefix and the message body are two
 * separate printk() calls.
 */
#define usnic_printk(lvl, args...) \
	do { \
		printk(lvl "%s:%s:%d: ", DRV_NAME, __func__, \
				__LINE__); \
		printk(args); \
	} while (0)

/* Debug log: always compiled in, gated at runtime on usnic_log_lvl. */
#define usnic_dbg(args...) \
	do { \
		if (unlikely(usnic_log_lvl >= USNIC_LOG_LVL_DBG)) { \
			usnic_printk(KERN_INFO, args); \
		} \
} while (0)

/* Info log: printed when usnic_log_lvl >= USNIC_LOG_LVL_INFO. */
#define usnic_info(args...) \
do { \
	if (usnic_log_lvl >= USNIC_LOG_LVL_INFO) { \
		usnic_printk(KERN_INFO, args); \
	} \
} while (0)

/* Error log: printed when usnic_log_lvl >= USNIC_LOG_LVL_ERR. */
#define usnic_err(args...) \
	do { \
		if (usnic_log_lvl >= USNIC_LOG_LVL_ERR) { \
			usnic_printk(KERN_ERR, args); \
		} \
	} while (0)
58#endif /* !USNIC_LOG_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c
new file mode 100644
index 000000000000..723bd6c3a8f8
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_transport.c
@@ -0,0 +1,125 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18#include <linux/bitmap.h>
19#include <linux/module.h>
20#include <linux/slab.h>
21
22#include "usnic_transport.h"
23#include "usnic_log.h"
24
25/* ROCE */
26static unsigned long *roce_bitmap;
27static u16 roce_next_port = 1;
28#define ROCE_BITMAP_SZ ((1 << (8 /*CHAR_BIT*/ * sizeof(u16)))/8 /*CHAR BIT*/)
29static DEFINE_SPINLOCK(roce_bitmap_lock);
30
31static const char *transport_to_str(enum usnic_transport_type type)
32{
33 switch (type) {
34 case USNIC_TRANSPORT_UNKNOWN:
35 return "Unknown";
36 case USNIC_TRANSPORT_ROCE_CUSTOM:
37 return "roce custom";
38 case USNIC_TRANSPORT_MAX:
39 return "Max?";
40 default:
41 return "Not known";
42 }
43}
44
/*
 * Reserve a port number for the given transport type.  If @port_num is
 * 0, we pick one ourselves starting at roce_next_port; roce_next_port
 * takes on the values 1..4096.  Only USNIC_TRANSPORT_ROCE_CUSTOM is
 * supported.  Returns the reserved port number, or 0 on failure.
 */
u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num)
{
	if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
		spin_lock(&roce_bitmap_lock);
		if (!port_num) {
			/*
			 * NOTE(review): ROCE_BITMAP_SZ is a byte count,
			 * but bitmap_find_next_zero_area() takes a size
			 * in bits - confirm the intended search range.
			 */
			port_num = bitmap_find_next_zero_area(roce_bitmap,
						ROCE_BITMAP_SZ,
						roce_next_port /* start */,
						1 /* nr */,
						0 /* align */);
			/* Wrap the next search start back into 1..4096 */
			roce_next_port = (port_num & 4095) + 1;
		} else if (test_bit(port_num, roce_bitmap)) {
			/* Explicitly requested port is already taken */
			usnic_err("Failed to allocate port for %s\n",
					transport_to_str(type));
			spin_unlock(&roce_bitmap_lock);
			goto out_fail;
		}
		bitmap_set(roce_bitmap, port_num, 1);
		spin_unlock(&roce_bitmap_lock);
	} else {
		usnic_err("Failed to allocate port - transport %s unsupported\n",
				transport_to_str(type));
		goto out_fail;
	}

	usnic_dbg("Allocating port %hu for %s\n", port_num,
			transport_to_str(type));
	return port_num;

out_fail:
	return 0;
}
82
83void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num)
84{
85 if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
86 spin_lock(&roce_bitmap_lock);
87 if (!port_num) {
88 usnic_err("Unreserved unvalid port num 0 for %s\n",
89 transport_to_str(type));
90 goto out_roce_custom;
91 }
92
93 if (!test_bit(port_num, roce_bitmap)) {
94 usnic_err("Unreserving invalid %hu for %s\n",
95 port_num,
96 transport_to_str(type));
97 goto out_roce_custom;
98 }
99 bitmap_clear(roce_bitmap, port_num, 1);
100 usnic_dbg("Freeing port %hu for %s\n", port_num,
101 transport_to_str(type));
102out_roce_custom:
103 spin_unlock(&roce_bitmap_lock);
104 } else {
105 usnic_err("Freeing invalid port %hu for %d\n", port_num, type);
106 }
107}
108
109int usnic_transport_init(void)
110{
111 roce_bitmap = kzalloc(ROCE_BITMAP_SZ, GFP_KERNEL);
112 if (!roce_bitmap) {
113 usnic_err("Failed to allocate bit map");
114 return -ENOMEM;
115 }
116
117 /* Do not ever allocate bit 0, hence set it here */
118 bitmap_set(roce_bitmap, 0, 1);
119 return 0;
120}
121
/* Free the ROCE port-reservation bitmap allocated in init. */
void usnic_transport_fini(void)
{
	kfree(roce_bitmap);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.h b/drivers/infiniband/hw/usnic/usnic_transport.h
new file mode 100644
index 000000000000..091fdaf4d25a
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_transport.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_TRANSPORT_H_
20#define USNIC_TRANSPORT_H_
21
22#include "usnic_abi.h"
23
24u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num);
25void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num);
26int usnic_transport_init(void);
27void usnic_transport_fini(void);
#endif /* !USNIC_TRANSPORT_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
new file mode 100644
index 000000000000..c841a752dbd0
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -0,0 +1,603 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2013 Cisco Systems. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/mm.h>
36#include <linux/dma-mapping.h>
37#include <linux/sched.h>
38#include <linux/hugetlb.h>
39#include <linux/dma-attrs.h>
40#include <linux/iommu.h>
41#include <linux/workqueue.h>
42#include <linux/list.h>
43#include <linux/pci.h>
44
45#include "usnic_log.h"
46#include "usnic_uiom.h"
47#include "usnic_uiom_interval_tree.h"
48
49static struct workqueue_struct *usnic_uiom_wq;
50
51#define USNIC_UIOM_PAGE_CHUNK \
52 ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
53 ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
54 (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
55
/*
 * Deferred locked_vm accounting, run from usnic_uiom_wq when
 * usnic_uiom_reg_release() could not take mmap_sem itself: subtract the
 * registration's page count from the mm's locked_vm, drop the mm
 * reference, and free the registration.
 */
static void usnic_uiom_reg_account(struct work_struct *work)
{
	struct usnic_uiom_reg *umem = container_of(work,
						struct usnic_uiom_reg, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}
67
/*
 * IOMMU fault handler: log the faulting device, domain, address and
 * flags, and report the fault as unhandled (-ENOSYS).
 */
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}
78
/*
 * Release every page held in @chunk_list: optionally mark each page
 * dirty, unpin it, and free the chunk structures themselves.
 */
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			page = sg_page(&chunk->page_list[i]);
			pa = sg_phys(&chunk->page_list[i]);
			if (dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}
98
99static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
100 int dmasync, struct list_head *chunk_list)
101{
102 struct page **page_list;
103 struct usnic_uiom_chunk *chunk;
104 unsigned long locked;
105 unsigned long lock_limit;
106 unsigned long cur_base;
107 unsigned long npages;
108 int ret;
109 int off;
110 int i;
111 int flags;
112 dma_addr_t pa;
113 DEFINE_DMA_ATTRS(attrs);
114
115 if (dmasync)
116 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
117
118 if (!can_do_mlock())
119 return -EPERM;
120
121 INIT_LIST_HEAD(chunk_list);
122
123 page_list = (struct page **) __get_free_page(GFP_KERNEL);
124 if (!page_list)
125 return -ENOMEM;
126
127 npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
128
129 down_write(&current->mm->mmap_sem);
130
131 locked = npages + current->mm->locked_vm;
132 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
133
134 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
135 ret = -ENOMEM;
136 goto out;
137 }
138
139 flags = IOMMU_READ | IOMMU_CACHE;
140 flags |= (writable) ? IOMMU_WRITE : 0;
141 cur_base = addr & PAGE_MASK;
142 ret = 0;
143
144 while (npages) {
145 ret = get_user_pages(current, current->mm, cur_base,
146 min_t(unsigned long, npages,
147 PAGE_SIZE / sizeof(struct page *)),
148 1, !writable, page_list, NULL);
149
150 if (ret < 0)
151 goto out;
152
153 npages -= ret;
154 off = 0;
155
156 while (ret) {
157 chunk = kmalloc(sizeof(*chunk) +
158 sizeof(struct scatterlist) *
159 min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
160 GFP_KERNEL);
161 if (!chunk) {
162 ret = -ENOMEM;
163 goto out;
164 }
165
166 chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
167 sg_init_table(chunk->page_list, chunk->nents);
168 for (i = 0; i < chunk->nents; ++i) {
169 sg_set_page(&chunk->page_list[i],
170 page_list[i + off],
171 PAGE_SIZE, 0);
172 pa = sg_phys(&chunk->page_list[i]);
173 usnic_dbg("va: 0x%lx pa: %pa\n",
174 cur_base + i*PAGE_SIZE, &pa);
175 }
176 cur_base += chunk->nents * PAGE_SIZE;
177 ret -= chunk->nents;
178 off += chunk->nents;
179 list_add_tail(&chunk->list, chunk_list);
180 }
181
182 ret = 0;
183 }
184
185out:
186 if (ret < 0)
187 usnic_uiom_put_pages(chunk_list, 0);
188 else
189 current->mm->locked_vm = locked;
190
191 up_write(&current->mm->mmap_sem);
192 free_page((unsigned long) page_list);
193 return ret;
194}
195
/*
 * Remove the IOMMU mappings covering every interval on @intervals.
 * Unmaps one page at a time (see workaround note below) rather than the
 * whole range in a single call.
 */
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	long unsigned va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}
214
/*
 * Remove @uiomr's interval range from the pd's tree, unmap it from the
 * IOMMU, and unpin its pages.  Pages are marked dirty only if @dirty is
 * set AND at least one removed interval was mapped writable.
 */
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	/* dirty and writable are 0/1 flags, so bitwise & acts as logical AND */
	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}
244
/*
 * IOMMU-map every interval in @intervals using the pages pinned in
 * uiomr->chunk_list.  Walks the chunk scatterlists and the sorted
 * interval list in lockstep, coalescing runs of physically contiguous
 * pages into a single iommu_map() call.  On failure, everything mapped
 * so far for these intervals is unmapped again.  Called with the pd
 * lock held (see usnic_uiom_reg_get).
 */
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;	/* sentinel: no run started yet */
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
					list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa 0x%pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				/* Start a new contiguous run at this page */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
						va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						struct usnic_uiom_chunk,
						list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}
334
/*
 * Register [addr, addr + size) with @pd: pin the backing user pages,
 * IOMMU-map the sub-ranges not already mapped in the pd (the "diff"
 * intervals), and insert the full range into the pd's interval tree.
 * @writable is forced to 1 (see comment below).  Returns the new
 * registration or an ERR_PTR on failure.
 */
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write.  This module may not unmap
	 * and then remap the entry after fixing the permission
	 * b/c this open up a small windows where hw DMA may page fault
	 * Hence, make all entries to be writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					&uiomr->chunk_list);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	/* Which parts of [vpn_start, vpn_last] are not mapped yet? */
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->rb_root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_intervals;

	}

	err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}
422
/*
 * Tear down a registration: unmap and unpin its pages, then subtract
 * the page count from the mm's locked_vm.  @closing indicates we may be
 * running with mmap_sem already held, in which case the accounting is
 * deferred to usnic_uiom_wq (see comment below).
 */
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
{
	struct mm_struct *mm;
	unsigned long diff;

	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	mm = get_task_mm(current);
	if (!mm) {
		/* No mm to account against; just free the registration */
		kfree(uiomr);
		return;
	}

	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held. This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method. If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem. In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
			uiomr->mm = mm;
			uiomr->diff = diff;

			/* usnic_uiom_reg_account() will free uiomr and mm ref */
			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	current->mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(uiomr);
}
463
/*
 * Allocate a protection domain backed by a fresh IOMMU domain on the
 * PCI bus, with usnic_uiom_dma_fault() installed as its fault handler.
 * Returns the pd or an ERR_PTR on failure.
 */
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (IS_ERR_OR_NULL(domain)) {
		usnic_err("Failed to allocate IOMMU domain with err %ld\n",
				PTR_ERR(pd->domain));
		kfree(pd);
		/* NULL (no domain) is mapped to -ENOMEM */
		return ERR_PTR(domain ? PTR_ERR(domain) : -ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}
488
/* Free @pd's IOMMU domain and the pd structure itself. */
void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}
494
/*
 * Attach @dev to the pd's IOMMU domain and track it on pd->devs.  The
 * IOMMU must support cache coherency (mappings are created with
 * IOMMU_CACHE); otherwise the device is detached again and -EINVAL is
 * returned.
 */
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_KERNEL);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}
529
530void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
531{
532 struct usnic_uiom_dev *uiom_dev;
533 int found = 0;
534
535 spin_lock(&pd->lock);
536 list_for_each_entry(uiom_dev, &pd->devs, link) {
537 if (uiom_dev->dev == dev) {
538 found = 1;
539 break;
540 }
541 }
542
543 if (!found) {
544 usnic_err("Unable to free dev %s - not found\n",
545 dev_name(dev));
546 spin_unlock(&pd->lock);
547 return;
548 }
549
550 list_del(&uiom_dev->link);
551 pd->dev_cnt--;
552 spin_unlock(&pd->lock);
553
554 return iommu_detach_device(pd->domain, dev);
555}
556
/*
 * Return a NULL-terminated, kcalloc'd array of the devices currently
 * attached to @pd (free with usnic_uiom_free_dev_list()), or an
 * ERR_PTR on allocation failure.  Allocated GFP_ATOMIC because the pd
 * lock is held across the allocation.
 */
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}
577
/* Free an array returned by usnic_uiom_get_dev_list(). */
void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}
582
/*
 * One-time module setup: require an IOMMU on the PCI bus and create
 * the workqueue used for deferred locked_vm accounting.  Returns 0 on
 * success, -EPERM if no IOMMU is present, or -ENOMEM if the workqueue
 * cannot be created.
 */
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}
598
/* Drain pending deferred-accounting work and destroy the workqueue. */
void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
new file mode 100644
index 000000000000..70440996e8f2
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -0,0 +1,80 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_UIOM_H_
20#define USNIC_UIOM_H_
21
22#include <linux/list.h>
23#include <linux/scatterlist.h>
24
25#include "usnic_uiom_interval_tree.h"
26
27#define USNIC_UIOM_READ (1)
28#define USNIC_UIOM_WRITE (2)
29
30#define USNIC_UIOM_MAX_PD_CNT (1000)
31#define USNIC_UIOM_MAX_MR_CNT (1000000)
32#define USNIC_UIOM_MAX_MR_SIZE (~0UL)
33#define USNIC_UIOM_PAGE_SIZE (PAGE_SIZE)
34
/* A device attached to a uiom protection domain's IOMMU domain. */
struct usnic_uiom_dev {
	struct device *dev;		/* the attached device */
	struct list_head link;		/* entry in usnic_uiom_pd->devs */
};
39
/* usNIC protection domain: an IOMMU domain plus its mapped intervals. */
struct usnic_uiom_pd {
	struct iommu_domain *domain;	/* backing IOMMU domain */
	spinlock_t lock;		/* protects rb_root, devs, dev_cnt */
	struct rb_root rb_root;		/* interval tree of mapped VA ranges */
	struct list_head devs;		/* attached usnic_uiom_dev entries */
	int dev_cnt;			/* number of entries on devs */
};
47
/* A pinned-and-IOMMU-mapped user memory registration. */
struct usnic_uiom_reg {
	struct usnic_uiom_pd *pd;	/* owning protection domain */
	unsigned long va;		/* page-aligned start VA */
	size_t length;			/* registration length in bytes */
	int offset;			/* offset of addr within first page */
	int page_size;
	int writable;			/* mapped with IOMMU_WRITE */
	struct list_head chunk_list;	/* usnic_uiom_chunk list of pinned pages */
	struct work_struct work;	/* deferred locked_vm accounting */
	struct mm_struct *mm;		/* mm for deferred accounting */
	unsigned long diff;		/* pages to subtract from locked_vm */
};
60
/* A batch of pinned pages; page_list is a trailing variable-length array. */
struct usnic_uiom_chunk {
	struct list_head list;		/* entry in usnic_uiom_reg->chunk_list */
	int nents;			/* valid entries in page_list */
	struct scatterlist page_list[0];
};
66
67struct usnic_uiom_pd *usnic_uiom_alloc_pd(void);
68void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd);
69int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev);
70void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd,
71 struct device *dev);
72struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd);
73void usnic_uiom_free_dev_list(struct device **devs);
74struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
75 unsigned long addr, size_t size,
76 int access, int dmasync);
77void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing);
78int usnic_uiom_init(char *drv_name);
79void usnic_uiom_fini(void);
80#endif /* USNIC_UIOM_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
new file mode 100644
index 000000000000..7e1dafccb11e
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
@@ -0,0 +1,237 @@
1#include <linux/init.h>
2#include <linux/list.h>
3#include <linux/slab.h>
4#include <linux/list_sort.h>
5#include <linux/version.h>
6
7#include <linux/interval_tree_generic.h>
8#include "usnic_uiom_interval_tree.h"
9
/* Accessors consumed by INTERVAL_TREE_DEFINE at the bottom of this file */
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

/*
 * Allocate an interval node covering [start, end].  On allocation
 * failure this sets 'err' to -ENOMEM and jumps to the caller-supplied
 * 'err_out' label - note the hidden goto.
 */
#define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out) \
	do { \
		node = usnic_uiom_interval_node_alloc(start, \
				end, ref_cnt, flags); \
		if (!node) { \
			err = -ENOMEM; \
			goto err_out; \
		} \
	} while (0)

/* Queue a node on a pending list; the tree itself is not touched */
#define MARK_FOR_ADD(node, list) (list_add_tail(&node->link, list))

/* MAKE_NODE followed by MARK_FOR_ADD (may goto err_out, see above) */
#define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err, \
		err_out, list) \
	do { \
		MAKE_NODE(node, start, end, \
				ref_cnt, flags, err, \
				err_out); \
		MARK_FOR_ADD(node, list); \
	} while (0)

/* True when flags1 and flags2 agree on every bit selected by mask */
#define FLAGS_EQUAL(flags1, flags2, mask) \
	(((flags1) & (mask)) == ((flags2) & (mask)))
36
37static struct usnic_uiom_interval_node*
38usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt,
39 int flags)
40{
41 struct usnic_uiom_interval_node *interval = kzalloc(sizeof(*interval),
42 GFP_ATOMIC);
43 if (!interval)
44 return NULL;
45
46 interval->start = start;
47 interval->last = last;
48 interval->flags = flags;
49 interval->ref_cnt = ref_cnt;
50
51 return interval;
52}
53
54static int interval_cmp(void *priv, struct list_head *a, struct list_head *b)
55{
56 struct usnic_uiom_interval_node *node_a, *node_b;
57
58 node_a = list_entry(a, struct usnic_uiom_interval_node, link);
59 node_b = list_entry(b, struct usnic_uiom_interval_node, link);
60
61 /* long to int */
62 if (node_a->start < node_b->start)
63 return -1;
64 else if (node_a->start > node_b->start)
65 return 1;
66
67 return 0;
68}
69
70static void
71find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
72 unsigned long last,
73 struct list_head *list)
74{
75 struct usnic_uiom_interval_node *node;
76
77 INIT_LIST_HEAD(list);
78
79 for (node = usnic_uiom_interval_tree_iter_first(root, start, last);
80 node;
81 node = usnic_uiom_interval_tree_iter_next(node, start, last))
82 list_add_tail(&node->link, list);
83
84 list_sort(NULL, list, interval_cmp);
85}
86
87int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last,
88 int flags, int flag_mask,
89 struct rb_root *root,
90 struct list_head *diff_set)
91{
92 struct usnic_uiom_interval_node *interval, *tmp;
93 int err = 0;
94 long int pivot = start;
95 LIST_HEAD(intersection_set);
96
97 INIT_LIST_HEAD(diff_set);
98
99 find_intervals_intersection_sorted(root, start, last,
100 &intersection_set);
101
102 list_for_each_entry(interval, &intersection_set, link) {
103 if (pivot < interval->start) {
104 MAKE_NODE_AND_APPEND(tmp, pivot, interval->start - 1,
105 1, flags, err, err_out,
106 diff_set);
107 pivot = interval->start;
108 }
109
110 /*
111 * Invariant: Set [start, pivot] is either in diff_set or root,
112 * but not in both.
113 */
114
115 if (pivot > interval->last) {
116 continue;
117 } else if (pivot <= interval->last &&
118 FLAGS_EQUAL(interval->flags, flags,
119 flag_mask)) {
120 pivot = interval->last + 1;
121 }
122 }
123
124 if (pivot <= last)
125 MAKE_NODE_AND_APPEND(tmp, pivot, last, 1, flags, err, err_out,
126 diff_set);
127
128 return 0;
129
130err_out:
131 list_for_each_entry_safe(interval, tmp, diff_set, link) {
132 list_del(&interval->link);
133 kfree(interval);
134 }
135
136 return err;
137}
138
139void usnic_uiom_put_interval_set(struct list_head *intervals)
140{
141 struct usnic_uiom_interval_node *interval, *tmp;
142 list_for_each_entry_safe(interval, tmp, intervals, link)
143 kfree(interval);
144}
145
/*
 * Insert the range [start, last] with @flags into @root, splitting and
 * merging overlapping nodes: overlapped sub-ranges get ref_cnt bumped
 * and flags OR-ed with @flags; uncovered sub-ranges become new nodes
 * with ref_cnt 1.  Returns 0 on success or -ENOMEM, in which case the
 * tree is left unchanged (all new nodes are built on a side list and
 * only swapped in after every allocation has succeeded).
 */
int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start,
				unsigned long last, int flags)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long istart, ilast;
	int iref_cnt, iflags;
	unsigned long lpivot = start;
	int err = 0;
	LIST_HEAD(to_add);
	LIST_HEAD(intersection_set);

	find_intervals_intersection_sorted(root, start, last,
						&intersection_set);

	/* Build the replacement node set on to_add without touching root */
	list_for_each_entry(interval, &intersection_set, link) {
		/*
		 * Invariant - lpivot is the left edge of next interval to be
		 * inserted
		 */
		istart = interval->start;
		ilast = interval->last;
		iref_cnt = interval->ref_cnt;
		iflags = interval->flags;

		if (istart < lpivot) {
			/* Existing node sticks out on the left: keep it as-is */
			MAKE_NODE_AND_APPEND(tmp, istart, lpivot - 1, iref_cnt,
						iflags, err, err_out, &to_add);
		} else if (istart > lpivot) {
			/* Gap before this node is covered only by new range */
			MAKE_NODE_AND_APPEND(tmp, lpivot, istart - 1, 1, flags,
						err, err_out, &to_add);
			lpivot = istart;
		} else {
			lpivot = istart;
		}

		if (ilast > last) {
			/* Overlap, then the existing node's right remainder */
			MAKE_NODE_AND_APPEND(tmp, lpivot, last, iref_cnt + 1,
						iflags | flags, err, err_out,
						&to_add);
			MAKE_NODE_AND_APPEND(tmp, last + 1, ilast, iref_cnt,
						iflags, err, err_out, &to_add);
		} else {
			/* Existing node lies fully inside the new range */
			MAKE_NODE_AND_APPEND(tmp, lpivot, ilast, iref_cnt + 1,
						iflags | flags, err, err_out,
						&to_add);
		}

		lpivot = ilast + 1;
	}

	/* Tail of [start, last] beyond the final overlap */
	if (lpivot <= last)
		MAKE_NODE_AND_APPEND(tmp, lpivot, last, 1, flags, err, err_out,
					&to_add);

	/* All allocations succeeded: swap old overlapping nodes for new set */
	list_for_each_entry_safe(interval, tmp, &intersection_set, link) {
		usnic_uiom_interval_tree_remove(interval, root);
		kfree(interval);
	}

	list_for_each_entry(interval, &to_add, link)
		usnic_uiom_interval_tree_insert(interval, root);

	return 0;

err_out:
	list_for_each_entry_safe(interval, tmp, &to_add, link)
		kfree(interval);

	return err;
}
216
217void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start,
218 unsigned long last, struct list_head *removed)
219{
220 struct usnic_uiom_interval_node *interval;
221
222 for (interval = usnic_uiom_interval_tree_iter_first(root, start, last);
223 interval;
224 interval = usnic_uiom_interval_tree_iter_next(interval,
225 start,
226 last)) {
227 if (--interval->ref_cnt == 0)
228 list_add_tail(&interval->link, removed);
229 }
230
231 list_for_each_entry(interval, removed, link)
232 usnic_uiom_interval_tree_remove(interval, root);
233}
234
/*
 * Instantiate the generic interval-tree template for
 * struct usnic_uiom_interval_node, generating the
 * usnic_uiom_interval_tree_{insert,remove,iter_first,iter_next}
 * helpers used above (keyed on ->start/->last via START/LAST,
 * augmented through ->__subtree_last).
 */
INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb,
			unsigned long, __subtree_last,
			START, LAST, , usnic_uiom_interval_tree)
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
new file mode 100644
index 000000000000..030ba6e68a52
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
@@ -0,0 +1,74 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_UIOM_INTERVAL_TREE_H_
20#define USNIC_UIOM_INTERVAL_TREE_H_
21
22#include <linux/version.h>
23#include <linux/rbtree.h>
24
/* One interval-tree node; also linkable onto temporary work lists */
struct usnic_uiom_interval_node {
	struct rb_node rb;		/* tree linkage */
	struct list_head link;		/* temporary list linkage */
	unsigned long start;		/* first covered value (inclusive) */
	unsigned long last;		/* last covered value (inclusive) */
	unsigned long __subtree_last;	/* interval-tree augmentation */
	unsigned int ref_cnt;		/* overlapping insertions on this range */
	int flags;			/* caller-defined flags */
};

/* Generated by INTERVAL_TREE_DEFINE() in usnic_uiom_interval_tree.c */
extern void
usnic_uiom_interval_tree_insert(struct usnic_uiom_interval_node *node,
					struct rb_root *root);
extern void
usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node,
					struct rb_root *root);
extern struct usnic_uiom_interval_node *
usnic_uiom_interval_tree_iter_first(struct rb_root *root,
					unsigned long start,
					unsigned long last);
extern struct usnic_uiom_interval_node *
usnic_uiom_interval_tree_iter_next(struct usnic_uiom_interval_node *node,
					unsigned long start, unsigned long last);
/*
 * Inserts {start...last} into {root}. If there are overlaps,
 * nodes will be broken up and merged
 */
int usnic_uiom_insert_interval(struct rb_root *root,
				unsigned long start, unsigned long last,
				int flags);
/*
 * Removes {start...last} from {root}. The nodes removed are returned in
 * 'removed.' The caller is responsible for freeing memory of nodes in
 * 'removed.'
 */
void usnic_uiom_remove_interval(struct rb_root *root,
				unsigned long start, unsigned long last,
				struct list_head *removed);
/*
 * Returns {start...last} - {root} (relative complement of {start...last} in
 * {root}) in diff_set sorted in ascending order
 */
int usnic_uiom_get_intervals_diff(unsigned long start,
				unsigned long last, int flags,
				int flag_mask,
				struct rb_root *root,
				struct list_head *diff_set);
/* Call this to free diff_set returned by usnic_uiom_get_intervals_diff */
void usnic_uiom_put_interval_set(struct list_head *intervals);
74#endif /* USNIC_UIOM_INTERVAL_TREE_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c
new file mode 100644
index 000000000000..656b88c39eda
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.c
@@ -0,0 +1,467 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18#include <linux/errno.h>
19#include <linux/module.h>
20#include <linux/pci.h>
21
22#include "usnic_ib.h"
23#include "vnic_resource.h"
24#include "usnic_log.h"
25#include "usnic_vnic.h"
26
/*
 * Software state for one usNIC-capable VNIC: the registered vnic_dev,
 * its mapped PCI BARs, and per-type resource chunks discovered from
 * the device.  res_lock guards chunk ownership and free counts.
 */
struct usnic_vnic {
	struct vnic_dev			*vdev;
	struct vnic_dev_bar		bar[PCI_NUM_RESOURCES];
	struct usnic_vnic_res_chunk	chunks[USNIC_VNIC_RES_TYPE_MAX];
	spinlock_t			res_lock;
};
33
/*
 * Map a usnic_vnic_res_type to the device's vnic_res_type using a
 * lookup table generated from the USNIC_VNIC_RES_TYPES X-macro list
 * (second column).  Out-of-range input maps to RES_TYPE_MAX.
 */
static enum vnic_res_type _to_vnic_res_type(enum usnic_vnic_res_type res_type)
{
#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \
		vnic_res_type,
#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \
		vnic_res_type,
	static enum vnic_res_type usnic_vnic_type_2_vnic_type[] = {
						USNIC_VNIC_RES_TYPES};
#undef DEFINE_USNIC_VNIC_RES
#undef DEFINE_USNIC_VNIC_RES_AT

	if (res_type >= USNIC_VNIC_RES_TYPE_MAX)
		return RES_TYPE_MAX;

	return usnic_vnic_type_2_vnic_type[res_type];
}
50
/*
 * Human-readable name of a resource type, taken from the desc column
 * of the USNIC_VNIC_RES_TYPES X-macro list; "unknown" when out of
 * range.
 */
const char *usnic_vnic_res_type_to_str(enum usnic_vnic_res_type res_type)
{
#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \
		desc,
#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \
		desc,
	static const char * const usnic_vnic_res_type_desc[] = {
						USNIC_VNIC_RES_TYPES};
#undef DEFINE_USNIC_VNIC_RES
#undef DEFINE_USNIC_VNIC_RES_AT

	if (res_type >= USNIC_VNIC_RES_TYPE_MAX)
		return "unknown";

	return usnic_vnic_res_type_desc[res_type];

}
68
/* PCI device name (e.g. "0000:0b:00.0") of the device backing @vnic. */
const char *usnic_vnic_pci_name(struct usnic_vnic *vnic)
{
	struct pci_dev *pdev = usnic_vnic_get_pdev(vnic);

	return pci_name(pdev);
}
73
/*
 * Pretty-print the state of every resource on @vnic into @buf (at most
 * @buf_sz bytes): a header line with VF index and BAR0 info, optional
 * caller-supplied title/column text via the callbacks, then one row
 * per resource showing type, control-register address and ownership.
 * Holds res_lock while walking the chunks.  Returns the number of
 * characters written.
 */
int usnic_vnic_dump(struct usnic_vnic *vnic, char *buf,
			int buf_sz,
			void *hdr_obj,
			int (*printtitle)(void *, char*, int),
			int (*printcols)(char *, int),
			int (*printrow)(void *, char *, int))
{
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_vnic_res *res;
	struct vnic_dev_bar *bar0;
	int i, j, offset;

	offset = 0;
	bar0 = usnic_vnic_get_bar(vnic, 0);
	offset += scnprintf(buf + offset, buf_sz - offset,
			"VF:%hu BAR0 bus_addr=%pa vaddr=0x%p size=%ld ",
			usnic_vnic_get_index(vnic),
			&bar0->bus_addr,
			bar0->vaddr, bar0->len);
	if (printtitle)
		offset += printtitle(hdr_obj, buf + offset, buf_sz - offset);
	offset += scnprintf(buf + offset, buf_sz - offset, "\n");
	offset += scnprintf(buf + offset, buf_sz - offset,
			"|RES\t|CTRL_PIN\t\t|IN_USE\t");
	if (printcols)
		offset += printcols(buf + offset, buf_sz - offset);
	offset += scnprintf(buf + offset, buf_sz - offset, "\n");

	spin_lock(&vnic->res_lock);
	for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) {
		chunk = &vnic->chunks[i];
		for (j = 0; j < chunk->cnt; j++) {
			res = chunk->res[j];
			offset += scnprintf(buf + offset, buf_sz - offset,
					"|%s[%u]\t|0x%p\t|%u\t",
					usnic_vnic_res_type_to_str(res->type),
					res->vnic_idx, res->ctrl, !!res->owner);
			if (printrow) {
				offset += printrow(res->owner, buf + offset,
						buf_sz - offset);
			}
			offset += scnprintf(buf + offset, buf_sz - offset,
					"\n");
		}
	}
	spin_unlock(&vnic->res_lock);
	return offset;
}
122
123void usnic_vnic_res_spec_update(struct usnic_vnic_res_spec *spec,
124 enum usnic_vnic_res_type trgt_type,
125 u16 cnt)
126{
127 int i;
128
129 for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
130 if (spec->resources[i].type == trgt_type) {
131 spec->resources[i].cnt = cnt;
132 return;
133 }
134 }
135
136 WARN_ON(1);
137}
138
139int usnic_vnic_res_spec_satisfied(const struct usnic_vnic_res_spec *min_spec,
140 struct usnic_vnic_res_spec *res_spec)
141{
142 int found, i, j;
143
144 for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
145 found = 0;
146
147 for (j = 0; j < USNIC_VNIC_RES_TYPE_MAX; j++) {
148 if (res_spec->resources[i].type !=
149 min_spec->resources[i].type)
150 continue;
151 found = 1;
152 if (min_spec->resources[i].cnt >
153 res_spec->resources[i].cnt)
154 return -EINVAL;
155 break;
156 }
157
158 if (!found)
159 return -EINVAL;
160 }
161 return 0;
162}
163
164int usnic_vnic_spec_dump(char *buf, int buf_sz,
165 struct usnic_vnic_res_spec *res_spec)
166{
167 enum usnic_vnic_res_type res_type;
168 int res_cnt;
169 int i;
170 int offset = 0;
171
172 for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
173 res_type = res_spec->resources[i].type;
174 res_cnt = res_spec->resources[i].cnt;
175 offset += scnprintf(buf + offset, buf_sz - offset,
176 "Res: %s Cnt: %d ",
177 usnic_vnic_res_type_to_str(res_type),
178 res_cnt);
179 }
180
181 return offset;
182}
183
184int usnic_vnic_check_room(struct usnic_vnic *vnic,
185 struct usnic_vnic_res_spec *res_spec)
186{
187 int i;
188 enum usnic_vnic_res_type res_type;
189 int res_cnt;
190
191 for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
192 res_type = res_spec->resources[i].type;
193 res_cnt = res_spec->resources[i].cnt;
194
195 if (res_type == USNIC_VNIC_RES_TYPE_EOL)
196 break;
197
198 if (res_cnt > usnic_vnic_res_free_cnt(vnic, res_type))
199 return -EBUSY;
200 }
201
202 return 0;
203}
204
205int usnic_vnic_res_cnt(struct usnic_vnic *vnic,
206 enum usnic_vnic_res_type type)
207{
208 return vnic->chunks[type].cnt;
209}
210
211int usnic_vnic_res_free_cnt(struct usnic_vnic *vnic,
212 enum usnic_vnic_res_type type)
213{
214 return vnic->chunks[type].free_cnt;
215}
216
/*
 * Claim @cnt free resources of @type from @vnic on behalf of @owner.
 * Returns a freshly allocated chunk describing the claimed resources,
 * ERR_PTR(-EINVAL) on bad arguments or insufficient free resources,
 * or ERR_PTR(-ENOMEM) on allocation failure.
 *
 * NOTE(review): the free-count check runs before res_lock is taken, so
 * a concurrent claimant can shrink the pool in between; the WARN_ON
 * below fires in that case yet a short chunk is still returned -
 * confirm callers tolerate ret->cnt < cnt.
 */
struct usnic_vnic_res_chunk *
usnic_vnic_get_resources(struct usnic_vnic *vnic, enum usnic_vnic_res_type type,
				int cnt, void *owner)
{
	struct usnic_vnic_res_chunk *src, *ret;
	struct usnic_vnic_res *res;
	int i;

	if (usnic_vnic_res_free_cnt(vnic, type) < cnt || cnt < 1 || !owner)
		return ERR_PTR(-EINVAL);

	/* GFP_ATOMIC: presumably callable from atomic context - confirm */
	ret = kzalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret) {
		usnic_err("Failed to allocate chunk for %s - Out of memory\n",
				usnic_vnic_pci_name(vnic));
		return ERR_PTR(-ENOMEM);
	}

	ret->res = kzalloc(sizeof(*(ret->res))*cnt, GFP_ATOMIC);
	if (!ret->res) {
		usnic_err("Failed to allocate resources for %s. Out of memory\n",
				usnic_vnic_pci_name(vnic));
		kfree(ret);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock(&vnic->res_lock);
	src = &vnic->chunks[type];
	/* Grab the first @cnt unowned resources of this type */
	for (i = 0; i < src->cnt && ret->cnt < cnt; i++) {
		res = src->res[i];
		if (!res->owner) {
			src->free_cnt--;
			res->owner = owner;
			ret->res[ret->cnt++] = res;
		}
	}

	spin_unlock(&vnic->res_lock);
	ret->type = type;
	ret->vnic = vnic;
	WARN_ON(ret->cnt != cnt);

	return ret;
}
261
/*
 * Return every resource in @chunk to its vnic's free pool (clearing
 * ownership) and free the chunk itself.  chunk->cnt ends at -1 when
 * the while loop falls through, which is harmless since the chunk is
 * freed immediately afterwards.
 */
void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk)
{

	struct usnic_vnic_res *res;
	int i;
	struct usnic_vnic *vnic = chunk->vnic;

	spin_lock(&vnic->res_lock);
	while ((i = --chunk->cnt) >= 0) {
		res = chunk->res[i];
		chunk->res[i] = NULL;
		res->owner = NULL;
		vnic->chunks[res->type].free_cnt++;
	}
	spin_unlock(&vnic->res_lock);

	kfree(chunk->res);
	kfree(chunk);
}
281
282u16 usnic_vnic_get_index(struct usnic_vnic *vnic)
283{
284 return usnic_vnic_get_pdev(vnic)->devfn - 1;
285}
286
287static int usnic_vnic_alloc_res_chunk(struct usnic_vnic *vnic,
288 enum usnic_vnic_res_type type,
289 struct usnic_vnic_res_chunk *chunk)
290{
291 int cnt, err, i;
292 struct usnic_vnic_res *res;
293
294 cnt = vnic_dev_get_res_count(vnic->vdev, _to_vnic_res_type(type));
295 if (cnt < 1)
296 return -EINVAL;
297
298 chunk->cnt = chunk->free_cnt = cnt;
299 chunk->res = kzalloc(sizeof(*(chunk->res))*cnt, GFP_KERNEL);
300 if (!chunk->res)
301 return -ENOMEM;
302
303 for (i = 0; i < cnt; i++) {
304 res = kzalloc(sizeof(*res), GFP_KERNEL);
305 if (!res) {
306 err = -ENOMEM;
307 goto fail;
308 }
309 res->type = type;
310 res->vnic_idx = i;
311 res->vnic = vnic;
312 res->ctrl = vnic_dev_get_res(vnic->vdev,
313 _to_vnic_res_type(type), i);
314 chunk->res[i] = res;
315 }
316
317 chunk->vnic = vnic;
318 return 0;
319fail:
320 for (i--; i >= 0; i--)
321 kfree(chunk->res[i]);
322 kfree(chunk->res);
323 return err;
324}
325
326static void usnic_vnic_free_res_chunk(struct usnic_vnic_res_chunk *chunk)
327{
328 int i;
329 for (i = 0; i < chunk->cnt; i++)
330 kfree(chunk->res[i]);
331 kfree(chunk->res);
332}
333
/*
 * Map the PCI memory BARs of @pdev, register a vnic_dev over them, and
 * enumerate every resource type into vnic->chunks.  Returns 0 on
 * success; on failure everything set up so far is torn down again.
 */
static int usnic_vnic_discover_resources(struct pci_dev *pdev,
						struct usnic_vnic *vnic)
{
	enum usnic_vnic_res_type res_type;
	int i;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		vnic->bar[i].len = pci_resource_len(pdev, i);
		vnic->bar[i].vaddr = pci_iomap(pdev, i, vnic->bar[i].len);
		if (!vnic->bar[i].vaddr) {
			usnic_err("Cannot memory-map BAR %d, aborting\n",
					i);
			err = -ENODEV;
			goto out_clean_bar;
		}
		vnic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	vnic->vdev = vnic_dev_register(NULL, pdev, pdev, vnic->bar,
			ARRAY_SIZE(vnic->bar));
	if (!vnic->vdev) {
		usnic_err("Failed to register device %s\n",
				pci_name(pdev));
		err = -EINVAL;
		goto out_clean_bar;
	}

	/* EOL is a sentinel; real resource types start just past it */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1;
			res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
		err = usnic_vnic_alloc_res_chunk(vnic, res_type,
						&vnic->chunks[res_type]);
		if (err) {
			usnic_err("Failed to alloc res %s with err %d\n",
					usnic_vnic_res_type_to_str(res_type),
					err);
			goto out_clean_chunks;
		}
	}

	return 0;

out_clean_chunks:
	/* Free only the chunks allocated before the failing type */
	for (res_type--; res_type > USNIC_VNIC_RES_TYPE_EOL; res_type--)
		usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);
	vnic_dev_unregister(vnic->vdev);
out_clean_bar:
	/* Unmap mapped BARs; a NULL vaddr marks the first unmapped one */
	for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		if (!vnic->bar[i].vaddr)
			break;

		iounmap(vnic->bar[i].vaddr);
	}

	return err;
}
394
395struct pci_dev *usnic_vnic_get_pdev(struct usnic_vnic *vnic)
396{
397 return vnic_dev_get_pdev(vnic->vdev);
398}
399
400struct vnic_dev_bar *usnic_vnic_get_bar(struct usnic_vnic *vnic,
401 int bar_num)
402{
403 return (bar_num < ARRAY_SIZE(vnic->bar)) ? &vnic->bar[bar_num] : NULL;
404}
405
406static void usnic_vnic_release_resources(struct usnic_vnic *vnic)
407{
408 int i;
409 struct pci_dev *pdev;
410 enum usnic_vnic_res_type res_type;
411
412 pdev = usnic_vnic_get_pdev(vnic);
413
414 for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1;
415 res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++)
416 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);
417
418 vnic_dev_unregister(vnic->vdev);
419
420 for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
421 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
422 continue;
423 iounmap(vnic->bar[i].vaddr);
424 }
425}
426
427struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev)
428{
429 struct usnic_vnic *vnic;
430 int err = 0;
431
432 if (!pci_is_enabled(pdev)) {
433 usnic_err("PCI dev %s is disabled\n", pci_name(pdev));
434 return ERR_PTR(-EINVAL);
435 }
436
437 vnic = kzalloc(sizeof(*vnic), GFP_KERNEL);
438 if (!vnic) {
439 usnic_err("Failed to alloc vnic for %s - out of memory\n",
440 pci_name(pdev));
441 return ERR_PTR(-ENOMEM);
442 }
443
444 spin_lock_init(&vnic->res_lock);
445
446 err = usnic_vnic_discover_resources(pdev, vnic);
447 if (err) {
448 usnic_err("Failed to discover %s resources with err %d\n",
449 pci_name(pdev), err);
450 goto out_free_vnic;
451 }
452
453 usnic_dbg("Allocated vnic for %s\n", usnic_vnic_pci_name(vnic));
454
455 return vnic;
456
457out_free_vnic:
458 kfree(vnic);
459
460 return ERR_PTR(err);
461}
462
/* Release all resources held by @vnic and free the vnic itself. */
void usnic_vnic_free(struct usnic_vnic *vnic)
{
	usnic_vnic_release_resources(vnic);
	kfree(vnic);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.h b/drivers/infiniband/hw/usnic/usnic_vnic.h
new file mode 100644
index 000000000000..14d931a8829d
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.h
@@ -0,0 +1,103 @@
1/*
2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef USNIC_VNIC_H_
20#define USNIC_VNIC_H_
21
22#include <linux/pci.h>
23
24#include "vnic_dev.h"
25
/* =USNIC_VNIC_RES_TYPE= =VNIC_RES= =DESC= */
#define USNIC_VNIC_RES_TYPES \
	DEFINE_USNIC_VNIC_RES_AT(EOL, RES_TYPE_EOL, "EOL", 0) \
	DEFINE_USNIC_VNIC_RES(WQ, RES_TYPE_WQ, "WQ") \
	DEFINE_USNIC_VNIC_RES(RQ, RES_TYPE_RQ, "RQ") \
	DEFINE_USNIC_VNIC_RES(CQ, RES_TYPE_CQ, "CQ") \
	DEFINE_USNIC_VNIC_RES(INTR, RES_TYPE_INTR_CTRL, "INT") \
	DEFINE_USNIC_VNIC_RES(MAX, RES_TYPE_MAX, "MAX")\

/* Expand the X-macro list into enum usnic_vnic_res_type (EOL pinned to 0) */
#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \
	USNIC_VNIC_RES_TYPE_##usnic_vnic_res_t = val,
#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \
	USNIC_VNIC_RES_TYPE_##usnic_vnic_res_t,
enum usnic_vnic_res_type {
	USNIC_VNIC_RES_TYPES
};
#undef DEFINE_USNIC_VNIC_RES
#undef DEFINE_USNIC_VNIC_RES_AT

/* One hardware resource (queue/interrupt) belonging to a vnic */
struct usnic_vnic_res {
	enum usnic_vnic_res_type type;	/* kind of resource */
	unsigned int vnic_idx;		/* index within its type on the device */
	struct usnic_vnic *vnic;	/* owning vnic */
	void __iomem *ctrl;		/* mapped control registers */
	void *owner;			/* current claimant, NULL when free */
};

/* A set of resources of one type discovered on / claimed from a vnic */
struct usnic_vnic_res_chunk {
	enum usnic_vnic_res_type type;	/* resource type of every entry */
	int cnt;			/* number of entries in res[] */
	int free_cnt;			/* entries not currently owned */
	struct usnic_vnic_res **res;	/* array of cnt resource pointers */
	struct usnic_vnic *vnic;	/* vnic the resources belong to */
};

/* A (type, count) pair used to describe resource requirements */
struct usnic_vnic_res_desc {
	enum usnic_vnic_res_type type;
	uint16_t cnt;
};

/* Full resource requirement: one descriptor slot per resource type */
struct usnic_vnic_res_spec {
	struct usnic_vnic_res_desc resources[USNIC_VNIC_RES_TYPE_MAX];
};

const char *usnic_vnic_res_type_to_str(enum usnic_vnic_res_type res_type);
const char *usnic_vnic_pci_name(struct usnic_vnic *vnic);
/* Pretty-print vnic resource state into buf; returns chars written */
int usnic_vnic_dump(struct usnic_vnic *vnic, char *buf, int buf_sz,
			void *hdr_obj,
			int (*printtitle)(void *, char*, int),
			int (*printcols)(char *, int),
			int (*printrow)(void *, char *, int));
void usnic_vnic_res_spec_update(struct usnic_vnic_res_spec *spec,
				enum usnic_vnic_res_type trgt_type,
				u16 cnt);
int usnic_vnic_res_spec_satisfied(const struct usnic_vnic_res_spec *min_spec,
					struct usnic_vnic_res_spec *res_spec);
int usnic_vnic_spec_dump(char *buf, int buf_sz,
				struct usnic_vnic_res_spec *res_spec);
int usnic_vnic_check_room(struct usnic_vnic *vnic,
				struct usnic_vnic_res_spec *res_spec);
int usnic_vnic_res_cnt(struct usnic_vnic *vnic,
			enum usnic_vnic_res_type type);
int usnic_vnic_res_free_cnt(struct usnic_vnic *vnic,
				enum usnic_vnic_res_type type);
/* Claim cnt resources of type for owner; see usnic_vnic.c for details */
struct usnic_vnic_res_chunk *
usnic_vnic_get_resources(struct usnic_vnic *vnic,
				enum usnic_vnic_res_type type,
				int cnt,
				void *owner);
void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk);
struct pci_dev *usnic_vnic_get_pdev(struct usnic_vnic *vnic);
struct vnic_dev_bar *usnic_vnic_get_bar(struct usnic_vnic *vnic,
				int bar_num);
struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev);
void usnic_vnic_free(struct usnic_vnic *vnic);
u16 usnic_vnic_get_index(struct usnic_vnic *vnic);
102
103#endif /*!USNIC_VNIC_H_*/