author     Doug Ledford <dledford@redhat.com>  2016-02-03 11:08:53 -0500
committer  Doug Ledford <dledford@redhat.com>  2016-02-03 11:08:53 -0500
commit     e581d111dad3781266ae1abe1d2848e69406deb5
tree       1c38b43046f91fb9328e1bf40b2dc58a2fc753e2
parent     f5e741b7c7dddefa318b7f91af134f8ae2a342c1

staging/rdma: remove deprecated ehca driver

This driver was moved to staging for eventual deletion.  Time to
complete that task.

Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--  MAINTAINERS                                         7
-rw-r--r--  drivers/staging/rdma/Kconfig                        2
-rw-r--r--  drivers/staging/rdma/Makefile                       1
-rw-r--r--  drivers/staging/rdma/ehca/Kconfig                  10
-rw-r--r--  drivers/staging/rdma/ehca/Makefile                 16
-rw-r--r--  drivers/staging/rdma/ehca/TODO                      4
-rw-r--r--  drivers/staging/rdma/ehca/ehca_av.c               279
-rw-r--r--  drivers/staging/rdma/ehca/ehca_classes.h          481
-rw-r--r--  drivers/staging/rdma/ehca/ehca_classes_pSeries.h  208
-rw-r--r--  drivers/staging/rdma/ehca/ehca_cq.c               397
-rw-r--r--  drivers/staging/rdma/ehca/ehca_eq.c               189
-rw-r--r--  drivers/staging/rdma/ehca/ehca_hca.c              414
-rw-r--r--  drivers/staging/rdma/ehca/ehca_irq.c              870
-rw-r--r--  drivers/staging/rdma/ehca/ehca_irq.h               77
-rw-r--r--  drivers/staging/rdma/ehca/ehca_iverbs.h           202
-rw-r--r--  drivers/staging/rdma/ehca/ehca_main.c            1118
-rw-r--r--  drivers/staging/rdma/ehca/ehca_mcast.c            131
-rw-r--r--  drivers/staging/rdma/ehca/ehca_mrmw.c            2202
-rw-r--r--  drivers/staging/rdma/ehca/ehca_mrmw.h             127
-rw-r--r--  drivers/staging/rdma/ehca/ehca_pd.c               123
-rw-r--r--  drivers/staging/rdma/ehca/ehca_qes.h              260
-rw-r--r--  drivers/staging/rdma/ehca/ehca_qp.c              2256
-rw-r--r--  drivers/staging/rdma/ehca/ehca_reqs.c             953
-rw-r--r--  drivers/staging/rdma/ehca/ehca_sqp.c              245
-rw-r--r--  drivers/staging/rdma/ehca/ehca_tools.h            155
-rw-r--r--  drivers/staging/rdma/ehca/ehca_uverbs.c           309
-rw-r--r--  drivers/staging/rdma/ehca/hcp_if.c                949
-rw-r--r--  drivers/staging/rdma/ehca/hcp_if.h                265
-rw-r--r--  drivers/staging/rdma/ehca/hcp_phyp.c               82
-rw-r--r--  drivers/staging/rdma/ehca/hcp_phyp.h               90
-rw-r--r--  drivers/staging/rdma/ehca/hipz_fns.h               68
-rw-r--r--  drivers/staging/rdma/ehca/hipz_fns_core.h         100
-rw-r--r--  drivers/staging/rdma/ehca/hipz_hw.h               414
-rw-r--r--  drivers/staging/rdma/ehca/ipz_pt_fn.c             289
-rw-r--r--  drivers/staging/rdma/ehca/ipz_pt_fn.h             289
35 files changed, 0 insertions, 13582 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index e76048d11e45..d1aeb1d5add5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4177,13 +4177,6 @@ W: http://aeschi.ch.eu.org/efs/
4177S: Orphan
4178F: fs/efs/
4179
4180EHCA (IBM GX bus InfiniBand adapter) DRIVER
4181M: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
4182M: Christoph Raisch <raisch@de.ibm.com>
4183L: linux-rdma@vger.kernel.org
4184S: Supported
4185F: drivers/infiniband/hw/ehca/
4186
4187EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
4188M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
4189L: netdev@vger.kernel.org
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
index 1624bc43e304..8a0be6961d7a 100644
--- a/drivers/staging/rdma/Kconfig
+++ b/drivers/staging/rdma/Kconfig
@@ -22,8 +22,6 @@ menuconfig STAGING_RDMA
22# Please keep entries in alphabetic order
23if STAGING_RDMA
24
25source "drivers/staging/rdma/ehca/Kconfig"
26
27source "drivers/staging/rdma/hfi1/Kconfig"
28
29source "drivers/staging/rdma/ipath/Kconfig"
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
index 2217d6778669..08e1919c819e 100644
--- a/drivers/staging/rdma/Makefile
+++ b/drivers/staging/rdma/Makefile
@@ -1,4 +1,3 @@
1# Entries for RDMA_STAGING tree
2obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
3obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
4obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
diff --git a/drivers/staging/rdma/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig
deleted file mode 100644
index 3fadd2ad6426..000000000000
--- a/drivers/staging/rdma/ehca/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
1config INFINIBAND_EHCA
2 tristate "eHCA support"
3 depends on IBMEBUS
4 ---help---
5 This driver supports the deprecated IBM pSeries eHCA InfiniBand
6 adapter.
7
8 To compile the driver as a module, choose M here. The module
9 will be called ib_ehca.
10
diff --git a/drivers/staging/rdma/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile
deleted file mode 100644
index 74d284e46a40..000000000000
--- a/drivers/staging/rdma/ehca/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
1# Authors: Heiko J Schick <schickhj@de.ibm.com>
2# Christoph Raisch <raisch@de.ibm.com>
3# Joachim Fenkes <fenkes@de.ibm.com>
4#
5# Copyright (c) 2005 IBM Corporation
6#
7# All rights reserved.
8#
9# This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
10
11obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
12
13ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
14 ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
15 ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
16
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO
deleted file mode 100644
index 199a4a600142..000000000000
--- a/drivers/staging/rdma/ehca/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
19/2015
2
3The ehca driver has been deprecated and moved to drivers/staging/rdma.
4It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
deleted file mode 100644
index 94e088c2d989..000000000000
--- a/drivers/staging/rdma/ehca/ehca_av.c
+++ /dev/null
@@ -1,279 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * address vector functions
5 *
6 * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
7 * Khadija Souissi <souissik@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Christoph Raisch <raisch@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#include <linux/slab.h>
45
46#include "ehca_tools.h"
47#include "ehca_iverbs.h"
48#include "hcp_if.h"
49
50static struct kmem_cache *av_cache;
51
52int ehca_calc_ipd(struct ehca_shca *shca, int port,
53 enum ib_rate path_rate, u32 *ipd)
54{
55 int path = ib_rate_to_mult(path_rate);
56 int link, ret;
57 struct ib_port_attr pa;
58
59 if (path_rate == IB_RATE_PORT_CURRENT) {
60 *ipd = 0;
61 return 0;
62 }
63
64 if (unlikely(path < 0)) {
65 ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
66 path_rate);
67 return -EINVAL;
68 }
69
70 ret = ehca_query_port(&shca->ib_device, port, &pa);
71 if (unlikely(ret < 0)) {
72 ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret);
73 return ret;
74 }
75
76 link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
77
78 if (path >= link)
79 /* no need to throttle if path faster than link */
80 *ipd = 0;
81 else
82 /* IPD = round((link / path) - 1) */
83 *ipd = ((link + (path >> 1)) / path) - 1;
84
85 return 0;
86}
87
88struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
89{
90 int ret;
91 struct ehca_av *av;
92 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
93 ib_device);
94
95 av = kmem_cache_alloc(av_cache, GFP_KERNEL);
96 if (!av) {
97 ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
98 pd, ah_attr);
99 return ERR_PTR(-ENOMEM);
100 }
101
102 av->av.sl = ah_attr->sl;
103 av->av.dlid = ah_attr->dlid;
104 av->av.slid_path_bits = ah_attr->src_path_bits;
105
106 if (ehca_static_rate < 0) {
107 u32 ipd;
108
109 if (ehca_calc_ipd(shca, ah_attr->port_num,
110 ah_attr->static_rate, &ipd)) {
111 ret = -EINVAL;
112 goto create_ah_exit1;
113 }
114 av->av.ipd = ipd;
115 } else
116 av->av.ipd = ehca_static_rate;
117
118 av->av.lnh = ah_attr->ah_flags;
119 av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
120 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
121 ah_attr->grh.traffic_class);
122 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
123 ah_attr->grh.flow_label);
124 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
125 ah_attr->grh.hop_limit);
126 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
127 /* set sgid in grh.word_1 */
128 if (ah_attr->ah_flags & IB_AH_GRH) {
129 int rc;
130 struct ib_port_attr port_attr;
131 union ib_gid gid;
132
133 memset(&port_attr, 0, sizeof(port_attr));
134 rc = ehca_query_port(pd->device, ah_attr->port_num,
135 &port_attr);
136 if (rc) { /* invalid port number */
137 ret = -EINVAL;
138 ehca_err(pd->device, "Invalid port number "
139 "ehca_query_port() returned %x "
140 "pd=%p ah_attr=%p", rc, pd, ah_attr);
141 goto create_ah_exit1;
142 }
143 memset(&gid, 0, sizeof(gid));
144 rc = ehca_query_gid(pd->device,
145 ah_attr->port_num,
146 ah_attr->grh.sgid_index, &gid);
147 if (rc) {
148 ret = -EINVAL;
149 ehca_err(pd->device, "Failed to retrieve sgid "
150 "ehca_query_gid() returned %x "
151 "pd=%p ah_attr=%p", rc, pd, ah_attr);
152 goto create_ah_exit1;
153 }
154 memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
155 }
156 av->av.pmtu = shca->max_mtu;
157
158 /* dgid comes in grh.word_3 */
159 memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
160 sizeof(ah_attr->grh.dgid));
161
162 return &av->ib_ah;
163
164create_ah_exit1:
165 kmem_cache_free(av_cache, av);
166
167 return ERR_PTR(ret);
168}
169
170int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
171{
172 struct ehca_av *av;
173 struct ehca_ud_av new_ehca_av;
174 struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
175 ib_device);
176
177 memset(&new_ehca_av, 0, sizeof(new_ehca_av));
178 new_ehca_av.sl = ah_attr->sl;
179 new_ehca_av.dlid = ah_attr->dlid;
180 new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
181 new_ehca_av.ipd = ah_attr->static_rate;
182 new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
183 (ah_attr->ah_flags & IB_AH_GRH) > 0);
184 new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
185 ah_attr->grh.traffic_class);
186 new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
187 ah_attr->grh.flow_label);
188 new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
189 ah_attr->grh.hop_limit);
190 new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
191
192 /* set sgid in grh.word_1 */
193 if (ah_attr->ah_flags & IB_AH_GRH) {
194 int rc;
195 struct ib_port_attr port_attr;
196 union ib_gid gid;
197
198 memset(&port_attr, 0, sizeof(port_attr));
199 rc = ehca_query_port(ah->device, ah_attr->port_num,
200 &port_attr);
201 if (rc) { /* invalid port number */
202 ehca_err(ah->device, "Invalid port number "
203 "ehca_query_port() returned %x "
204 "ah=%p ah_attr=%p port_num=%x",
205 rc, ah, ah_attr, ah_attr->port_num);
206 return -EINVAL;
207 }
208 memset(&gid, 0, sizeof(gid));
209 rc = ehca_query_gid(ah->device,
210 ah_attr->port_num,
211 ah_attr->grh.sgid_index, &gid);
212 if (rc) {
213 ehca_err(ah->device, "Failed to retrieve sgid "
214 "ehca_query_gid() returned %x "
215 "ah=%p ah_attr=%p port_num=%x "
216 "sgid_index=%x",
217 rc, ah, ah_attr, ah_attr->port_num,
218 ah_attr->grh.sgid_index);
219 return -EINVAL;
220 }
221 memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
222 }
223
224 new_ehca_av.pmtu = shca->max_mtu;
225
226 memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
227 sizeof(ah_attr->grh.dgid));
228
229 av = container_of(ah, struct ehca_av, ib_ah);
230 av->av = new_ehca_av;
231
232 return 0;
233}
234
235int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
236{
237 struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
238
239 memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
240 sizeof(ah_attr->grh.dgid));
241 ah_attr->sl = av->av.sl;
242
243 ah_attr->dlid = av->av.dlid;
244
245 ah_attr->src_path_bits = av->av.slid_path_bits;
246 ah_attr->static_rate = av->av.ipd;
247 ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
248 ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
249 av->av.grh.word_0);
250 ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
251 av->av.grh.word_0);
252 ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
253 av->av.grh.word_0);
254
255 return 0;
256}
257
258int ehca_destroy_ah(struct ib_ah *ah)
259{
260 kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
261
262 return 0;
263}
264
265int ehca_init_av_cache(void)
266{
267 av_cache = kmem_cache_create("ehca_cache_av",
268 sizeof(struct ehca_av), 0,
269 SLAB_HWCACHE_ALIGN,
270 NULL);
271 if (!av_cache)
272 return -ENOMEM;
273 return 0;
274}
275
276void ehca_cleanup_av_cache(void)
277{
278 kmem_cache_destroy(av_cache);
279}
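As an aside, the static-rate throttling removed above in ehca_calc_ipd() computes IPD = round(link/path) - 1 using integer arithmetic only, by adding half the divisor before dividing. A minimal user-space sketch of that rounding idiom (the link/path values below are made-up rate multiples for illustration, not real port speeds):

#include <stdio.h>

/* Same "add half the divisor before dividing" rounding used by the
 * removed ehca_calc_ipd(): IPD = round(link / path) - 1, and 0 when
 * the path is at least as fast as the link. */
static unsigned int calc_ipd(unsigned int link, unsigned int path)
{
	if (path >= link)
		return 0;	/* no need to throttle */
	return ((link + (path >> 1)) / path) - 1;
}

int main(void)
{
	printf("link=16 path=4 -> ipd=%u\n", calc_ipd(16, 4));	/* 3 */
	printf("link=12 path=8 -> ipd=%u\n", calc_ipd(12, 8));	/* round(1.5) - 1 = 1 */
	printf("link=4  path=8 -> ipd=%u\n", calc_ipd(4, 8));	/* 0 */
	return 0;
}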
diff --git a/drivers/staging/rdma/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
deleted file mode 100644
index e8c3387d7aaa..000000000000
--- a/drivers/staging/rdma/ehca/ehca_classes.h
+++ /dev/null
@@ -1,481 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Struct definition for eHCA internal structures
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#ifndef __EHCA_CLASSES_H__
44#define __EHCA_CLASSES_H__
45
46struct ehca_module;
47struct ehca_qp;
48struct ehca_cq;
49struct ehca_eq;
50struct ehca_mr;
51struct ehca_mw;
52struct ehca_pd;
53struct ehca_av;
54
55#include <linux/wait.h>
56#include <linux/mutex.h>
57
58#include <rdma/ib_verbs.h>
59#include <rdma/ib_user_verbs.h>
60
61#ifdef CONFIG_PPC64
62#include "ehca_classes_pSeries.h"
63#endif
64#include "ipz_pt_fn.h"
65#include "ehca_qes.h"
66#include "ehca_irq.h"
67
68#define EHCA_EQE_CACHE_SIZE 20
69#define EHCA_MAX_NUM_QUEUES 0xffff
70
71struct ehca_eqe_cache_entry {
72 struct ehca_eqe *eqe;
73 struct ehca_cq *cq;
74};
75
76struct ehca_eq {
77 u32 length;
78 struct ipz_queue ipz_queue;
79 struct ipz_eq_handle ipz_eq_handle;
80 struct work_struct work;
81 struct h_galpas galpas;
82 int is_initialized;
83 struct ehca_pfeq pf;
84 spinlock_t spinlock;
85 struct tasklet_struct interrupt_task;
86 u32 ist;
87 spinlock_t irq_spinlock;
88 struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
89};
90
91struct ehca_sma_attr {
92 u16 lid, lmc, sm_sl, sm_lid;
93 u16 pkey_tbl_len, pkeys[16];
94};
95
96struct ehca_sport {
97 struct ib_cq *ibcq_aqp1;
98 struct ib_qp *ibqp_sqp[2];
99 /* lock to serialze modify_qp() calls for sqp in normal
100 * and irq path (when event PORT_ACTIVE is received first time)
101 */
102 spinlock_t mod_sqp_lock;
103 enum ib_port_state port_state;
104 struct ehca_sma_attr saved_attr;
105 u32 pma_qp_nr;
106};
107
108#define HCA_CAP_MR_PGSIZE_4K 0x80000000
109#define HCA_CAP_MR_PGSIZE_64K 0x40000000
110#define HCA_CAP_MR_PGSIZE_1M 0x20000000
111#define HCA_CAP_MR_PGSIZE_16M 0x10000000
112
113struct ehca_shca {
114 struct ib_device ib_device;
115 struct platform_device *ofdev;
116 u8 num_ports;
117 int hw_level;
118 struct list_head shca_list;
119 struct ipz_adapter_handle ipz_hca_handle;
120 struct ehca_sport sport[2];
121 struct ehca_eq eq;
122 struct ehca_eq neq;
123 struct ehca_mr *maxmr;
124 struct ehca_pd *pd;
125 struct h_galpas galpas;
126 struct mutex modify_mutex;
127 u64 hca_cap;
128 /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
129 u32 hca_cap_mr_pgsize;
130 int max_mtu;
131 int max_num_qps;
132 int max_num_cqs;
133 atomic_t num_cqs;
134 atomic_t num_qps;
135};
136
137struct ehca_pd {
138 struct ib_pd ib_pd;
139 struct ipz_pd fw_pd;
140 /* small queue mgmt */
141 struct mutex lock;
142 struct list_head free[2];
143 struct list_head full[2];
144};
145
146enum ehca_ext_qp_type {
147 EQPT_NORMAL = 0,
148 EQPT_LLQP = 1,
149 EQPT_SRQBASE = 2,
150 EQPT_SRQ = 3,
151};
152
153/* struct to cache modify_qp()'s parms for GSI/SMI qp */
154struct ehca_mod_qp_parm {
155 int mask;
156 struct ib_qp_attr attr;
157};
158
159#define EHCA_MOD_QP_PARM_MAX 4
160
161#define QMAP_IDX_MASK 0xFFFFULL
162
163/* struct for tracking if cqes have been reported to the application */
164struct ehca_qmap_entry {
165 u16 app_wr_id;
166 u8 reported;
167 u8 cqe_req;
168};
169
170struct ehca_queue_map {
171 struct ehca_qmap_entry *map;
172 unsigned int entries;
173 unsigned int tail;
174 unsigned int left_to_poll;
175 unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */
176};
177
178/* function to calculate the next index for the qmap */
179static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
180{
181 unsigned int temp = cur_index + 1;
182 return (temp == limit) ? 0 : temp;
183}
184
185struct ehca_qp {
186 union {
187 struct ib_qp ib_qp;
188 struct ib_srq ib_srq;
189 };
190 u32 qp_type;
191 enum ehca_ext_qp_type ext_type;
192 enum ib_qp_state state;
193 struct ipz_queue ipz_squeue;
194 struct ehca_queue_map sq_map;
195 struct ipz_queue ipz_rqueue;
196 struct ehca_queue_map rq_map;
197 struct h_galpas galpas;
198 u32 qkey;
199 u32 real_qp_num;
200 u32 token;
201 spinlock_t spinlock_s;
202 spinlock_t spinlock_r;
203 u32 sq_max_inline_data_size;
204 struct ipz_qp_handle ipz_qp_handle;
205 struct ehca_pfqp pf;
206 struct ib_qp_init_attr init_attr;
207 struct ehca_cq *send_cq;
208 struct ehca_cq *recv_cq;
209 unsigned int sqerr_purgeflag;
210 struct hlist_node list_entries;
211 /* array to cache modify_qp()'s parms for GSI/SMI qp */
212 struct ehca_mod_qp_parm *mod_qp_parm;
213 int mod_qp_parm_idx;
214 /* mmap counter for resources mapped into user space */
215 u32 mm_count_squeue;
216 u32 mm_count_rqueue;
217 u32 mm_count_galpa;
218 /* unsolicited ack circumvention */
219 int unsol_ack_circ;
220 int mtu_shift;
221 u32 message_count;
222 u32 packet_count;
223 atomic_t nr_events; /* events seen */
224 wait_queue_head_t wait_completion;
225 int mig_armed;
226 struct list_head sq_err_node;
227 struct list_head rq_err_node;
228};
229
230#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
231#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
232#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
233
234/* must be power of 2 */
235#define QP_HASHTAB_LEN 8
236
237struct ehca_cq {
238 struct ib_cq ib_cq;
239 struct ipz_queue ipz_queue;
240 struct h_galpas galpas;
241 spinlock_t spinlock;
242 u32 cq_number;
243 u32 token;
244 u32 nr_of_entries;
245 struct ipz_cq_handle ipz_cq_handle;
246 struct ehca_pfcq pf;
247 spinlock_t cb_lock;
248 struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
249 struct list_head entry;
250 u32 nr_callbacks; /* #events assigned to cpu by scaling code */
251 atomic_t nr_events; /* #events seen */
252 wait_queue_head_t wait_completion;
253 spinlock_t task_lock;
254 /* mmap counter for resources mapped into user space */
255 u32 mm_count_queue;
256 u32 mm_count_galpa;
257 struct list_head sqp_err_list;
258 struct list_head rqp_err_list;
259};
260
261enum ehca_mr_flag {
262 EHCA_MR_FLAG_FMR = 0x80000000, /* FMR, created with ehca_alloc_fmr */
263 EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
264};
265
266struct ehca_mr {
267 union {
268 struct ib_mr ib_mr; /* must always be first in ehca_mr */
269 struct ib_fmr ib_fmr; /* must always be first in ehca_mr */
270 } ib;
271 struct ib_umem *umem;
272 spinlock_t mrlock;
273
274 enum ehca_mr_flag flags;
275 u32 num_kpages; /* number of kernel pages */
276 u32 num_hwpages; /* number of hw pages to form MR */
277 u64 hwpage_size; /* hw page size used for this MR */
278 int acl; /* ACL (stored here for usage in reregister) */
279 u64 *start; /* virtual start address (stored here for */
280 /* usage in reregister) */
281 u64 size; /* size (stored here for usage in reregister) */
282 u32 fmr_page_size; /* page size for FMR */
283 u32 fmr_max_pages; /* max pages for FMR */
284 u32 fmr_max_maps; /* max outstanding maps for FMR */
285 u32 fmr_map_cnt; /* map counter for FMR */
286 /* fw specific data */
287 struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
288 struct h_galpas galpas;
289};
290
291struct ehca_mw {
292 struct ib_mw ib_mw; /* gen2 mw, must always be first in ehca_mw */
293 spinlock_t mwlock;
294
295 u8 never_bound; /* indication MW was never bound */
296 struct ipz_mrmw_handle ipz_mw_handle; /* MW handle for h-calls */
297 struct h_galpas galpas;
298};
299
300enum ehca_mr_pgi_type {
301 EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
302 * ehca_rereg_phys_mr,
303 * ehca_reg_internal_maxmr */
304 EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
305 EHCA_MR_PGI_FMR = 3 /* type of ehca_map_phys_fmr */
306};
307
308struct ehca_mr_pginfo {
309 enum ehca_mr_pgi_type type;
310 u64 num_kpages;
311 u64 kpage_cnt;
312 u64 hwpage_size; /* hw page size used for this MR */
313 u64 num_hwpages; /* number of hw pages */
314 u64 hwpage_cnt; /* counter for hw pages */
315 u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
316
317 union {
318 struct { /* type EHCA_MR_PGI_PHYS section */
319 u64 addr;
320 u16 size;
321 } phy;
322 struct { /* type EHCA_MR_PGI_USER section */
323 struct ib_umem *region;
324 struct scatterlist *next_sg;
325 u64 next_nmap;
326 } usr;
327 struct { /* type EHCA_MR_PGI_FMR section */
328 u64 fmr_pgsize;
329 u64 *page_list;
330 u64 next_listelem;
331 } fmr;
332 } u;
333};
334
335/* output parameters for MR/FMR hipz calls */
336struct ehca_mr_hipzout_parms {
337 struct ipz_mrmw_handle handle;
338 u32 lkey;
339 u32 rkey;
340 u64 len;
341 u64 vaddr;
342 u32 acl;
343};
344
345/* output parameters for MW hipz calls */
346struct ehca_mw_hipzout_parms {
347 struct ipz_mrmw_handle handle;
348 u32 rkey;
349};
350
351struct ehca_av {
352 struct ib_ah ib_ah;
353 struct ehca_ud_av av;
354};
355
356struct ehca_ucontext {
357 struct ib_ucontext ib_ucontext;
358};
359
360int ehca_init_pd_cache(void);
361void ehca_cleanup_pd_cache(void);
362int ehca_init_cq_cache(void);
363void ehca_cleanup_cq_cache(void);
364int ehca_init_qp_cache(void);
365void ehca_cleanup_qp_cache(void);
366int ehca_init_av_cache(void);
367void ehca_cleanup_av_cache(void);
368int ehca_init_mrmw_cache(void);
369void ehca_cleanup_mrmw_cache(void);
370int ehca_init_small_qp_cache(void);
371void ehca_cleanup_small_qp_cache(void);
372
373extern rwlock_t ehca_qp_idr_lock;
374extern rwlock_t ehca_cq_idr_lock;
375extern struct idr ehca_qp_idr;
376extern struct idr ehca_cq_idr;
377extern spinlock_t shca_list_lock;
378
379extern int ehca_static_rate;
380extern int ehca_port_act_time;
381extern bool ehca_use_hp_mr;
382extern bool ehca_scaling_code;
383extern int ehca_lock_hcalls;
384extern int ehca_nr_ports;
385extern int ehca_max_cq;
386extern int ehca_max_qp;
387
388struct ipzu_queue_resp {
389 u32 qe_size; /* queue entry size */
390 u32 act_nr_of_sg;
391 u32 queue_length; /* queue length allocated in bytes */
392 u32 pagesize;
393 u32 toggle_state;
394 u32 offset; /* save offset within a page for small_qp */
395};
396
397struct ehca_create_cq_resp {
398 u32 cq_number;
399 u32 token;
400 struct ipzu_queue_resp ipz_queue;
401 u32 fw_handle_ofs;
402 u32 dummy;
403};
404
405struct ehca_create_qp_resp {
406 u32 qp_num;
407 u32 token;
408 u32 qp_type;
409 u32 ext_type;
410 u32 qkey;
411 /* qp_num assigned by ehca: sqp0/1 may have got different numbers */
412 u32 real_qp_num;
413 u32 fw_handle_ofs;
414 u32 dummy;
415 struct ipzu_queue_resp ipz_squeue;
416 struct ipzu_queue_resp ipz_rqueue;
417};
418
419struct ehca_alloc_cq_parms {
420 u32 nr_cqe;
421 u32 act_nr_of_entries;
422 u32 act_pages;
423 struct ipz_eq_handle eq_handle;
424};
425
426enum ehca_service_type {
427 ST_RC = 0,
428 ST_UC = 1,
429 ST_RD = 2,
430 ST_UD = 3,
431};
432
433enum ehca_ll_comp_flags {
434 LLQP_SEND_COMP = 0x20,
435 LLQP_RECV_COMP = 0x40,
436 LLQP_COMP_MASK = 0x60,
437};
438
439struct ehca_alloc_queue_parms {
440 /* input parameters */
441 int max_wr;
442 int max_sge;
443 int page_size;
444 int is_small;
445
446 /* output parameters */
447 u16 act_nr_wqes;
448 u8 act_nr_sges;
449 u32 queue_size; /* bytes for small queues, pages otherwise */
450};
451
452struct ehca_alloc_qp_parms {
453 struct ehca_alloc_queue_parms squeue;
454 struct ehca_alloc_queue_parms rqueue;
455
456 /* input parameters */
457 enum ehca_service_type servicetype;
458 int qp_storage;
459 int sigtype;
460 enum ehca_ext_qp_type ext_type;
461 enum ehca_ll_comp_flags ll_comp_flags;
462 int ud_av_l_key_ctl;
463
464 u32 token;
465 struct ipz_eq_handle eq_handle;
466 struct ipz_pd pd;
467 struct ipz_cq_handle send_cq_handle, recv_cq_handle;
468
469 u32 srq_qpn, srq_token, srq_limit;
470
471 /* output parameters */
472 u32 real_qp_num;
473 struct ipz_qp_handle qp_handle;
474 struct h_galpas galpas;
475};
476
477int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
478int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
479struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
480
481#endif
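For illustration, the next_index() helper in the header deleted above steps a cursor through the fixed-size CQE queue map and wraps back to slot 0 when it reaches the limit. A small stand-alone sketch of that wrap-around walk (the 4-entry map size is an arbitrary choice for the demo):

#include <stdio.h>

/* Wrap-around increment, as in the removed ehca next_index(). */
static unsigned int next_index(unsigned int cur_index, unsigned int limit)
{
	unsigned int temp = cur_index + 1;

	return (temp == limit) ? 0 : temp;
}

int main(void)
{
	unsigned int limit = 4;	/* arbitrary map size for the demo */
	unsigned int idx = 0;
	int i;

	for (i = 0; i < 6; i++) {
		idx = next_index(idx, limit);
		printf("%u ", idx);	/* prints: 1 2 3 0 1 2 */
	}
	printf("\n");
	return 0;
}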
diff --git a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
deleted file mode 100644
index 689c35786dd2..000000000000
--- a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
+++ /dev/null
@@ -1,208 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * pSeries interface definitions
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __EHCA_CLASSES_PSERIES_H__
43#define __EHCA_CLASSES_PSERIES_H__
44
45#include "hcp_phyp.h"
46#include "ipz_pt_fn.h"
47
48
49struct ehca_pfqp {
50 struct ipz_qpt sqpt;
51 struct ipz_qpt rqpt;
52};
53
54struct ehca_pfcq {
55 struct ipz_qpt qpt;
56 u32 cqnr;
57};
58
59struct ehca_pfeq {
60 struct ipz_qpt qpt;
61 struct h_galpa galpa;
62 u32 eqnr;
63};
64
65struct ipz_adapter_handle {
66 u64 handle;
67};
68
69struct ipz_cq_handle {
70 u64 handle;
71};
72
73struct ipz_eq_handle {
74 u64 handle;
75};
76
77struct ipz_qp_handle {
78 u64 handle;
79};
80struct ipz_mrmw_handle {
81 u64 handle;
82};
83
84struct ipz_pd {
85 u32 value;
86};
87
88struct hcp_modify_qp_control_block {
89 u32 qkey; /* 00 */
90 u32 rdd; /* reliable datagram domain */
91 u32 send_psn; /* 02 */
92 u32 receive_psn; /* 03 */
93 u32 prim_phys_port; /* 04 */
94 u32 alt_phys_port; /* 05 */
95 u32 prim_p_key_idx; /* 06 */
96 u32 alt_p_key_idx; /* 07 */
97 u32 rdma_atomic_ctrl; /* 08 */
98 u32 qp_state; /* 09 */
99 u32 reserved_10; /* 10 */
100 u32 rdma_nr_atomic_resp_res; /* 11 */
101 u32 path_migration_state; /* 12 */
102 u32 rdma_atomic_outst_dest_qp; /* 13 */
103 u32 dest_qp_nr; /* 14 */
104 u32 min_rnr_nak_timer_field; /* 15 */
105 u32 service_level; /* 16 */
106 u32 send_grh_flag; /* 17 */
107 u32 retry_count; /* 18 */
108 u32 timeout; /* 19 */
109 u32 path_mtu; /* 20 */
110 u32 max_static_rate; /* 21 */
111 u32 dlid; /* 22 */
112 u32 rnr_retry_count; /* 23 */
113 u32 source_path_bits; /* 24 */
114 u32 traffic_class; /* 25 */
115 u32 hop_limit; /* 26 */
116 u32 source_gid_idx; /* 27 */
117 u32 flow_label; /* 28 */
118 u32 reserved_29; /* 29 */
119 union { /* 30 */
120 u64 dw[2];
121 u8 byte[16];
122 } dest_gid;
123 u32 service_level_al; /* 34 */
124 u32 send_grh_flag_al; /* 35 */
125 u32 retry_count_al; /* 36 */
126 u32 timeout_al; /* 37 */
127 u32 max_static_rate_al; /* 38 */
128 u32 dlid_al; /* 39 */
129 u32 rnr_retry_count_al; /* 40 */
130 u32 source_path_bits_al; /* 41 */
131 u32 traffic_class_al; /* 42 */
132 u32 hop_limit_al; /* 43 */
133 u32 source_gid_idx_al; /* 44 */
134 u32 flow_label_al; /* 45 */
135 u32 reserved_46; /* 46 */
136 u32 reserved_47; /* 47 */
137 union { /* 48 */
138 u64 dw[2];
139 u8 byte[16];
140 } dest_gid_al;
141 u32 max_nr_outst_send_wr; /* 52 */
142 u32 max_nr_outst_recv_wr; /* 53 */
143 u32 disable_ete_credit_check; /* 54 */
144 u32 qp_number; /* 55 */
145 u64 send_queue_handle; /* 56 */
146 u64 recv_queue_handle; /* 58 */
147 u32 actual_nr_sges_in_sq_wqe; /* 60 */
148 u32 actual_nr_sges_in_rq_wqe; /* 61 */
149 u32 qp_enable; /* 62 */
150 u32 curr_srq_limit; /* 63 */
151 u64 qp_aff_asyn_ev_log_reg; /* 64 */
152 u64 shared_rq_hndl; /* 66 */
153 u64 trigg_doorbell_qp_hndl; /* 68 */
154 u32 reserved_70_127[58]; /* 70 */
155};
156
157#define MQPCB_MASK_QKEY EHCA_BMASK_IBM( 0, 0)
158#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM( 2, 2)
159#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM( 3, 3)
160#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM( 4, 4)
161#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24, 31)
162#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM( 5, 5)
163#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM( 6, 6)
164#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24, 31)
165#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
166#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
167#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
168#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
169#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
170#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
171#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14, 14)
172#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15, 15)
173#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16, 16)
174#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17, 17)
175#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
176#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
177#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
178#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
179#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
180#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
181#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
182#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
183#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
184#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
185#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
186#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
187#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
188#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
189#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
190#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
191#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
192#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
193#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
194#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
195#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
196#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
197#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
198#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
199#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
200#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
201#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
202#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
203#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
204#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
205#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
206#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
207
208#endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/staging/rdma/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
deleted file mode 100644
index 1aa7931fe860..000000000000
--- a/drivers/staging/rdma/ehca/ehca_cq.c
+++ /dev/null
@@ -1,397 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Completion queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Heiko J Schick <schickhj@de.ibm.com>
10 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
11 *
12 *
13 * Copyright (c) 2005 IBM Corporation
14 *
15 * All rights reserved.
16 *
17 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
18 * BSD.
19 *
20 * OpenIB BSD License
21 *
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions are met:
24 *
25 * Redistributions of source code must retain the above copyright notice, this
26 * list of conditions and the following disclaimer.
27 *
28 * Redistributions in binary form must reproduce the above copyright notice,
29 * this list of conditions and the following disclaimer in the documentation
30 * and/or other materials
31 * provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
34 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
37 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
40 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
41 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46#include <linux/slab.h>
47
48#include "ehca_iverbs.h"
49#include "ehca_classes.h"
50#include "ehca_irq.h"
51#include "hcp_if.h"
52
53static struct kmem_cache *cq_cache;
54
55int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
56{
57 unsigned int qp_num = qp->real_qp_num;
58 unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
59 unsigned long flags;
60
61 spin_lock_irqsave(&cq->spinlock, flags);
62 hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
63 spin_unlock_irqrestore(&cq->spinlock, flags);
64
65 ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
66 cq->cq_number, qp_num);
67
68 return 0;
69}
70
71int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
72{
73 int ret = -EINVAL;
74 unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
75 struct hlist_node *iter;
76 struct ehca_qp *qp;
77 unsigned long flags;
78
79 spin_lock_irqsave(&cq->spinlock, flags);
80 hlist_for_each(iter, &cq->qp_hashtab[key]) {
81 qp = hlist_entry(iter, struct ehca_qp, list_entries);
82 if (qp->real_qp_num == real_qp_num) {
83 hlist_del(iter);
84 ehca_dbg(cq->ib_cq.device,
85 "removed qp from cq .cq_num=%x real_qp_num=%x",
86 cq->cq_number, real_qp_num);
87 ret = 0;
88 break;
89 }
90 }
91 spin_unlock_irqrestore(&cq->spinlock, flags);
92 if (ret)
93 ehca_err(cq->ib_cq.device,
94 "qp not found cq_num=%x real_qp_num=%x",
95 cq->cq_number, real_qp_num);
96
97 return ret;
98}
99
100struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
101{
102 struct ehca_qp *ret = NULL;
103 unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
104 struct hlist_node *iter;
105 struct ehca_qp *qp;
106 hlist_for_each(iter, &cq->qp_hashtab[key]) {
107 qp = hlist_entry(iter, struct ehca_qp, list_entries);
108 if (qp->real_qp_num == real_qp_num) {
109 ret = qp;
110 break;
111 }
112 }
113 return ret;
114}
115
116struct ib_cq *ehca_create_cq(struct ib_device *device,
117 const struct ib_cq_init_attr *attr,
118 struct ib_ucontext *context,
119 struct ib_udata *udata)
120{
121 int cqe = attr->cqe;
122 static const u32 additional_cqe = 20;
123 struct ib_cq *cq;
124 struct ehca_cq *my_cq;
125 struct ehca_shca *shca =
126 container_of(device, struct ehca_shca, ib_device);
127 struct ipz_adapter_handle adapter_handle;
128 struct ehca_alloc_cq_parms param; /* h_call's out parameters */
129 struct h_galpa gal;
130 void *vpage;
131 u32 counter;
132 u64 rpage, cqx_fec, h_ret;
133 int rc, i;
134 unsigned long flags;
135
136 if (attr->flags)
137 return ERR_PTR(-EINVAL);
138
139 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
140 return ERR_PTR(-EINVAL);
141
142 if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
143 ehca_err(device, "Unable to create CQ, max number of %i "
144 "CQs reached.", shca->max_num_cqs);
145 ehca_err(device, "To increase the maximum number of CQs "
146 "use the number_of_cqs module parameter.\n");
147 return ERR_PTR(-ENOSPC);
148 }
149
150 my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
151 if (!my_cq) {
152 ehca_err(device, "Out of memory for ehca_cq struct device=%p",
153 device);
154 atomic_dec(&shca->num_cqs);
155 return ERR_PTR(-ENOMEM);
156 }
157
158 memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
159
160 spin_lock_init(&my_cq->spinlock);
161 spin_lock_init(&my_cq->cb_lock);
162 spin_lock_init(&my_cq->task_lock);
163 atomic_set(&my_cq->nr_events, 0);
164 init_waitqueue_head(&my_cq->wait_completion);
165
166 cq = &my_cq->ib_cq;
167
168 adapter_handle = shca->ipz_hca_handle;
169 param.eq_handle = shca->eq.ipz_eq_handle;
170
171 idr_preload(GFP_KERNEL);
172 write_lock_irqsave(&ehca_cq_idr_lock, flags);
173 rc = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
174 write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
175 idr_preload_end();
176
177 if (rc < 0) {
178 cq = ERR_PTR(-ENOMEM);
179 ehca_err(device, "Can't allocate new idr entry. device=%p",
180 device);
181 goto create_cq_exit1;
182 }
183 my_cq->token = rc;
184
185 /*
186 * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
187 * for receiving errors CQEs.
188 */
189 param.nr_cqe = cqe + additional_cqe;
190 h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
191
192 if (h_ret != H_SUCCESS) {
193 ehca_err(device, "hipz_h_alloc_resource_cq() failed "
194 "h_ret=%lli device=%p", h_ret, device);
195 cq = ERR_PTR(ehca2ib_return_code(h_ret));
196 goto create_cq_exit2;
197 }
198
199 rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
200 EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
201 if (!rc) {
202 ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
203 rc, device);
204 cq = ERR_PTR(-EINVAL);
205 goto create_cq_exit3;
206 }
207
208 for (counter = 0; counter < param.act_pages; counter++) {
209 vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
210 if (!vpage) {
211 ehca_err(device, "ipz_qpageit_get_inc() "
212 "returns NULL device=%p", device);
213 cq = ERR_PTR(-EAGAIN);
214 goto create_cq_exit4;
215 }
216 rpage = __pa(vpage);
217
218 h_ret = hipz_h_register_rpage_cq(adapter_handle,
219 my_cq->ipz_cq_handle,
220 &my_cq->pf,
221 0,
222 0,
223 rpage,
224 1,
225 my_cq->galpas.
226 kernel);
227
228 if (h_ret < H_SUCCESS) {
229 ehca_err(device, "hipz_h_register_rpage_cq() failed "
230 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
231 "act_pages=%i", my_cq, my_cq->cq_number,
232 h_ret, counter, param.act_pages);
233 cq = ERR_PTR(-EINVAL);
234 goto create_cq_exit4;
235 }
236
237 if (counter == (param.act_pages - 1)) {
238 vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
239 if ((h_ret != H_SUCCESS) || vpage) {
240 ehca_err(device, "Registration of pages not "
241 "complete ehca_cq=%p cq_num=%x "
242 "h_ret=%lli", my_cq, my_cq->cq_number,
243 h_ret);
244 cq = ERR_PTR(-EAGAIN);
245 goto create_cq_exit4;
246 }
247 } else {
248 if (h_ret != H_PAGE_REGISTERED) {
249 ehca_err(device, "Registration of page failed "
250 "ehca_cq=%p cq_num=%x h_ret=%lli "
251 "counter=%i act_pages=%i",
252 my_cq, my_cq->cq_number,
253 h_ret, counter, param.act_pages);
254 cq = ERR_PTR(-ENOMEM);
255 goto create_cq_exit4;
256 }
257 }
258 }
259
260 ipz_qeit_reset(&my_cq->ipz_queue);
261
262 gal = my_cq->galpas.kernel;
263 cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
264 ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
265 my_cq, my_cq->cq_number, cqx_fec);
266
267 my_cq->ib_cq.cqe = my_cq->nr_of_entries =
268 param.act_nr_of_entries - additional_cqe;
269 my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
270
271 for (i = 0; i < QP_HASHTAB_LEN; i++)
272 INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
273
274 INIT_LIST_HEAD(&my_cq->sqp_err_list);
275 INIT_LIST_HEAD(&my_cq->rqp_err_list);
276
277 if (context) {
278 struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
279 struct ehca_create_cq_resp resp;
280 memset(&resp, 0, sizeof(resp));
281 resp.cq_number = my_cq->cq_number;
282 resp.token = my_cq->token;
283 resp.ipz_queue.qe_size = ipz_queue->qe_size;
284 resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
285 resp.ipz_queue.queue_length = ipz_queue->queue_length;
286 resp.ipz_queue.pagesize = ipz_queue->pagesize;
287 resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
288 resp.fw_handle_ofs = (u32)
289 (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
290 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
291 ehca_err(device, "Copy to udata failed.");
292 cq = ERR_PTR(-EFAULT);
293 goto create_cq_exit4;
294 }
295 }
296
297 return cq;
298
299create_cq_exit4:
300 ipz_queue_dtor(NULL, &my_cq->ipz_queue);
301
302create_cq_exit3:
303 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
304 if (h_ret != H_SUCCESS)
305 ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
306 "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
307
308create_cq_exit2:
309 write_lock_irqsave(&ehca_cq_idr_lock, flags);
310 idr_remove(&ehca_cq_idr, my_cq->token);
311 write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
312
313create_cq_exit1:
314 kmem_cache_free(cq_cache, my_cq);
315
316 atomic_dec(&shca->num_cqs);
317 return cq;
318}
319
320int ehca_destroy_cq(struct ib_cq *cq)
321{
322 u64 h_ret;
323 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
324 int cq_num = my_cq->cq_number;
325 struct ib_device *device = cq->device;
326 struct ehca_shca *shca = container_of(device, struct ehca_shca,
327 ib_device);
328 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
329 unsigned long flags;
330
331 if (cq->uobject) {
332 if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
333 ehca_err(device, "Resources still referenced in "
334 "user space cq_num=%x", my_cq->cq_number);
335 return -EINVAL;
336 }
337 }
338
339 /*
340 * remove the CQ from the idr first to make sure
341 * no more interrupt tasklets will touch this CQ
342 */
343 write_lock_irqsave(&ehca_cq_idr_lock, flags);
344 idr_remove(&ehca_cq_idr, my_cq->token);
345 write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
346
347 /* now wait until all pending events have completed */
348 wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
349
350 /* nobody's using our CQ any longer -- we can destroy it */
351 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
352 if (h_ret == H_R_STATE) {
353 /* cq in err: read err data and destroy it forcibly */
354 ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
355 "state. Try to delete it forcibly.",
356 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
357 ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
358 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
359 if (h_ret == H_SUCCESS)
360 ehca_dbg(device, "cq_num=%x deleted successfully.",
361 cq_num);
362 }
363 if (h_ret != H_SUCCESS) {
364 ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
365 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
366 return ehca2ib_return_code(h_ret);
367 }
368 ipz_queue_dtor(NULL, &my_cq->ipz_queue);
369 kmem_cache_free(cq_cache, my_cq);
370
371 atomic_dec(&shca->num_cqs);
372 return 0;
373}
374
375int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
376{
377 /* TODO: proper resize needs to be done */
378 ehca_err(cq->device, "not implemented yet");
379
380 return -EFAULT;
381}
382
383int ehca_init_cq_cache(void)
384{
385 cq_cache = kmem_cache_create("ehca_cache_cq",
386 sizeof(struct ehca_cq), 0,
387 SLAB_HWCACHE_ALIGN,
388 NULL);
389 if (!cq_cache)
390 return -ENOMEM;
391 return 0;
392}
393
394void ehca_cleanup_cq_cache(void)
395{
396 kmem_cache_destroy(cq_cache);
397}
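The CQ code deleted above buckets QPs with qp_num & (QP_HASHTAB_LEN - 1), which matches qp_num % QP_HASHTAB_LEN only because the table length is a power of two (as the comment in ehca_classes.h requires). A short sketch of that bucket selection, using illustrative QP numbers:

#include <stdio.h>

#define QP_HASHTAB_LEN 8	/* must stay a power of two for the mask to work */

/* Bucket selection as in the removed ehca_cq_assign_qp()/ehca_cq_get_qp(). */
static unsigned int qp_bucket(unsigned int qp_num)
{
	return qp_num & (QP_HASHTAB_LEN - 1);
}

int main(void)
{
	unsigned int qps[] = { 3, 8, 13, 255 };	/* made-up QP numbers */
	unsigned int i;

	for (i = 0; i < sizeof(qps) / sizeof(qps[0]); i++)
		printf("qp_num=%u -> bucket %u\n", qps[i], qp_bucket(qps[i]));
	return 0;
}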
diff --git a/drivers/staging/rdma/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c
deleted file mode 100644
index 90da6747d395..000000000000
--- a/drivers/staging/rdma/ehca/ehca_eq.c
+++ /dev/null
@@ -1,189 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Event queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Heiko J Schick <schickhj@de.ibm.com>
10 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
11 *
12 *
13 * Copyright (c) 2005 IBM Corporation
14 *
15 * All rights reserved.
16 *
17 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
18 * BSD.
19 *
20 * OpenIB BSD License
21 *
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions are met:
24 *
25 * Redistributions of source code must retain the above copyright notice, this
26 * list of conditions and the following disclaimer.
27 *
28 * Redistributions in binary form must reproduce the above copyright notice,
29 * this list of conditions and the following disclaimer in the documentation
30 * and/or other materials
31 * provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
34 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
37 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
40 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
41 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46#include "ehca_classes.h"
47#include "ehca_irq.h"
48#include "ehca_iverbs.h"
49#include "ehca_qes.h"
50#include "hcp_if.h"
51#include "ipz_pt_fn.h"
52
53int ehca_create_eq(struct ehca_shca *shca,
54 struct ehca_eq *eq,
55 const enum ehca_eq_type type, const u32 length)
56{
57 int ret;
58 u64 h_ret;
59 u32 nr_pages;
60 u32 i;
61 void *vpage;
62 struct ib_device *ib_dev = &shca->ib_device;
63
64 spin_lock_init(&eq->spinlock);
65 spin_lock_init(&eq->irq_spinlock);
66 eq->is_initialized = 0;
67
68 if (type != EHCA_EQ && type != EHCA_NEQ) {
69 ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
70 return -EINVAL;
71 }
72 if (!length) {
73 ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
74 return -EINVAL;
75 }
76
77 h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
78 &eq->pf,
79 type,
80 length,
81 &eq->ipz_eq_handle,
82 &eq->length,
83 &nr_pages, &eq->ist);
84
85 if (h_ret != H_SUCCESS) {
86 ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
87 return -EINVAL;
88 }
89
90 ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
91 EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
92 if (!ret) {
93 ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
94 goto create_eq_exit1;
95 }
96
97 for (i = 0; i < nr_pages; i++) {
98 u64 rpage;
99
100 vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
101 if (!vpage)
102 goto create_eq_exit2;
103
104 rpage = __pa(vpage);
105 h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
106 eq->ipz_eq_handle,
107 &eq->pf,
108 0, 0, rpage, 1);
109
110 if (i == (nr_pages - 1)) {
111 /* last page */
112 vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
113 if (h_ret != H_SUCCESS || vpage)
114 goto create_eq_exit2;
115 } else {
116 if (h_ret != H_PAGE_REGISTERED)
117 goto create_eq_exit2;
118 }
119 }
120
121 ipz_qeit_reset(&eq->ipz_queue);
122
123 /* register interrupt handlers and initialize work queues */
124 if (type == EHCA_EQ) {
125 tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
126
127 ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
128 0, "ehca_eq",
129 (void *)shca);
130 if (ret < 0)
131 ehca_err(ib_dev, "Can't map interrupt handler.");
132 } else if (type == EHCA_NEQ) {
133 tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
134
135 ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
136 0, "ehca_neq",
137 (void *)shca);
138 if (ret < 0)
139 ehca_err(ib_dev, "Can't map interrupt handler.");
140 }
141
142 eq->is_initialized = 1;
143
144 return 0;
145
146create_eq_exit2:
147 ipz_queue_dtor(NULL, &eq->ipz_queue);
148
149create_eq_exit1:
150 hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
151
152 return -EINVAL;
153}
154
155void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
156{
157 unsigned long flags;
158 void *eqe;
159
160 spin_lock_irqsave(&eq->spinlock, flags);
161 eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
162 spin_unlock_irqrestore(&eq->spinlock, flags);
163
164 return eqe;
165}
166
167int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
168{
169 unsigned long flags;
170 u64 h_ret;
171
172 ibmebus_free_irq(eq->ist, (void *)shca);
173
174 spin_lock_irqsave(&shca_list_lock, flags);
175 eq->is_initialized = 0;
176 spin_unlock_irqrestore(&shca_list_lock, flags);
177
178 tasklet_kill(&eq->interrupt_task);
179
180 h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
181
182 if (h_ret != H_SUCCESS) {
183 ehca_err(&shca->ib_device, "Can't free EQ resources.");
184 return -EINVAL;
185 }
186 ipz_queue_dtor(NULL, &eq->ipz_queue);
187
188 return 0;
189}
diff --git a/drivers/staging/rdma/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c
deleted file mode 100644
index e8b1bb65797a..000000000000
--- a/drivers/staging/rdma/ehca/ehca_hca.c
+++ /dev/null
@@ -1,414 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * HCA query functions
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include <linux/gfp.h>
43
44#include "ehca_tools.h"
45#include "ehca_iverbs.h"
46#include "hcp_if.h"
47
48static unsigned int limit_uint(unsigned int value)
49{
50 return min_t(unsigned int, value, INT_MAX);
51}
52
53int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
54 struct ib_udata *uhw)
55{
56 int i, ret = 0;
57 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
58 ib_device);
59 struct hipz_query_hca *rblock;
60
61 static const u32 cap_mapping[] = {
62 IB_DEVICE_RESIZE_MAX_WR, HCA_CAP_WQE_RESIZE,
63 IB_DEVICE_BAD_PKEY_CNTR, HCA_CAP_BAD_P_KEY_CTR,
64 IB_DEVICE_BAD_QKEY_CNTR, HCA_CAP_Q_KEY_VIOL_CTR,
65 IB_DEVICE_RAW_MULTI, HCA_CAP_RAW_PACKET_MCAST,
66 IB_DEVICE_AUTO_PATH_MIG, HCA_CAP_AUTO_PATH_MIG,
67 IB_DEVICE_CHANGE_PHY_PORT, HCA_CAP_SQD_RTS_PORT_CHANGE,
68 IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
69 IB_DEVICE_CURR_QP_STATE_MOD, HCA_CAP_CUR_QP_STATE_MOD,
70 IB_DEVICE_SHUTDOWN_PORT, HCA_CAP_SHUTDOWN_PORT,
71 IB_DEVICE_INIT_TYPE, HCA_CAP_INIT_TYPE,
72 IB_DEVICE_PORT_ACTIVE_EVENT, HCA_CAP_PORT_ACTIVE_EVENT,
73 };
74
75 if (uhw->inlen || uhw->outlen)
76 return -EINVAL;
77
78 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
79 if (!rblock) {
80 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
81 return -ENOMEM;
82 }
83
84 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
85 ehca_err(&shca->ib_device, "Can't query device properties");
86 ret = -EINVAL;
87 goto query_device1;
88 }
89
90 memset(props, 0, sizeof(struct ib_device_attr));
91 props->page_size_cap = shca->hca_cap_mr_pgsize;
92 props->fw_ver = rblock->hw_ver;
93 props->max_mr_size = rblock->max_mr_size;
94 props->vendor_id = rblock->vendor_id >> 8;
95 props->vendor_part_id = rblock->vendor_part_id >> 16;
96 props->hw_ver = rblock->hw_ver;
97 props->max_qp = limit_uint(rblock->max_qp);
98 props->max_qp_wr = limit_uint(rblock->max_wqes_wq);
99 props->max_sge = limit_uint(rblock->max_sge);
100 props->max_sge_rd = limit_uint(rblock->max_sge_rd);
101 props->max_cq = limit_uint(rblock->max_cq);
102 props->max_cqe = limit_uint(rblock->max_cqe);
103 props->max_mr = limit_uint(rblock->max_mr);
104 props->max_mw = limit_uint(rblock->max_mw);
105 props->max_pd = limit_uint(rblock->max_pd);
106 props->max_ah = limit_uint(rblock->max_ah);
107 props->max_ee = limit_uint(rblock->max_rd_ee_context);
108 props->max_rdd = limit_uint(rblock->max_rd_domain);
109 props->max_fmr = limit_uint(rblock->max_mr);
110 props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp);
111 props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context);
112 props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
113 props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
114 props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
115
116 if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
117 props->max_srq = limit_uint(props->max_qp);
118 props->max_srq_wr = limit_uint(props->max_qp_wr);
119 props->max_srq_sge = 3;
120 }
121
122 props->max_pkeys = 16;
123 /* Some FW versions say 0 here; insert sensible value in that case */
124 props->local_ca_ack_delay = rblock->local_ca_ack_delay ?
125 min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
126 props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
127 props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
128 props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
129 props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
130 props->max_total_mcast_qp_attach
131 = limit_uint(rblock->max_total_mcast_qp_attach);
132
133 /* translate device capabilities */
134 props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
135 IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
136 for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
137 if (rblock->hca_cap_indicators & cap_mapping[i + 1])
138 props->device_cap_flags |= cap_mapping[i];
139
140query_device1:
141 ehca_free_fw_ctrlblock(rblock);
142
143 return ret;
144}
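The cap_mapping table above pairs each IB capability flag with the firmware bit that advertises it, and the loop near the end of ehca_query_device walks those pairs two entries at a time. A self-contained sketch of that translation pattern (the EX_* flag values are made up; the real IB_DEVICE_*/HCA_CAP_* constants differ):

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values only. */
#define EX_IB_RESIZE_MAX_WR   0x1
#define EX_IB_BAD_PKEY_CNTR   0x2
#define EX_HCA_WQE_RESIZE     0x100
#define EX_HCA_BAD_PKEY_CTR   0x200

static const uint32_t ex_cap_mapping[] = {
	EX_IB_RESIZE_MAX_WR, EX_HCA_WQE_RESIZE,   /* IB flag, firmware bit */
	EX_IB_BAD_PKEY_CNTR, EX_HCA_BAD_PKEY_CTR,
};

static uint32_t ex_translate_caps(uint32_t fw_caps)
{
	uint32_t ib_caps = 0;
	size_t i;

	/* Walk the table pairwise: if firmware sets bit i+1, report flag i. */
	for (i = 0; i < sizeof(ex_cap_mapping) / sizeof(ex_cap_mapping[0]); i += 2)
		if (fw_caps & ex_cap_mapping[i + 1])
			ib_caps |= ex_cap_mapping[i];
	return ib_caps;
}

int main(void)
{
	/* Firmware advertises WQE resize only -> only RESIZE_MAX_WR is reported. */
	printf("0x%x\n", ex_translate_caps(EX_HCA_WQE_RESIZE));
	return 0;
}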
145
146static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
147{
148 switch (fw_mtu) {
149 case 0x1:
150 return IB_MTU_256;
151 case 0x2:
152 return IB_MTU_512;
153 case 0x3:
154 return IB_MTU_1024;
155 case 0x4:
156 return IB_MTU_2048;
157 case 0x5:
158 return IB_MTU_4096;
159 default:
160 ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
161 fw_mtu);
162 return 0;
163 }
164}
165
166static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
167{
168 switch (vl_cap) {
169 case 0x1:
170 return 1;
171 case 0x2:
172 return 2;
173 case 0x3:
174 return 4;
175 case 0x4:
176 return 8;
177 case 0x5:
178 return 15;
179 default:
 180 ehca_err(&shca->ib_device, "Invalid VL capability: %x.",
181 vl_cap);
182 return 0;
183 }
184}
185
186int ehca_query_port(struct ib_device *ibdev,
187 u8 port, struct ib_port_attr *props)
188{
189 int ret = 0;
190 u64 h_ret;
191 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
192 ib_device);
193 struct hipz_query_port *rblock;
194
195 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
196 if (!rblock) {
197 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
198 return -ENOMEM;
199 }
200
201 h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
202 if (h_ret != H_SUCCESS) {
203 ehca_err(&shca->ib_device, "Can't query port properties");
204 ret = -EINVAL;
205 goto query_port1;
206 }
207
208 memset(props, 0, sizeof(struct ib_port_attr));
209
210 props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
211 props->port_cap_flags = rblock->capability_mask;
212 props->gid_tbl_len = rblock->gid_tbl_len;
213 if (rblock->max_msg_sz)
214 props->max_msg_sz = rblock->max_msg_sz;
215 else
216 props->max_msg_sz = 0x1 << 31;
217 props->bad_pkey_cntr = rblock->bad_pkey_cntr;
218 props->qkey_viol_cntr = rblock->qkey_viol_cntr;
219 props->pkey_tbl_len = rblock->pkey_tbl_len;
220 props->lid = rblock->lid;
221 props->sm_lid = rblock->sm_lid;
222 props->lmc = rblock->lmc;
223 props->sm_sl = rblock->sm_sl;
224 props->subnet_timeout = rblock->subnet_timeout;
225 props->init_type_reply = rblock->init_type_reply;
226 props->max_vl_num = map_number_of_vls(shca, rblock->vl_cap);
227
228 if (rblock->state && rblock->phys_width) {
229 props->phys_state = rblock->phys_pstate;
230 props->state = rblock->phys_state;
231 props->active_width = rblock->phys_width;
232 props->active_speed = rblock->phys_speed;
233 } else {
234 /* old firmware releases don't report physical
235 * port info, so use default values
236 */
237 props->phys_state = 5;
238 props->state = rblock->state;
239 props->active_width = IB_WIDTH_12X;
240 props->active_speed = IB_SPEED_SDR;
241 }
242
243query_port1:
244 ehca_free_fw_ctrlblock(rblock);
245
246 return ret;
247}
248
249int ehca_query_sma_attr(struct ehca_shca *shca,
250 u8 port, struct ehca_sma_attr *attr)
251{
252 int ret = 0;
253 u64 h_ret;
254 struct hipz_query_port *rblock;
255
256 rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
257 if (!rblock) {
258 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
259 return -ENOMEM;
260 }
261
262 h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
263 if (h_ret != H_SUCCESS) {
264 ehca_err(&shca->ib_device, "Can't query port properties");
265 ret = -EINVAL;
266 goto query_sma_attr1;
267 }
268
269 memset(attr, 0, sizeof(struct ehca_sma_attr));
270
271 attr->lid = rblock->lid;
272 attr->lmc = rblock->lmc;
273 attr->sm_sl = rblock->sm_sl;
274 attr->sm_lid = rblock->sm_lid;
275
276 attr->pkey_tbl_len = rblock->pkey_tbl_len;
277 memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));
278
279query_sma_attr1:
280 ehca_free_fw_ctrlblock(rblock);
281
282 return ret;
283}
284
285int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
286{
287 int ret = 0;
288 u64 h_ret;
289 struct ehca_shca *shca;
290 struct hipz_query_port *rblock;
291
292 shca = container_of(ibdev, struct ehca_shca, ib_device);
 293 if (index >= 16) {
294 ehca_err(&shca->ib_device, "Invalid index: %x.", index);
295 return -EINVAL;
296 }
297
298 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
299 if (!rblock) {
300 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
301 return -ENOMEM;
302 }
303
304 h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
305 if (h_ret != H_SUCCESS) {
306 ehca_err(&shca->ib_device, "Can't query port properties");
307 ret = -EINVAL;
308 goto query_pkey1;
309 }
310
311 memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
312
313query_pkey1:
314 ehca_free_fw_ctrlblock(rblock);
315
316 return ret;
317}
318
319int ehca_query_gid(struct ib_device *ibdev, u8 port,
320 int index, union ib_gid *gid)
321{
322 int ret = 0;
323 u64 h_ret;
324 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
325 ib_device);
326 struct hipz_query_port *rblock;
327
328 if (index < 0 || index > 255) {
329 ehca_err(&shca->ib_device, "Invalid index: %x.", index);
330 return -EINVAL;
331 }
332
333 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
334 if (!rblock) {
335 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
336 return -ENOMEM;
337 }
338
339 h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
340 if (h_ret != H_SUCCESS) {
341 ehca_err(&shca->ib_device, "Can't query port properties");
342 ret = -EINVAL;
343 goto query_gid1;
344 }
345
346 memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
347 memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
348
349query_gid1:
350 ehca_free_fw_ctrlblock(rblock);
351
352 return ret;
353}
354
355static const u32 allowed_port_caps = (
356 IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
357 IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
358 IB_PORT_VENDOR_CLASS_SUP);
359
360int ehca_modify_port(struct ib_device *ibdev,
361 u8 port, int port_modify_mask,
362 struct ib_port_modify *props)
363{
364 int ret = 0;
365 struct ehca_shca *shca;
366 struct hipz_query_port *rblock;
367 u32 cap;
368 u64 hret;
369
370 shca = container_of(ibdev, struct ehca_shca, ib_device);
371 if ((props->set_port_cap_mask | props->clr_port_cap_mask)
372 & ~allowed_port_caps) {
373 ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
374 "set=%x clr=%x allowed=%x", props->set_port_cap_mask,
375 props->clr_port_cap_mask, allowed_port_caps);
376 return -EINVAL;
377 }
378
379 if (mutex_lock_interruptible(&shca->modify_mutex))
380 return -ERESTARTSYS;
381
382 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
383 if (!rblock) {
384 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
385 ret = -ENOMEM;
386 goto modify_port1;
387 }
388
389 hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
390 if (hret != H_SUCCESS) {
391 ehca_err(&shca->ib_device, "Can't query port properties");
392 ret = -EINVAL;
393 goto modify_port2;
394 }
395
396 cap = (rblock->capability_mask | props->set_port_cap_mask)
397 & ~props->clr_port_cap_mask;
398
399 hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
400 cap, props->init_type, port_modify_mask);
401 if (hret != H_SUCCESS) {
402 ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
403 hret);
404 ret = -EINVAL;
405 }
406
407modify_port2:
408 ehca_free_fw_ctrlblock(rblock);
409
410modify_port1:
411 mutex_unlock(&shca->modify_mutex);
412
413 return ret;
414}
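ehca_modify_port above rejects any request that touches bits outside allowed_port_caps and otherwise computes the new capability mask as (current | set) & ~clear before passing it to hipz_h_modify_port under modify_mutex. A self-contained sketch of just that mask arithmetic (EX_ALLOWED_CAPS and the sample values are illustrative, not the real IB_PORT_* bits):

#include <stdint.h>
#include <stdio.h>

#define EX_ALLOWED_CAPS 0x003Fu	/* pretend only the low 6 bits may change */

/* Returns -1 if the request touches non-changeable bits, else the new mask. */
static int ex_new_port_caps(uint32_t cur, uint32_t set, uint32_t clr)
{
	if ((set | clr) & ~EX_ALLOWED_CAPS)
		return -1;
	return (int)((cur | set) & ~clr);
}

int main(void)
{
	printf("%d\n", ex_new_port_caps(0x0011, 0x0002, 0x0010)); /* prints 3 (0x0003) */
	printf("%d\n", ex_new_port_caps(0x0011, 0x8000, 0x0000)); /* prints -1: untouchable bit */
	return 0;
}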
diff --git a/drivers/staging/rdma/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c
deleted file mode 100644
index 8615d7cf7e01..000000000000
--- a/drivers/staging/rdma/ehca/ehca_irq.c
+++ /dev/null
@@ -1,870 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Functions for EQs, NEQs and interrupts
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
9 * Joachim Fenkes <fenkes@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#include <linux/slab.h>
45#include <linux/smpboot.h>
46
47#include "ehca_classes.h"
48#include "ehca_irq.h"
49#include "ehca_iverbs.h"
50#include "ehca_tools.h"
51#include "hcp_if.h"
52#include "hipz_fns.h"
53#include "ipz_pt_fn.h"
54
55#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
56#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
57#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM( 2, 7)
58#define EQE_CQ_NUMBER EHCA_BMASK_IBM( 8, 31)
59#define EQE_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
60#define EQE_QP_TOKEN EHCA_BMASK_IBM(32, 63)
61#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32, 63)
62
63#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
64#define NEQE_EVENT_CODE EHCA_BMASK_IBM( 2, 7)
65#define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15)
66#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
67#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16)
68#define NEQE_SPECIFIC_EVENT EHCA_BMASK_IBM(16, 23)
69
70#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63)
71#define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7)
72
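The EQE_*/NEQE_* masks above are built with EHCA_BMASK_IBM(from, to), which uses IBM bit numbering: bit 0 is the most significant bit of the 64-bit event-queue entry and bit 63 the least significant, and EHCA_BMASK_GET pulls the named field out of eqe->entry. The real macros live in ehca_tools.h; a self-contained stand-in for the extraction looks roughly like this (ex_bmask_get and the sample value are illustrative):

#include <stdint.h>
#include <stdio.h>

/*
 * IBM bit order: bit 0 is the MSB of a 64-bit word, bit 63 the LSB.
 * Extract bits from..to (inclusive); assumes to - from + 1 < 64.
 */
static uint64_t ex_bmask_get(unsigned int from, unsigned int to, uint64_t word)
{
	unsigned int width = to - from + 1;
	unsigned int shift = 63 - to;

	return (word >> shift) & ((1ULL << width) - 1);
}

int main(void)
{
	uint64_t eqe = 0x4000001200000042ULL;

	/* bit 1 (completion flag), bits 8..31 (CQ number), bits 32..63 (token) */
	printf("completion=%llu cq=0x%llx token=0x%llx\n",
	       (unsigned long long)ex_bmask_get(1, 1, eqe),
	       (unsigned long long)ex_bmask_get(8, 31, eqe),
	       (unsigned long long)ex_bmask_get(32, 63, eqe));
	return 0;
}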
73static void queue_comp_task(struct ehca_cq *__cq);
74
75static struct ehca_comp_pool *pool;
76
77static inline void comp_event_callback(struct ehca_cq *cq)
78{
79 if (!cq->ib_cq.comp_handler)
80 return;
81
82 spin_lock(&cq->cb_lock);
83 cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
84 spin_unlock(&cq->cb_lock);
85
86 return;
87}
88
89static void print_error_data(struct ehca_shca *shca, void *data,
90 u64 *rblock, int length)
91{
92 u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
93 u64 resource = rblock[1];
94
95 switch (type) {
96 case 0x1: /* Queue Pair */
97 {
98 struct ehca_qp *qp = (struct ehca_qp *)data;
99
100 /* only print error data if AER is set */
101 if (rblock[6] == 0)
102 return;
103
104 ehca_err(&shca->ib_device,
105 "QP 0x%x (resource=%llx) has errors.",
106 qp->ib_qp.qp_num, resource);
107 break;
108 }
109 case 0x4: /* Completion Queue */
110 {
111 struct ehca_cq *cq = (struct ehca_cq *)data;
112
113 ehca_err(&shca->ib_device,
114 "CQ 0x%x (resource=%llx) has errors.",
115 cq->cq_number, resource);
116 break;
117 }
118 default:
119 ehca_err(&shca->ib_device,
120 "Unknown error type: %llx on %s.",
121 type, shca->ib_device.name);
122 break;
123 }
124
125 ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
126 ehca_err(&shca->ib_device, "EHCA ----- error data begin "
127 "---------------------------------------------------");
128 ehca_dmp(rblock, length, "resource=%llx", resource);
129 ehca_err(&shca->ib_device, "EHCA ----- error data end "
130 "----------------------------------------------------");
131
132 return;
133}
134
135int ehca_error_data(struct ehca_shca *shca, void *data,
136 u64 resource)
137{
138
139 unsigned long ret;
140 u64 *rblock;
141 unsigned long block_count;
142
143 rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
144 if (!rblock) {
145 ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
146 ret = -ENOMEM;
147 goto error_data1;
148 }
149
150 /* rblock must be 4K aligned and should be 4K large */
151 ret = hipz_h_error_data(shca->ipz_hca_handle,
152 resource,
153 rblock,
154 &block_count);
155
156 if (ret == H_R_STATE)
157 ehca_err(&shca->ib_device,
158 "No error data is available: %llx.", resource);
159 else if (ret == H_SUCCESS) {
160 int length;
161
162 length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
163
164 if (length > EHCA_PAGESIZE)
165 length = EHCA_PAGESIZE;
166
167 print_error_data(shca, data, rblock, length);
168 } else
169 ehca_err(&shca->ib_device,
170 "Error data could not be fetched: %llx", resource);
171
172 ehca_free_fw_ctrlblock(rblock);
173
174error_data1:
175 return ret;
176
177}
178
179static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
180 enum ib_event_type event_type)
181{
182 struct ib_event event;
183
184 /* PATH_MIG without the QP ever having been armed is false alarm */
185 if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)
186 return;
187
188 event.device = &shca->ib_device;
189 event.event = event_type;
190
191 if (qp->ext_type == EQPT_SRQ) {
192 if (!qp->ib_srq.event_handler)
193 return;
194
195 event.element.srq = &qp->ib_srq;
196 qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
197 } else {
198 if (!qp->ib_qp.event_handler)
199 return;
200
201 event.element.qp = &qp->ib_qp;
202 qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
203 }
204}
205
206static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
207 enum ib_event_type event_type, int fatal)
208{
209 struct ehca_qp *qp;
210 u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
211
212 read_lock(&ehca_qp_idr_lock);
213 qp = idr_find(&ehca_qp_idr, token);
214 if (qp)
215 atomic_inc(&qp->nr_events);
216 read_unlock(&ehca_qp_idr_lock);
217
218 if (!qp)
219 return;
220
221 if (fatal)
222 ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
223
224 dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
225 IB_EVENT_SRQ_ERR : event_type);
226
227 /*
228 * eHCA only processes one WQE at a time for SRQ base QPs,
229 * so the last WQE has been processed as soon as the QP enters
230 * error state.
231 */
232 if (fatal && qp->ext_type == EQPT_SRQBASE)
233 dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
234
235 if (atomic_dec_and_test(&qp->nr_events))
236 wake_up(&qp->wait_completion);
237 return;
238}
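qp_event_callback above (like cq_event_callback just below) guards the object against a racing destroy: it looks the token up in the IDR under the read lock, bumps nr_events before dropping the lock, and wakes wait_completion once the count falls back to zero so a destroyer sleeping on that count can continue. A self-contained sketch of the counting half of that protocol using C11 atomics (the IDR lookup and wait queue are left out; ex_* names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct ex_cq {
	atomic_int nr_events;	/* events currently referencing this CQ */
};

/* Event side: hold a reference across the callback, report when drained. */
static int ex_handle_event(struct ex_cq *cq)
{
	atomic_fetch_add(&cq->nr_events, 1);	/* taken while the lookup lock is held */
	/* ... deliver the completion/async callback here ... */
	return atomic_fetch_sub(&cq->nr_events, 1) == 1;	/* last one: wake the destroyer */
}

int main(void)
{
	struct ex_cq cq = { 0 };

	/* The destroy path would sleep until this reports the count hit zero. */
	printf("drained=%d\n", ex_handle_event(&cq));
	return 0;
}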
239
240static void cq_event_callback(struct ehca_shca *shca,
241 u64 eqe)
242{
243 struct ehca_cq *cq;
244 u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
245
246 read_lock(&ehca_cq_idr_lock);
247 cq = idr_find(&ehca_cq_idr, token);
248 if (cq)
249 atomic_inc(&cq->nr_events);
250 read_unlock(&ehca_cq_idr_lock);
251
252 if (!cq)
253 return;
254
255 ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
256
257 if (atomic_dec_and_test(&cq->nr_events))
258 wake_up(&cq->wait_completion);
259
260 return;
261}
262
263static void parse_identifier(struct ehca_shca *shca, u64 eqe)
264{
265 u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
266
267 switch (identifier) {
268 case 0x02: /* path migrated */
269 qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
270 break;
271 case 0x03: /* communication established */
272 qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
273 break;
274 case 0x04: /* send queue drained */
275 qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
276 break;
277 case 0x05: /* QP error */
278 case 0x06: /* QP error */
279 qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
280 break;
281 case 0x07: /* CQ error */
282 case 0x08: /* CQ error */
283 cq_event_callback(shca, eqe);
284 break;
285 case 0x09: /* MRMWPTE error */
286 ehca_err(&shca->ib_device, "MRMWPTE error.");
287 break;
288 case 0x0A: /* port event */
289 ehca_err(&shca->ib_device, "Port event.");
290 break;
291 case 0x0B: /* MR access error */
292 ehca_err(&shca->ib_device, "MR access error.");
293 break;
294 case 0x0C: /* EQ error */
295 ehca_err(&shca->ib_device, "EQ error.");
296 break;
297 case 0x0D: /* P/Q_Key mismatch */
298 ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
299 break;
300 case 0x10: /* sampling complete */
301 ehca_err(&shca->ib_device, "Sampling complete.");
302 break;
303 case 0x11: /* unaffiliated access error */
304 ehca_err(&shca->ib_device, "Unaffiliated access error.");
305 break;
306 case 0x12: /* path migrating */
307 ehca_err(&shca->ib_device, "Path migrating.");
308 break;
309 case 0x13: /* interface trace stopped */
310 ehca_err(&shca->ib_device, "Interface trace stopped.");
311 break;
312 case 0x14: /* first error capture info available */
313 ehca_info(&shca->ib_device, "First error capture available");
314 break;
315 case 0x15: /* SRQ limit reached */
316 qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
317 break;
318 default:
319 ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
320 identifier, shca->ib_device.name);
321 break;
322 }
323
324 return;
325}
326
327static void dispatch_port_event(struct ehca_shca *shca, int port_num,
328 enum ib_event_type type, const char *msg)
329{
330 struct ib_event event;
331
332 ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
333 event.device = &shca->ib_device;
334 event.event = type;
335 event.element.port_num = port_num;
336 ib_dispatch_event(&event);
337}
338
339static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
340{
341 struct ehca_sma_attr new_attr;
342 struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;
343
344 ehca_query_sma_attr(shca, port_num, &new_attr);
345
346 if (new_attr.sm_sl != old_attr->sm_sl ||
347 new_attr.sm_lid != old_attr->sm_lid)
348 dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
349 "SM changed");
350
351 if (new_attr.lid != old_attr->lid ||
352 new_attr.lmc != old_attr->lmc)
353 dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
354 "LID changed");
355
356 if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
357 memcmp(new_attr.pkeys, old_attr->pkeys,
358 sizeof(u16) * new_attr.pkey_tbl_len))
359 dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
360 "P_Key changed");
361
362 *old_attr = new_attr;
363}
364
365/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
366static int replay_modify_qp(struct ehca_sport *sport)
367{
368 int aqp1_destroyed;
369 unsigned long flags;
370
371 spin_lock_irqsave(&sport->mod_sqp_lock, flags);
372
373 aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
374
375 if (sport->ibqp_sqp[IB_QPT_SMI])
376 ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
377 if (!aqp1_destroyed)
378 ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
379
380 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
381
382 return aqp1_destroyed;
383}
384
385static void parse_ec(struct ehca_shca *shca, u64 eqe)
386{
387 u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
388 u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
389 u8 spec_event;
390 struct ehca_sport *sport = &shca->sport[port - 1];
391
392 switch (ec) {
393 case 0x30: /* port availability change */
394 if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
395 /* only replay modify_qp calls in autodetect mode;
396 * if AQP1 was destroyed, the port is already down
397 * again and we can drop the event.
398 */
399 if (ehca_nr_ports < 0)
400 if (replay_modify_qp(sport))
401 break;
402
403 sport->port_state = IB_PORT_ACTIVE;
404 dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
405 "is active");
406 ehca_query_sma_attr(shca, port, &sport->saved_attr);
407 } else {
408 sport->port_state = IB_PORT_DOWN;
409 dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
410 "is inactive");
411 }
412 break;
413 case 0x31:
414 /* port configuration change
415 * disruptive change is caused by
416 * LID, PKEY or SM change
417 */
418 if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
419 ehca_warn(&shca->ib_device, "disruptive port "
420 "%d configuration change", port);
421
422 sport->port_state = IB_PORT_DOWN;
423 dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
424 "is inactive");
425
426 sport->port_state = IB_PORT_ACTIVE;
427 dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
428 "is active");
429 ehca_query_sma_attr(shca, port,
430 &sport->saved_attr);
431 } else
432 notify_port_conf_change(shca, port);
433 break;
434 case 0x32: /* adapter malfunction */
435 ehca_err(&shca->ib_device, "Adapter malfunction.");
436 break;
437 case 0x33: /* trace stopped */
 438 ehca_err(&shca->ib_device, "Trace stopped.");
439 break;
440 case 0x34: /* util async event */
441 spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
442 if (spec_event == 0x80) /* client reregister required */
443 dispatch_port_event(shca, port,
444 IB_EVENT_CLIENT_REREGISTER,
445 "client reregister req.");
446 else
447 ehca_warn(&shca->ib_device, "Unknown util async "
448 "event %x on port %x", spec_event, port);
449 break;
450 default:
451 ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
452 ec, shca->ib_device.name);
453 break;
454 }
455
456 return;
457}
458
459static inline void reset_eq_pending(struct ehca_cq *cq)
460{
461 u64 CQx_EP;
462 struct h_galpa gal = cq->galpas.kernel;
463
464 hipz_galpa_store_cq(gal, cqx_ep, 0x0);
465 CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
466
467 return;
468}
469
470irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
471{
472 struct ehca_shca *shca = (struct ehca_shca*)dev_id;
473
474 tasklet_hi_schedule(&shca->neq.interrupt_task);
475
476 return IRQ_HANDLED;
477}
478
479void ehca_tasklet_neq(unsigned long data)
480{
481 struct ehca_shca *shca = (struct ehca_shca*)data;
482 struct ehca_eqe *eqe;
483 u64 ret;
484
485 eqe = ehca_poll_eq(shca, &shca->neq);
486
487 while (eqe) {
488 if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
489 parse_ec(shca, eqe->entry);
490
491 eqe = ehca_poll_eq(shca, &shca->neq);
492 }
493
494 ret = hipz_h_reset_event(shca->ipz_hca_handle,
495 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
496
497 if (ret != H_SUCCESS)
498 ehca_err(&shca->ib_device, "Can't clear notification events.");
499
500 return;
501}
502
503irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
504{
505 struct ehca_shca *shca = (struct ehca_shca*)dev_id;
506
507 tasklet_hi_schedule(&shca->eq.interrupt_task);
508
509 return IRQ_HANDLED;
510}
511
512
513static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
514{
515 u64 eqe_value;
516 u32 token;
517 struct ehca_cq *cq;
518
519 eqe_value = eqe->entry;
520 ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
521 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
522 ehca_dbg(&shca->ib_device, "Got completion event");
523 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
524 read_lock(&ehca_cq_idr_lock);
525 cq = idr_find(&ehca_cq_idr, token);
526 if (cq)
527 atomic_inc(&cq->nr_events);
528 read_unlock(&ehca_cq_idr_lock);
529 if (cq == NULL) {
530 ehca_err(&shca->ib_device,
531 "Invalid eqe for non-existing cq token=%x",
532 token);
533 return;
534 }
535 reset_eq_pending(cq);
536 if (ehca_scaling_code)
537 queue_comp_task(cq);
538 else {
539 comp_event_callback(cq);
540 if (atomic_dec_and_test(&cq->nr_events))
541 wake_up(&cq->wait_completion);
542 }
543 } else {
544 ehca_dbg(&shca->ib_device, "Got non completion event");
545 parse_identifier(shca, eqe_value);
546 }
547}
548
549void ehca_process_eq(struct ehca_shca *shca, int is_irq)
550{
551 struct ehca_eq *eq = &shca->eq;
552 struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
553 u64 eqe_value, ret;
554 int eqe_cnt, i;
555 int eq_empty = 0;
556
557 spin_lock(&eq->irq_spinlock);
558 if (is_irq) {
559 const int max_query_cnt = 100;
560 int query_cnt = 0;
561 int int_state = 1;
562 do {
563 int_state = hipz_h_query_int_state(
564 shca->ipz_hca_handle, eq->ist);
565 query_cnt++;
566 iosync();
567 } while (int_state && query_cnt < max_query_cnt);
568 if (unlikely((query_cnt == max_query_cnt)))
569 ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
570 int_state, query_cnt);
571 }
572
573 /* read out all eqes */
574 eqe_cnt = 0;
575 do {
576 u32 token;
577 eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
578 if (!eqe_cache[eqe_cnt].eqe)
579 break;
580 eqe_value = eqe_cache[eqe_cnt].eqe->entry;
581 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
582 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
583 read_lock(&ehca_cq_idr_lock);
584 eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
585 if (eqe_cache[eqe_cnt].cq)
586 atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
587 read_unlock(&ehca_cq_idr_lock);
588 if (!eqe_cache[eqe_cnt].cq) {
589 ehca_err(&shca->ib_device,
590 "Invalid eqe for non-existing cq "
591 "token=%x", token);
592 continue;
593 }
594 } else
595 eqe_cache[eqe_cnt].cq = NULL;
596 eqe_cnt++;
597 } while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
598 if (!eqe_cnt) {
599 if (is_irq)
600 ehca_dbg(&shca->ib_device,
601 "No eqe found for irq event");
602 goto unlock_irq_spinlock;
603 } else if (!is_irq) {
604 ret = hipz_h_eoi(eq->ist);
605 if (ret != H_SUCCESS)
606 ehca_err(&shca->ib_device,
607 "bad return code EOI -rc = %lld\n", ret);
608 ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
609 }
610 if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
611 ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
612 /* enable irq for new packets */
613 for (i = 0; i < eqe_cnt; i++) {
614 if (eq->eqe_cache[i].cq)
615 reset_eq_pending(eq->eqe_cache[i].cq);
616 }
617 /* check eq */
618 spin_lock(&eq->spinlock);
619 eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
620 spin_unlock(&eq->spinlock);
621 /* call completion handler for cached eqes */
622 for (i = 0; i < eqe_cnt; i++)
623 if (eq->eqe_cache[i].cq) {
624 if (ehca_scaling_code)
625 queue_comp_task(eq->eqe_cache[i].cq);
626 else {
627 struct ehca_cq *cq = eq->eqe_cache[i].cq;
628 comp_event_callback(cq);
629 if (atomic_dec_and_test(&cq->nr_events))
630 wake_up(&cq->wait_completion);
631 }
632 } else {
633 ehca_dbg(&shca->ib_device, "Got non completion event");
634 parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
635 }
636 /* poll eq if not empty */
637 if (eq_empty)
638 goto unlock_irq_spinlock;
639 do {
640 struct ehca_eqe *eqe;
641 eqe = ehca_poll_eq(shca, &shca->eq);
642 if (!eqe)
643 break;
644 process_eqe(shca, eqe);
645 } while (1);
646
647unlock_irq_spinlock:
648 spin_unlock(&eq->irq_spinlock);
649}
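ehca_process_eq above works in two phases under irq_spinlock: it first drains up to EHCA_EQE_CACHE_SIZE entries into eqe_cache, taking a reference on each completion CQ, then re-arms the per-CQ pending bits and runs the handlers, and finally keeps polling if the queue still held entries. A self-contained sketch of that drain-then-process shape (a toy counter stands in for ehca_poll_eq, and the full-batch test stands in for the driver's explicit queue-empty peek):

#include <stdio.h>

#define EX_BATCH 4

/* Toy event source: returns events n..1 and then 0 (queue empty). */
static int ex_next_event(int *remaining)
{
	return *remaining ? (*remaining)-- : 0;
}

int main(void)
{
	int remaining = 6, cache[EX_BATCH], cnt, i;

	do {
		/* Phase 1: drain up to EX_BATCH events into the cache. */
		for (cnt = 0; cnt < EX_BATCH; cnt++) {
			cache[cnt] = ex_next_event(&remaining);
			if (!cache[cnt])
				break;
		}
		/* Phase 2: process the cached batch in order. */
		for (i = 0; i < cnt; i++)
			printf("event %d\n", cache[i]);
	} while (cnt == EX_BATCH);	/* batch was full: queue may hold more */

	return 0;
}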
650
651void ehca_tasklet_eq(unsigned long data)
652{
653 ehca_process_eq((struct ehca_shca*)data, 1);
654}
655
656static int find_next_online_cpu(struct ehca_comp_pool *pool)
657{
658 int cpu;
659 unsigned long flags;
660
661 WARN_ON_ONCE(!in_interrupt());
662 if (ehca_debug_level >= 3)
663 ehca_dmp(cpu_online_mask, cpumask_size(), "");
664
665 spin_lock_irqsave(&pool->last_cpu_lock, flags);
666 do {
667 cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
668 if (cpu >= nr_cpu_ids)
669 cpu = cpumask_first(cpu_online_mask);
670 pool->last_cpu = cpu;
671 } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);
672 spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
673
674 return cpu;
675}
676
677static void __queue_comp_task(struct ehca_cq *__cq,
678 struct ehca_cpu_comp_task *cct,
679 struct task_struct *thread)
680{
681 unsigned long flags;
682
683 spin_lock_irqsave(&cct->task_lock, flags);
684 spin_lock(&__cq->task_lock);
685
686 if (__cq->nr_callbacks == 0) {
687 __cq->nr_callbacks++;
688 list_add_tail(&__cq->entry, &cct->cq_list);
689 cct->cq_jobs++;
690 wake_up_process(thread);
691 } else
692 __cq->nr_callbacks++;
693
694 spin_unlock(&__cq->task_lock);
695 spin_unlock_irqrestore(&cct->task_lock, flags);
696}
697
698static void queue_comp_task(struct ehca_cq *__cq)
699{
700 int cpu_id;
701 struct ehca_cpu_comp_task *cct;
702 struct task_struct *thread;
703 int cq_jobs;
704 unsigned long flags;
705
706 cpu_id = find_next_online_cpu(pool);
707 BUG_ON(!cpu_online(cpu_id));
708
709 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
710 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
711 BUG_ON(!cct || !thread);
712
713 spin_lock_irqsave(&cct->task_lock, flags);
714 cq_jobs = cct->cq_jobs;
715 spin_unlock_irqrestore(&cct->task_lock, flags);
716 if (cq_jobs > 0) {
717 cpu_id = find_next_online_cpu(pool);
718 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
719 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
720 BUG_ON(!cct || !thread);
721 }
722 __queue_comp_task(__cq, cct, thread);
723}
724
725static void run_comp_task(struct ehca_cpu_comp_task *cct)
726{
727 struct ehca_cq *cq;
728
729 while (!list_empty(&cct->cq_list)) {
730 cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
731 spin_unlock_irq(&cct->task_lock);
732
733 comp_event_callback(cq);
734 if (atomic_dec_and_test(&cq->nr_events))
735 wake_up(&cq->wait_completion);
736
737 spin_lock_irq(&cct->task_lock);
738 spin_lock(&cq->task_lock);
739 cq->nr_callbacks--;
740 if (!cq->nr_callbacks) {
741 list_del_init(cct->cq_list.next);
742 cct->cq_jobs--;
743 }
744 spin_unlock(&cq->task_lock);
745 }
746}
747
748static void comp_task_park(unsigned int cpu)
749{
750 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
751 struct ehca_cpu_comp_task *target;
752 struct task_struct *thread;
753 struct ehca_cq *cq, *tmp;
754 LIST_HEAD(list);
755
756 spin_lock_irq(&cct->task_lock);
757 cct->cq_jobs = 0;
758 cct->active = 0;
759 list_splice_init(&cct->cq_list, &list);
760 spin_unlock_irq(&cct->task_lock);
761
762 cpu = find_next_online_cpu(pool);
763 target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
764 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);
765 spin_lock_irq(&target->task_lock);
766 list_for_each_entry_safe(cq, tmp, &list, entry) {
767 list_del(&cq->entry);
768 __queue_comp_task(cq, target, thread);
769 }
770 spin_unlock_irq(&target->task_lock);
771}
772
773static void comp_task_stop(unsigned int cpu, bool online)
774{
775 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
776
777 spin_lock_irq(&cct->task_lock);
778 cct->cq_jobs = 0;
779 cct->active = 0;
780 WARN_ON(!list_empty(&cct->cq_list));
781 spin_unlock_irq(&cct->task_lock);
782}
783
784static int comp_task_should_run(unsigned int cpu)
785{
786 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
787
788 return cct->cq_jobs;
789}
790
791static void comp_task(unsigned int cpu)
792{
793 struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks);
794 int cql_empty;
795
796 spin_lock_irq(&cct->task_lock);
797 cql_empty = list_empty(&cct->cq_list);
798 if (!cql_empty) {
799 __set_current_state(TASK_RUNNING);
800 run_comp_task(cct);
801 }
802 spin_unlock_irq(&cct->task_lock);
803}
804
805static struct smp_hotplug_thread comp_pool_threads = {
806 .thread_should_run = comp_task_should_run,
807 .thread_fn = comp_task,
808 .thread_comm = "ehca_comp/%u",
809 .cleanup = comp_task_stop,
810 .park = comp_task_park,
811};
812
813int ehca_create_comp_pool(void)
814{
815 int cpu, ret = -ENOMEM;
816
817 if (!ehca_scaling_code)
818 return 0;
819
820 pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
821 if (pool == NULL)
822 return -ENOMEM;
823
824 spin_lock_init(&pool->last_cpu_lock);
825 pool->last_cpu = cpumask_any(cpu_online_mask);
826
827 pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
828 if (!pool->cpu_comp_tasks)
829 goto out_pool;
830
831 pool->cpu_comp_threads = alloc_percpu(struct task_struct *);
832 if (!pool->cpu_comp_threads)
833 goto out_tasks;
834
835 for_each_present_cpu(cpu) {
836 struct ehca_cpu_comp_task *cct;
837
838 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
839 spin_lock_init(&cct->task_lock);
840 INIT_LIST_HEAD(&cct->cq_list);
841 }
842
843 comp_pool_threads.store = pool->cpu_comp_threads;
844 ret = smpboot_register_percpu_thread(&comp_pool_threads);
845 if (ret)
846 goto out_threads;
847
848 pr_info("eHCA scaling code enabled\n");
849 return ret;
850
851out_threads:
852 free_percpu(pool->cpu_comp_threads);
853out_tasks:
854 free_percpu(pool->cpu_comp_tasks);
855out_pool:
856 kfree(pool);
857 return ret;
858}
859
860void ehca_destroy_comp_pool(void)
861{
862 if (!ehca_scaling_code)
863 return;
864
865 smpboot_unregister_percpu_thread(&comp_pool_threads);
866
867 free_percpu(pool->cpu_comp_threads);
868 free_percpu(pool->cpu_comp_tasks);
869 kfree(pool);
870}
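find_next_online_cpu above advances the pool's last_cpu cursor through cpu_online_mask under last_cpu_lock, wrapping back to the first online CPU, so queue_comp_task spreads completion work round-robin across the per-CPU threads (it additionally skips CPUs whose comp task is not active). A self-contained sketch of that wrapping cursor walk over a plain bitmask, without the active check (the 8-CPU mask and ex_* names are illustrative only):

#include <stdio.h>

#define EX_NR_CPUS 8

/* Round-robin pick of the next set bit in 'online' after 'last', wrapping. */
static int ex_next_online_cpu(unsigned int online, int last)
{
	int cpu = last;
	int i;

	for (i = 0; i < EX_NR_CPUS; i++) {
		cpu = (cpu + 1) % EX_NR_CPUS;
		if (online & (1u << cpu))
			return cpu;
	}
	return -1;	/* no CPU online */
}

int main(void)
{
	unsigned int online = 0x2D;	/* CPUs 0, 2, 3, 5 online */
	int cpu = 0;
	int i;

	for (i = 0; i < 5; i++) {
		cpu = ex_next_online_cpu(online, cpu);
		printf("%d ", cpu);	/* prints: 2 3 5 0 2 */
	}
	printf("\n");
	return 0;
}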
diff --git a/drivers/staging/rdma/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h
deleted file mode 100644
index 5370199f08c7..000000000000
--- a/drivers/staging/rdma/ehca/ehca_irq.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Function definitions and structs for EQs, NEQs and interrupts
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __EHCA_IRQ_H
43#define __EHCA_IRQ_H
44
45
46struct ehca_shca;
47
48#include <linux/interrupt.h>
49#include <linux/types.h>
50
51int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
52
53irqreturn_t ehca_interrupt_neq(int irq, void *dev_id);
54void ehca_tasklet_neq(unsigned long data);
55
56irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
57void ehca_tasklet_eq(unsigned long data);
58void ehca_process_eq(struct ehca_shca *shca, int is_irq);
59
60struct ehca_cpu_comp_task {
61 struct list_head cq_list;
62 spinlock_t task_lock;
63 int cq_jobs;
64 int active;
65};
66
67struct ehca_comp_pool {
68 struct ehca_cpu_comp_task __percpu *cpu_comp_tasks;
69 struct task_struct * __percpu *cpu_comp_threads;
70 int last_cpu;
71 spinlock_t last_cpu_lock;
72};
73
74int ehca_create_comp_pool(void);
75void ehca_destroy_comp_pool(void);
76
77#endif
diff --git a/drivers/staging/rdma/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
deleted file mode 100644
index cca5933fcda6..000000000000
--- a/drivers/staging/rdma/ehca/ehca_iverbs.h
+++ /dev/null
@@ -1,202 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Function definitions for internal functions
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Dietmar Decker <ddecker@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __EHCA_IVERBS_H__
43#define __EHCA_IVERBS_H__
44
45#include "ehca_classes.h"
46
47int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
48 struct ib_udata *uhw);
49
50int ehca_query_port(struct ib_device *ibdev, u8 port,
51 struct ib_port_attr *props);
52
53enum rdma_protocol_type
54ehca_query_protocol(struct ib_device *device, u8 port_num);
55
56int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
57 struct ehca_sma_attr *attr);
58
59int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
60
61int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
62 union ib_gid *gid);
63
64int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
65 struct ib_port_modify *props);
66
67struct ib_pd *ehca_alloc_pd(struct ib_device *device,
68 struct ib_ucontext *context,
69 struct ib_udata *udata);
70
71int ehca_dealloc_pd(struct ib_pd *pd);
72
73struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
74
75int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
76
77int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
78
79int ehca_destroy_ah(struct ib_ah *ah);
80
81struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
82
83struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
84 u64 virt, int mr_access_flags,
85 struct ib_udata *udata);
86
87int ehca_dereg_mr(struct ib_mr *mr);
88
89struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
90
91int ehca_dealloc_mw(struct ib_mw *mw);
92
93struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
94 int mr_access_flags,
95 struct ib_fmr_attr *fmr_attr);
96
97int ehca_map_phys_fmr(struct ib_fmr *fmr,
98 u64 *page_list, int list_len, u64 iova);
99
100int ehca_unmap_fmr(struct list_head *fmr_list);
101
102int ehca_dealloc_fmr(struct ib_fmr *fmr);
103
104enum ehca_eq_type {
105 EHCA_EQ = 0, /* Event Queue */
106 EHCA_NEQ /* Notification Event Queue */
107};
108
109int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
110 enum ehca_eq_type type, const u32 length);
111
112int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
113
114void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
115
116
117struct ib_cq *ehca_create_cq(struct ib_device *device,
118 const struct ib_cq_init_attr *attr,
119 struct ib_ucontext *context,
120 struct ib_udata *udata);
121
122int ehca_destroy_cq(struct ib_cq *cq);
123
124int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
125
126int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
127
128int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
129
130int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
131
132struct ib_qp *ehca_create_qp(struct ib_pd *pd,
133 struct ib_qp_init_attr *init_attr,
134 struct ib_udata *udata);
135
136int ehca_destroy_qp(struct ib_qp *qp);
137
138int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
139 struct ib_udata *udata);
140
141int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
142 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
143
144int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
145 struct ib_send_wr **bad_send_wr);
146
147int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
148 struct ib_recv_wr **bad_recv_wr);
149
150int ehca_post_srq_recv(struct ib_srq *srq,
151 struct ib_recv_wr *recv_wr,
152 struct ib_recv_wr **bad_recv_wr);
153
154struct ib_srq *ehca_create_srq(struct ib_pd *pd,
155 struct ib_srq_init_attr *init_attr,
156 struct ib_udata *udata);
157
158int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
159 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
160
161int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
162
163int ehca_destroy_srq(struct ib_srq *srq);
164
165u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
166 struct ib_qp_init_attr *qp_init_attr);
167
168int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
169
170int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
171
172struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
173 struct ib_udata *udata);
174
175int ehca_dealloc_ucontext(struct ib_ucontext *context);
176
177int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
178
179int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
180 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
181 const struct ib_mad_hdr *in, size_t in_mad_size,
182 struct ib_mad_hdr *out, size_t *out_mad_size,
183 u16 *out_mad_pkey_index);
184
185void ehca_poll_eqs(unsigned long data);
186
187int ehca_calc_ipd(struct ehca_shca *shca, int port,
188 enum ib_rate path_rate, u32 *ipd);
189
190void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
191
192#ifdef CONFIG_PPC_64K_PAGES
193void *ehca_alloc_fw_ctrlblock(gfp_t flags);
194void ehca_free_fw_ctrlblock(void *ptr);
195#else
196#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
197#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
198#endif
199
200void ehca_recover_sqp(struct ib_qp *sqp);
201
202#endif
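ehca_alloc_fw_ctrlblock at the end of the header above maps straight to get_zeroed_page on 4 KB-page kernels, which already yields the 4 KB-aligned, zeroed block the hCalls expect (see the "must be 4K aligned" comment in ehca_error_data); with CONFIG_PPC_64K_PAGES a dedicated slab cache with explicit alignment is created in ehca_main.c instead. A self-contained userspace sketch of the same requirement, using posix_memalign as a stand-in:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

#define EX_CTRLBLOCK_SIZE 4096	/* firmware wants a 4 KB block, 4 KB aligned */

static void *ex_alloc_fw_ctrlblock(void)
{
	void *blk;

	if (posix_memalign(&blk, EX_CTRLBLOCK_SIZE, EX_CTRLBLOCK_SIZE))
		return NULL;
	memset(blk, 0, EX_CTRLBLOCK_SIZE);	/* mirror the zeroing the driver relies on */
	return blk;
}

int main(void)
{
	void *blk = ex_alloc_fw_ctrlblock();

	printf("aligned=%d\n", blk && ((uintptr_t)blk % EX_CTRLBLOCK_SIZE) == 0);
	free(blk);
	return 0;
}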
diff --git a/drivers/staging/rdma/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
deleted file mode 100644
index 832f22f40862..000000000000
--- a/drivers/staging/rdma/ehca/ehca_main.c
+++ /dev/null
@@ -1,1118 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * module start stop, hca detection
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#ifdef CONFIG_PPC_64K_PAGES
44#include <linux/slab.h>
45#endif
46
47#include <linux/notifier.h>
48#include <linux/memory.h>
49#include <rdma/ib_mad.h>
50#include "ehca_classes.h"
51#include "ehca_iverbs.h"
52#include "ehca_mrmw.h"
53#include "ehca_tools.h"
54#include "hcp_if.h"
55
56#define HCAD_VERSION "0029"
57
58MODULE_LICENSE("Dual BSD/GPL");
59MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
60MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
61MODULE_VERSION(HCAD_VERSION);
62
63static bool ehca_open_aqp1 = 0;
64static int ehca_hw_level = 0;
65static bool ehca_poll_all_eqs = 1;
66
67int ehca_debug_level = 0;
68int ehca_nr_ports = -1;
69bool ehca_use_hp_mr = 0;
70int ehca_port_act_time = 30;
71int ehca_static_rate = -1;
72bool ehca_scaling_code = 0;
73int ehca_lock_hcalls = -1;
74int ehca_max_cq = -1;
75int ehca_max_qp = -1;
76
77module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
78module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
79module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
80module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
81module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO);
82module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
83module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
84module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
85module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
86module_param_named(lock_hcalls, ehca_lock_hcalls, bint, S_IRUGO);
87module_param_named(number_of_cqs, ehca_max_cq, int, S_IRUGO);
88module_param_named(number_of_qps, ehca_max_qp, int, S_IRUGO);
89
90MODULE_PARM_DESC(open_aqp1,
91 "Open AQP1 on startup (default: no)");
92MODULE_PARM_DESC(debug_level,
93 "Amount of debug output (0: none (default), 1: traces, "
94 "2: some dumps, 3: lots)");
95MODULE_PARM_DESC(hw_level,
96 "Hardware level (0: autosensing (default), "
97 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
98MODULE_PARM_DESC(nr_ports,
99 "number of connected ports (-1: autodetect (default), "
100 "1: port one only, 2: two ports)");
101MODULE_PARM_DESC(use_hp_mr,
102 "Use high performance MRs (default: no)");
103MODULE_PARM_DESC(port_act_time,
104 "Time to wait for port activation (default: 30 sec)");
105MODULE_PARM_DESC(poll_all_eqs,
106 "Poll all event queues periodically (default: yes)");
107MODULE_PARM_DESC(static_rate,
108 "Set permanent static rate (default: no static rate)");
109MODULE_PARM_DESC(scaling_code,
110 "Enable scaling code (default: no)");
111MODULE_PARM_DESC(lock_hcalls,
112 "Serialize all hCalls made by the driver "
113 "(default: autodetect)");
114MODULE_PARM_DESC(number_of_cqs,
115 "Max number of CQs which can be allocated "
116 "(default: autodetect)");
117MODULE_PARM_DESC(number_of_qps,
118 "Max number of QPs which can be allocated "
119 "(default: autodetect)");
120
121DEFINE_RWLOCK(ehca_qp_idr_lock);
122DEFINE_RWLOCK(ehca_cq_idr_lock);
123DEFINE_IDR(ehca_qp_idr);
124DEFINE_IDR(ehca_cq_idr);
125
126static LIST_HEAD(shca_list); /* list of all registered ehcas */
127DEFINE_SPINLOCK(shca_list_lock);
128
129static struct timer_list poll_eqs_timer;
130
131#ifdef CONFIG_PPC_64K_PAGES
132static struct kmem_cache *ctblk_cache;
133
134void *ehca_alloc_fw_ctrlblock(gfp_t flags)
135{
136 void *ret = kmem_cache_zalloc(ctblk_cache, flags);
137 if (!ret)
138 ehca_gen_err("Out of memory for ctblk");
139 return ret;
140}
141
142void ehca_free_fw_ctrlblock(void *ptr)
143{
144 if (ptr)
145 kmem_cache_free(ctblk_cache, ptr);
146
147}
148#endif
149
150int ehca2ib_return_code(u64 ehca_rc)
151{
152 switch (ehca_rc) {
153 case H_SUCCESS:
154 return 0;
155 case H_RESOURCE: /* Resource in use */
156 case H_BUSY:
157 return -EBUSY;
158 case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
159 case H_CONSTRAINED: /* resource constraint */
160 case H_NO_MEM:
161 return -ENOMEM;
162 default:
163 return -EINVAL;
164 }
165}
166
167static int ehca_create_slab_caches(void)
168{
169 int ret;
170
171 ret = ehca_init_pd_cache();
172 if (ret) {
173 ehca_gen_err("Cannot create PD SLAB cache.");
174 return ret;
175 }
176
177 ret = ehca_init_cq_cache();
178 if (ret) {
179 ehca_gen_err("Cannot create CQ SLAB cache.");
180 goto create_slab_caches2;
181 }
182
183 ret = ehca_init_qp_cache();
184 if (ret) {
185 ehca_gen_err("Cannot create QP SLAB cache.");
186 goto create_slab_caches3;
187 }
188
189 ret = ehca_init_av_cache();
190 if (ret) {
191 ehca_gen_err("Cannot create AV SLAB cache.");
192 goto create_slab_caches4;
193 }
194
195 ret = ehca_init_mrmw_cache();
196 if (ret) {
197 ehca_gen_err("Cannot create MR&MW SLAB cache.");
198 goto create_slab_caches5;
199 }
200
201 ret = ehca_init_small_qp_cache();
202 if (ret) {
203 ehca_gen_err("Cannot create small queue SLAB cache.");
204 goto create_slab_caches6;
205 }
206
207#ifdef CONFIG_PPC_64K_PAGES
208 ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
209 EHCA_PAGESIZE, H_CB_ALIGNMENT,
210 SLAB_HWCACHE_ALIGN,
211 NULL);
212 if (!ctblk_cache) {
213 ehca_gen_err("Cannot create ctblk SLAB cache.");
214 ehca_cleanup_small_qp_cache();
215 ret = -ENOMEM;
216 goto create_slab_caches6;
217 }
218#endif
219 return 0;
220
221create_slab_caches6:
222 ehca_cleanup_mrmw_cache();
223
224create_slab_caches5:
225 ehca_cleanup_av_cache();
226
227create_slab_caches4:
228 ehca_cleanup_qp_cache();
229
230create_slab_caches3:
231 ehca_cleanup_cq_cache();
232
233create_slab_caches2:
234 ehca_cleanup_pd_cache();
235
236 return ret;
237}
238
239static void ehca_destroy_slab_caches(void)
240{
241 ehca_cleanup_small_qp_cache();
242 ehca_cleanup_mrmw_cache();
243 ehca_cleanup_av_cache();
244 ehca_cleanup_qp_cache();
245 ehca_cleanup_cq_cache();
246 ehca_cleanup_pd_cache();
247#ifdef CONFIG_PPC_64K_PAGES
248 kmem_cache_destroy(ctblk_cache);
249#endif
250}
251
252#define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39)
253#define EHCA_REVID EHCA_BMASK_IBM(40, 63)
254
255static struct cap_descr {
256 u64 mask;
257 char *descr;
258} hca_cap_descr[] = {
259 { HCA_CAP_AH_PORT_NR_CHECK, "HCA_CAP_AH_PORT_NR_CHECK" },
260 { HCA_CAP_ATOMIC, "HCA_CAP_ATOMIC" },
261 { HCA_CAP_AUTO_PATH_MIG, "HCA_CAP_AUTO_PATH_MIG" },
262 { HCA_CAP_BAD_P_KEY_CTR, "HCA_CAP_BAD_P_KEY_CTR" },
263 { HCA_CAP_SQD_RTS_PORT_CHANGE, "HCA_CAP_SQD_RTS_PORT_CHANGE" },
264 { HCA_CAP_CUR_QP_STATE_MOD, "HCA_CAP_CUR_QP_STATE_MOD" },
265 { HCA_CAP_INIT_TYPE, "HCA_CAP_INIT_TYPE" },
266 { HCA_CAP_PORT_ACTIVE_EVENT, "HCA_CAP_PORT_ACTIVE_EVENT" },
267 { HCA_CAP_Q_KEY_VIOL_CTR, "HCA_CAP_Q_KEY_VIOL_CTR" },
268 { HCA_CAP_WQE_RESIZE, "HCA_CAP_WQE_RESIZE" },
269 { HCA_CAP_RAW_PACKET_MCAST, "HCA_CAP_RAW_PACKET_MCAST" },
270 { HCA_CAP_SHUTDOWN_PORT, "HCA_CAP_SHUTDOWN_PORT" },
271 { HCA_CAP_RC_LL_QP, "HCA_CAP_RC_LL_QP" },
272 { HCA_CAP_SRQ, "HCA_CAP_SRQ" },
273 { HCA_CAP_UD_LL_QP, "HCA_CAP_UD_LL_QP" },
274 { HCA_CAP_RESIZE_MR, "HCA_CAP_RESIZE_MR" },
275 { HCA_CAP_MINI_QP, "HCA_CAP_MINI_QP" },
276 { HCA_CAP_H_ALLOC_RES_SYNC, "HCA_CAP_H_ALLOC_RES_SYNC" },
277};
278
279static int ehca_sense_attributes(struct ehca_shca *shca)
280{
281 int i, ret = 0;
282 u64 h_ret;
283 struct hipz_query_hca *rblock;
284 struct hipz_query_port *port;
285 const char *loc_code;
286
287 static const u32 pgsize_map[] = {
288 HCA_CAP_MR_PGSIZE_4K, 0x1000,
289 HCA_CAP_MR_PGSIZE_64K, 0x10000,
290 HCA_CAP_MR_PGSIZE_1M, 0x100000,
291 HCA_CAP_MR_PGSIZE_16M, 0x1000000,
292 };
293
294 ehca_gen_dbg("Probing adapter %s...",
295 shca->ofdev->dev.of_node->full_name);
296 loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
297 NULL);
298 if (loc_code)
 299 ehca_gen_dbg(" ... location code=%s", loc_code);
300
301 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
302 if (!rblock) {
303 ehca_gen_err("Cannot allocate rblock memory.");
304 return -ENOMEM;
305 }
306
307 h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
308 if (h_ret != H_SUCCESS) {
309 ehca_gen_err("Cannot query device properties. h_ret=%lli",
310 h_ret);
311 ret = -EPERM;
312 goto sense_attributes1;
313 }
314
315 if (ehca_nr_ports == 1)
316 shca->num_ports = 1;
317 else
318 shca->num_ports = (u8)rblock->num_ports;
319
320 ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
321
322 if (ehca_hw_level == 0) {
323 u32 hcaaver;
324 u32 revid;
325
326 hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
327 revid = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
328
329 ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
330
331 if (hcaaver == 1) {
332 if (revid <= 3)
333 shca->hw_level = 0x10 | (revid + 1);
334 else
335 shca->hw_level = 0x14;
336 } else if (hcaaver == 2) {
337 if (revid == 0)
338 shca->hw_level = 0x21;
339 else if (revid == 0x10)
340 shca->hw_level = 0x22;
341 else if (revid == 0x20 || revid == 0x21)
342 shca->hw_level = 0x23;
343 }
344
345 if (!shca->hw_level) {
346 ehca_gen_warn("unknown hardware version"
347 " - assuming default level");
348 shca->hw_level = 0x22;
349 }
350 } else
351 shca->hw_level = ehca_hw_level;
352 ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
353
354 shca->hca_cap = rblock->hca_cap_indicators;
355 ehca_gen_dbg(" ... HCA capabilities:");
356 for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
357 if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
358 ehca_gen_dbg(" %s", hca_cap_descr[i].descr);
359
360 /* Autodetect hCall locking -- the "H_ALLOC_RESOURCE synced" flag is
361 * a firmware property, so it's valid across all adapters
362 */
363 if (ehca_lock_hcalls == -1)
364 ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
365 shca->hca_cap);
366
367 /* translate supported MR page sizes; always support 4K */
368 shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
369 for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
370 if (rblock->memory_page_size_supported & pgsize_map[i])
371 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
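
The pgsize_map pairs above collapse individual firmware capability bits into a single bitmask of MR page sizes the driver may use. Below is a minimal user-space sketch of that translation; the DEMO_CAP_* flag values are invented placeholders, not the real HCA_CAP_MR_PGSIZE_* bits from the firmware interface headers.

/* Stand-alone sketch of the flag -> size translation above. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_CAP_PGSIZE_4K   0x1ULL   /* placeholder capability bits */
#define DEMO_CAP_PGSIZE_64K  0x2ULL
#define DEMO_CAP_PGSIZE_1M   0x4ULL
#define DEMO_CAP_PGSIZE_16M  0x8ULL

static const uint64_t demo_pgsize_map[] = {
	DEMO_CAP_PGSIZE_4K,  0x1000,
	DEMO_CAP_PGSIZE_64K, 0x10000,
	DEMO_CAP_PGSIZE_1M,  0x100000,
	DEMO_CAP_PGSIZE_16M, 0x1000000,
};

int main(void)
{
	uint64_t fw_caps = DEMO_CAP_PGSIZE_4K | DEMO_CAP_PGSIZE_64K;
	uint64_t supported = 0x1000;	/* 4K is always supported */
	size_t i;

	for (i = 0; i < sizeof(demo_pgsize_map) / sizeof(demo_pgsize_map[0]); i += 2)
		if (fw_caps & demo_pgsize_map[i])
			supported |= demo_pgsize_map[i + 1];

	printf("hca_cap_mr_pgsize = 0x%llx\n", (unsigned long long)supported);
	/* prints 0x11000: 4K and 64K pages usable for MRs */
	return 0;
}
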
372
373 /* Set maximum number of CQs and QPs to calculate EQ size */
374 if (shca->max_num_qps == -1)
375 shca->max_num_qps = min_t(int, rblock->max_qp,
376 EHCA_MAX_NUM_QUEUES);
377 else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
378 ehca_gen_warn("The requested number of QPs is out of range "
379 "(1 - %i) specified by HW. Value is set to %i",
380 rblock->max_qp, rblock->max_qp);
381 shca->max_num_qps = rblock->max_qp;
382 }
383
384 if (shca->max_num_cqs == -1)
385 shca->max_num_cqs = min_t(int, rblock->max_cq,
386 EHCA_MAX_NUM_QUEUES);
387 else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
388 ehca_gen_warn("The requested number of CQs is out of range "
389 "(1 - %i) specified by HW. Value is set to %i",
390 rblock->max_cq, rblock->max_cq);
 shca->max_num_cqs = rblock->max_cq;
391 }
392
393 /* query max MTU from first port -- it's the same for all ports */
394 port = (struct hipz_query_port *)rblock;
395 h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
396 if (h_ret != H_SUCCESS) {
397 ehca_gen_err("Cannot query port properties. h_ret=%lli",
398 h_ret);
399 ret = -EPERM;
400 goto sense_attributes1;
401 }
402
403 shca->max_mtu = port->max_mtu;
404
405sense_attributes1:
406 ehca_free_fw_ctrlblock(rblock);
407 return ret;
408}
409
410static int init_node_guid(struct ehca_shca *shca)
411{
412 int ret = 0;
413 struct hipz_query_hca *rblock;
414
415 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
416 if (!rblock) {
417 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
418 return -ENOMEM;
419 }
420
421 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
422 ehca_err(&shca->ib_device, "Can't query device properties");
423 ret = -EINVAL;
424 goto init_node_guid1;
425 }
426
427 memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
428
429init_node_guid1:
430 ehca_free_fw_ctrlblock(rblock);
431 return ret;
432}
433
434static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num,
435 struct ib_port_immutable *immutable)
436{
437 struct ib_port_attr attr;
438 int err;
439
440 err = ehca_query_port(ibdev, port_num, &attr);
441 if (err)
442 return err;
443
444 immutable->pkey_tbl_len = attr.pkey_tbl_len;
445 immutable->gid_tbl_len = attr.gid_tbl_len;
446 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
447 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
448
449 return 0;
450}
451
452static int ehca_init_device(struct ehca_shca *shca)
453{
454 int ret;
455
456 ret = init_node_guid(shca);
457 if (ret)
458 return ret;
459
460 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
461 shca->ib_device.owner = THIS_MODULE;
462
463 shca->ib_device.uverbs_abi_ver = 8;
464 shca->ib_device.uverbs_cmd_mask =
465 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
466 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
467 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
468 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
469 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
470 (1ull << IB_USER_VERBS_CMD_REG_MR) |
471 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
472 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
473 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
474 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
475 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
476 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
477 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
478 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
479 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
480 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
481
482 shca->ib_device.node_type = RDMA_NODE_IB_CA;
483 shca->ib_device.phys_port_cnt = shca->num_ports;
484 shca->ib_device.num_comp_vectors = 1;
485 shca->ib_device.dma_device = &shca->ofdev->dev;
486 shca->ib_device.query_device = ehca_query_device;
487 shca->ib_device.query_port = ehca_query_port;
488 shca->ib_device.query_gid = ehca_query_gid;
489 shca->ib_device.query_pkey = ehca_query_pkey;
490 /* shca->ib_device.modify_device = ehca_modify_device */
491 shca->ib_device.modify_port = ehca_modify_port;
492 shca->ib_device.alloc_ucontext = ehca_alloc_ucontext;
493 shca->ib_device.dealloc_ucontext = ehca_dealloc_ucontext;
494 shca->ib_device.alloc_pd = ehca_alloc_pd;
495 shca->ib_device.dealloc_pd = ehca_dealloc_pd;
496 shca->ib_device.create_ah = ehca_create_ah;
497 /* shca->ib_device.modify_ah = ehca_modify_ah; */
498 shca->ib_device.query_ah = ehca_query_ah;
499 shca->ib_device.destroy_ah = ehca_destroy_ah;
500 shca->ib_device.create_qp = ehca_create_qp;
501 shca->ib_device.modify_qp = ehca_modify_qp;
502 shca->ib_device.query_qp = ehca_query_qp;
503 shca->ib_device.destroy_qp = ehca_destroy_qp;
504 shca->ib_device.post_send = ehca_post_send;
505 shca->ib_device.post_recv = ehca_post_recv;
506 shca->ib_device.create_cq = ehca_create_cq;
507 shca->ib_device.destroy_cq = ehca_destroy_cq;
508 shca->ib_device.resize_cq = ehca_resize_cq;
509 shca->ib_device.poll_cq = ehca_poll_cq;
510 /* shca->ib_device.peek_cq = ehca_peek_cq; */
511 shca->ib_device.req_notify_cq = ehca_req_notify_cq;
512 /* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
513 shca->ib_device.get_dma_mr = ehca_get_dma_mr;
514 shca->ib_device.reg_user_mr = ehca_reg_user_mr;
515 shca->ib_device.dereg_mr = ehca_dereg_mr;
516 shca->ib_device.alloc_mw = ehca_alloc_mw;
517 shca->ib_device.dealloc_mw = ehca_dealloc_mw;
518 shca->ib_device.alloc_fmr = ehca_alloc_fmr;
519 shca->ib_device.map_phys_fmr = ehca_map_phys_fmr;
520 shca->ib_device.unmap_fmr = ehca_unmap_fmr;
521 shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
522 shca->ib_device.attach_mcast = ehca_attach_mcast;
523 shca->ib_device.detach_mcast = ehca_detach_mcast;
524 shca->ib_device.process_mad = ehca_process_mad;
525 shca->ib_device.mmap = ehca_mmap;
526 shca->ib_device.dma_ops = &ehca_dma_mapping_ops;
527 shca->ib_device.get_port_immutable = ehca_port_immutable;
528
529 if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
530 shca->ib_device.uverbs_cmd_mask |=
531 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
532 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
533 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
534 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
535
536 shca->ib_device.create_srq = ehca_create_srq;
537 shca->ib_device.modify_srq = ehca_modify_srq;
538 shca->ib_device.query_srq = ehca_query_srq;
539 shca->ib_device.destroy_srq = ehca_destroy_srq;
540 shca->ib_device.post_srq_recv = ehca_post_srq_recv;
541 }
542
543 return ret;
544}
545
546static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
547{
548 struct ehca_sport *sport = &shca->sport[port - 1];
549 struct ib_cq *ibcq;
550 struct ib_qp *ibqp;
551 struct ib_qp_init_attr qp_init_attr;
552 struct ib_cq_init_attr cq_attr = {};
553 int ret;
554
555 if (sport->ibcq_aqp1) {
556 ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
557 return -EPERM;
558 }
559
560 cq_attr.cqe = 10;
561 ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1),
562 &cq_attr);
563 if (IS_ERR(ibcq)) {
564 ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
565 return PTR_ERR(ibcq);
566 }
567 sport->ibcq_aqp1 = ibcq;
568
569 if (sport->ibqp_sqp[IB_QPT_GSI]) {
570 ehca_err(&shca->ib_device, "AQP1 QP is already created.");
571 ret = -EPERM;
572 goto create_aqp1;
573 }
574
575 memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
576 qp_init_attr.send_cq = ibcq;
577 qp_init_attr.recv_cq = ibcq;
578 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
579 qp_init_attr.cap.max_send_wr = 100;
580 qp_init_attr.cap.max_recv_wr = 100;
581 qp_init_attr.cap.max_send_sge = 2;
582 qp_init_attr.cap.max_recv_sge = 1;
583 qp_init_attr.qp_type = IB_QPT_GSI;
584 qp_init_attr.port_num = port;
585 qp_init_attr.qp_context = NULL;
586 qp_init_attr.event_handler = NULL;
587 qp_init_attr.srq = NULL;
588
589 ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
590 if (IS_ERR(ibqp)) {
591 ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
592 ret = PTR_ERR(ibqp);
593 goto create_aqp1;
594 }
595 sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
596
597 return 0;
598
599create_aqp1:
600 ib_destroy_cq(sport->ibcq_aqp1);
601 return ret;
602}
603
604static int ehca_destroy_aqp1(struct ehca_sport *sport)
605{
606 int ret;
607
608 ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
609 if (ret) {
610 ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
611 return ret;
612 }
613
614 ret = ib_destroy_cq(sport->ibcq_aqp1);
615 if (ret)
616 ehca_gen_err("Cannot destroy AQP1 CQ. ret=%i", ret);
617
618 return ret;
619}
620
621static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
622{
623 return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
624}
625
626static ssize_t ehca_store_debug_level(struct device_driver *ddp,
627 const char *buf, size_t count)
628{
629 int value = (*buf) - '0';
630 if (value >= 0 && value <= 9)
631 ehca_debug_level = value;
632 return 1;
633}
634
635static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
636 ehca_show_debug_level, ehca_store_debug_level);
637
638static struct attribute *ehca_drv_attrs[] = {
639 &driver_attr_debug_level.attr,
640 NULL
641};
642
643static struct attribute_group ehca_drv_attr_grp = {
644 .attrs = ehca_drv_attrs
645};
646
647static const struct attribute_group *ehca_drv_attr_groups[] = {
648 &ehca_drv_attr_grp,
649 NULL,
650};
651
652#define EHCA_RESOURCE_ATTR(name) \
653static ssize_t ehca_show_##name(struct device *dev, \
654 struct device_attribute *attr, \
655 char *buf) \
656{ \
657 struct ehca_shca *shca; \
658 struct hipz_query_hca *rblock; \
659 int data; \
660 \
661 shca = dev_get_drvdata(dev); \
662 \
663 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \
664 if (!rblock) { \
665 dev_err(dev, "Can't allocate rblock memory.\n"); \
666 return 0; \
667 } \
668 \
669 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
670 dev_err(dev, "Can't query device properties\n"); \
671 ehca_free_fw_ctrlblock(rblock); \
672 return 0; \
673 } \
674 \
675 data = rblock->name; \
676 ehca_free_fw_ctrlblock(rblock); \
677 \
678 if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \
679 return snprintf(buf, 256, "1\n"); \
680 else \
681 return snprintf(buf, 256, "%d\n", data); \
682 \
683} \
684static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
685
686EHCA_RESOURCE_ATTR(num_ports);
687EHCA_RESOURCE_ATTR(hw_ver);
688EHCA_RESOURCE_ATTR(max_eq);
689EHCA_RESOURCE_ATTR(cur_eq);
690EHCA_RESOURCE_ATTR(max_cq);
691EHCA_RESOURCE_ATTR(cur_cq);
692EHCA_RESOURCE_ATTR(max_qp);
693EHCA_RESOURCE_ATTR(cur_qp);
694EHCA_RESOURCE_ATTR(max_mr);
695EHCA_RESOURCE_ATTR(cur_mr);
696EHCA_RESOURCE_ATTR(max_mw);
697EHCA_RESOURCE_ATTR(cur_mw);
698EHCA_RESOURCE_ATTR(max_pd);
699EHCA_RESOURCE_ATTR(max_ah);
700
701static ssize_t ehca_show_adapter_handle(struct device *dev,
702 struct device_attribute *attr,
703 char *buf)
704{
705 struct ehca_shca *shca = dev_get_drvdata(dev);
706
707 return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
708
709}
710static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
711
712static struct attribute *ehca_dev_attrs[] = {
713 &dev_attr_adapter_handle.attr,
714 &dev_attr_num_ports.attr,
715 &dev_attr_hw_ver.attr,
716 &dev_attr_max_eq.attr,
717 &dev_attr_cur_eq.attr,
718 &dev_attr_max_cq.attr,
719 &dev_attr_cur_cq.attr,
720 &dev_attr_max_qp.attr,
721 &dev_attr_cur_qp.attr,
722 &dev_attr_max_mr.attr,
723 &dev_attr_cur_mr.attr,
724 &dev_attr_max_mw.attr,
725 &dev_attr_cur_mw.attr,
726 &dev_attr_max_pd.attr,
727 &dev_attr_max_ah.attr,
728 NULL
729};
730
731static struct attribute_group ehca_dev_attr_grp = {
732 .attrs = ehca_dev_attrs
733};
734
735static int ehca_probe(struct platform_device *dev)
736{
737 struct ehca_shca *shca;
738 const u64 *handle;
739 struct ib_pd *ibpd;
740 int ret, i, eq_size;
741 unsigned long flags;
742
743 handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
744 if (!handle) {
745 ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
746 dev->dev.of_node->full_name);
747 return -ENODEV;
748 }
749
750 if (!(*handle)) {
751 ehca_gen_err("Wrong eHCA handle for adapter: %s.",
752 dev->dev.of_node->full_name);
753 return -ENODEV;
754 }
755
756 shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
757 if (!shca) {
758 ehca_gen_err("Cannot allocate shca memory.");
759 return -ENOMEM;
760 }
761
762 mutex_init(&shca->modify_mutex);
763 atomic_set(&shca->num_cqs, 0);
764 atomic_set(&shca->num_qps, 0);
765 shca->max_num_qps = ehca_max_qp;
766 shca->max_num_cqs = ehca_max_cq;
767
768 for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
769 spin_lock_init(&shca->sport[i].mod_sqp_lock);
770
771 shca->ofdev = dev;
772 shca->ipz_hca_handle.handle = *handle;
773 dev_set_drvdata(&dev->dev, shca);
774
775 ret = ehca_sense_attributes(shca);
776 if (ret < 0) {
777 ehca_gen_err("Cannot sense eHCA attributes.");
778 goto probe1;
779 }
780
781 ret = ehca_init_device(shca);
782 if (ret) {
783 ehca_gen_err("Cannot init ehca device struct");
784 goto probe1;
785 }
786
787 eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
788 /* create event queues */
789 ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
790 if (ret) {
791 ehca_err(&shca->ib_device, "Cannot create EQ.");
792 goto probe1;
793 }
794
795 ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
796 if (ret) {
797 ehca_err(&shca->ib_device, "Cannot create NEQ.");
798 goto probe3;
799 }
800
801 /* create internal protection domain */
802 ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
803 if (IS_ERR(ibpd)) {
804 ehca_err(&shca->ib_device, "Cannot create internal PD.");
805 ret = PTR_ERR(ibpd);
806 goto probe4;
807 }
808
809 shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
810 shca->pd->ib_pd.device = &shca->ib_device;
811
812 /* create internal max MR */
813 ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
814
815 if (ret) {
816 ehca_err(&shca->ib_device, "Cannot create internal MR ret=%i",
817 ret);
818 goto probe5;
819 }
820
821 ret = ib_register_device(&shca->ib_device, NULL);
822 if (ret) {
823 ehca_err(&shca->ib_device,
824 "ib_register_device() failed ret=%i", ret);
825 goto probe6;
826 }
827
828 /* create AQP1 for port 1 */
829 if (ehca_open_aqp1 == 1) {
830 shca->sport[0].port_state = IB_PORT_DOWN;
831 ret = ehca_create_aqp1(shca, 1);
832 if (ret) {
833 ehca_err(&shca->ib_device,
834 "Cannot create AQP1 for port 1.");
835 goto probe7;
836 }
837 }
838
839 /* create AQP1 for port 2 */
840 if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
841 shca->sport[1].port_state = IB_PORT_DOWN;
842 ret = ehca_create_aqp1(shca, 2);
843 if (ret) {
844 ehca_err(&shca->ib_device,
845 "Cannot create AQP1 for port 2.");
846 goto probe8;
847 }
848 }
849
850 ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
851 if (ret) /* only complain; we can live without attributes */
852 ehca_err(&shca->ib_device,
853 "Cannot create device attributes ret=%d", ret);
854
855 spin_lock_irqsave(&shca_list_lock, flags);
856 list_add(&shca->shca_list, &shca_list);
857 spin_unlock_irqrestore(&shca_list_lock, flags);
858
859 return 0;
860
861probe8:
862 ret = ehca_destroy_aqp1(&shca->sport[0]);
863 if (ret)
864 ehca_err(&shca->ib_device,
865 "Cannot destroy AQP1 for port 1. ret=%i", ret);
866
867probe7:
868 ib_unregister_device(&shca->ib_device);
869
870probe6:
871 ret = ehca_dereg_internal_maxmr(shca);
872 if (ret)
873 ehca_err(&shca->ib_device,
874 "Cannot destroy internal MR. ret=%x", ret);
875
876probe5:
877 ret = ehca_dealloc_pd(&shca->pd->ib_pd);
878 if (ret)
879 ehca_err(&shca->ib_device,
880 "Cannot destroy internal PD. ret=%x", ret);
881
882probe4:
883 ret = ehca_destroy_eq(shca, &shca->neq);
884 if (ret)
885 ehca_err(&shca->ib_device,
886 "Cannot destroy NEQ. ret=%x", ret);
887
888probe3:
889 ret = ehca_destroy_eq(shca, &shca->eq);
890 if (ret)
891 ehca_err(&shca->ib_device,
892 "Cannot destroy EQ. ret=%x", ret);
893
894probe1:
895 ib_dealloc_device(&shca->ib_device);
896
897 return -EINVAL;
898}
899
900static int ehca_remove(struct platform_device *dev)
901{
902 struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
903 unsigned long flags;
904 int ret;
905
906 sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
907
908 if (ehca_open_aqp1 == 1) {
909 int i;
910 for (i = 0; i < shca->num_ports; i++) {
911 ret = ehca_destroy_aqp1(&shca->sport[i]);
912 if (ret)
913 ehca_err(&shca->ib_device,
914 "Cannot destroy AQP1 for port %x "
915 "ret=%i", ret, i);
916 }
917 }
918
919 ib_unregister_device(&shca->ib_device);
920
921 ret = ehca_dereg_internal_maxmr(shca);
922 if (ret)
923 ehca_err(&shca->ib_device,
924 "Cannot destroy internal MR. ret=%i", ret);
925
926 ret = ehca_dealloc_pd(&shca->pd->ib_pd);
927 if (ret)
928 ehca_err(&shca->ib_device,
929 "Cannot destroy internal PD. ret=%i", ret);
930
931 ret = ehca_destroy_eq(shca, &shca->eq);
932 if (ret)
933 ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%i", ret);
934
935 ret = ehca_destroy_eq(shca, &shca->neq);
936 if (ret)
937 ehca_err(&shca->ib_device, "Cannot destroy NEQ. ret=%i", ret);
938
939 ib_dealloc_device(&shca->ib_device);
940
941 spin_lock_irqsave(&shca_list_lock, flags);
942 list_del(&shca->shca_list);
943 spin_unlock_irqrestore(&shca_list_lock, flags);
944
945 return ret;
946}
947
948static struct of_device_id ehca_device_table[] =
949{
950 {
951 .name = "lhca",
952 .compatible = "IBM,lhca",
953 },
954 {},
955};
956MODULE_DEVICE_TABLE(of, ehca_device_table);
957
958static struct platform_driver ehca_driver = {
959 .probe = ehca_probe,
960 .remove = ehca_remove,
961 .driver = {
962 .name = "ehca",
963 .owner = THIS_MODULE,
964 .groups = ehca_drv_attr_groups,
965 .of_match_table = ehca_device_table,
966 },
967};
968
969void ehca_poll_eqs(unsigned long data)
970{
971 struct ehca_shca *shca;
972
973 spin_lock(&shca_list_lock);
974 list_for_each_entry(shca, &shca_list, shca_list) {
975 if (shca->eq.is_initialized) {
976 /* call deadman proc only if eq ptr does not change */
977 struct ehca_eq *eq = &shca->eq;
978 int max = 3;
979 volatile u64 q_ofs, q_ofs2;
980 unsigned long flags;
981 spin_lock_irqsave(&eq->spinlock, flags);
982 q_ofs = eq->ipz_queue.current_q_offset;
983 spin_unlock_irqrestore(&eq->spinlock, flags);
984 do {
985 spin_lock_irqsave(&eq->spinlock, flags);
986 q_ofs2 = eq->ipz_queue.current_q_offset;
987 spin_unlock_irqrestore(&eq->spinlock, flags);
988 max--;
989 } while (q_ofs == q_ofs2 && max > 0);
990 if (q_ofs == q_ofs2)
991 ehca_process_eq(shca, 0);
992 }
993 }
994 mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
995 spin_unlock(&shca_list_lock);
996}
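
The timer above is a deadman check: if the EQ's queue offset does not move across a few samples, the driver assumes an interrupt may have been lost and processes the EQ by polling. A stand-alone sketch of that stall test follows; the demo_* names are invented, and the real code reads eq->ipz_queue.current_q_offset under eq->spinlock.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t stuck_offset(void) { return 42; }	/* never moves */

static bool demo_queue_stalled(uint64_t (*read_offset)(void))
{
	uint64_t first = read_offset();
	int tries = 3;

	while (tries-- > 0)
		if (read_offset() != first)
			return false;	/* queue moved; interrupts are arriving */
	return true;			/* no movement seen; poll the EQ by hand */
}

int main(void)
{
	printf("stalled=%d\n", demo_queue_stalled(stuck_offset));	/* stalled=1 */
	return 0;
}
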
997
998static int ehca_mem_notifier(struct notifier_block *nb,
999 unsigned long action, void *data)
1000{
1001 static unsigned long ehca_dmem_warn_time;
1002 unsigned long flags;
1003
1004 switch (action) {
1005 case MEM_CANCEL_OFFLINE:
1006 case MEM_CANCEL_ONLINE:
1007 case MEM_ONLINE:
1008 case MEM_OFFLINE:
1009 return NOTIFY_OK;
1010 case MEM_GOING_ONLINE:
1011 case MEM_GOING_OFFLINE:
1012 /* only ok if no hca is attached to the lpar */
1013 spin_lock_irqsave(&shca_list_lock, flags);
1014 if (list_empty(&shca_list)) {
1015 spin_unlock_irqrestore(&shca_list_lock, flags);
1016 return NOTIFY_OK;
1017 } else {
1018 spin_unlock_irqrestore(&shca_list_lock, flags);
1019 if (printk_timed_ratelimit(&ehca_dmem_warn_time,
1020 30 * 1000))
1021 ehca_gen_err("DMEM operations are not allowed "
1022 "in conjunction with eHCA");
1023 return NOTIFY_BAD;
1024 }
1025 }
1026 return NOTIFY_OK;
1027}
1028
1029static struct notifier_block ehca_mem_nb = {
1030 .notifier_call = ehca_mem_notifier,
1031};
1032
1033static int __init ehca_module_init(void)
1034{
1035 int ret;
1036
1037 printk(KERN_INFO "eHCA Infiniband Device Driver "
1038 "(Version " HCAD_VERSION ")\n");
1039
1040 ret = ehca_create_comp_pool();
1041 if (ret) {
1042 ehca_gen_err("Cannot create comp pool.");
1043 return ret;
1044 }
1045
1046 ret = ehca_create_slab_caches();
1047 if (ret) {
1048 ehca_gen_err("Cannot create SLAB caches");
1049 ret = -ENOMEM;
1050 goto module_init1;
1051 }
1052
1053 ret = ehca_create_busmap();
1054 if (ret) {
1055 ehca_gen_err("Cannot create busmap.");
1056 goto module_init2;
1057 }
1058
1059 ret = ibmebus_register_driver(&ehca_driver);
1060 if (ret) {
1061 ehca_gen_err("Cannot register eHCA device driver");
1062 ret = -EINVAL;
1063 goto module_init3;
1064 }
1065
1066 ret = register_memory_notifier(&ehca_mem_nb);
1067 if (ret) {
1068 ehca_gen_err("Failed registering memory add/remove notifier");
1069 goto module_init4;
1070 }
1071
1072 if (ehca_poll_all_eqs != 1) {
1073 ehca_gen_err("WARNING!!!");
1074 ehca_gen_err("It is possible to lose interrupts.");
1075 } else {
1076 init_timer(&poll_eqs_timer);
1077 poll_eqs_timer.function = ehca_poll_eqs;
1078 poll_eqs_timer.expires = jiffies + HZ;
1079 add_timer(&poll_eqs_timer);
1080 }
1081
1082 return 0;
1083
1084module_init4:
1085 ibmebus_unregister_driver(&ehca_driver);
1086
1087module_init3:
1088 ehca_destroy_busmap();
1089
1090module_init2:
1091 ehca_destroy_slab_caches();
1092
1093module_init1:
1094 ehca_destroy_comp_pool();
1095 return ret;
1096};
1097
1098static void __exit ehca_module_exit(void)
1099{
1100 if (ehca_poll_all_eqs == 1)
1101 del_timer_sync(&poll_eqs_timer);
1102
1103 ibmebus_unregister_driver(&ehca_driver);
1104
1105 unregister_memory_notifier(&ehca_mem_nb);
1106
1107 ehca_destroy_busmap();
1108
1109 ehca_destroy_slab_caches();
1110
1111 ehca_destroy_comp_pool();
1112
1113 idr_destroy(&ehca_cq_idr);
1114 idr_destroy(&ehca_qp_idr);
1115};
1116
1117module_init(ehca_module_init);
1118module_exit(ehca_module_exit);
diff --git a/drivers/staging/rdma/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c
deleted file mode 100644
index cec181532924..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mcast.c
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * mcast functions
5 *
6 * Authors: Khadija Souissi <souissik@de.ibm.com>
7 * Waleri Fomin <fomin@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
10 * Heiko J Schick <schickhj@de.ibm.com>
11 *
12 * Copyright (c) 2005 IBM Corporation
13 *
14 * All rights reserved.
15 *
16 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
17 * BSD.
18 *
19 * OpenIB BSD License
20 *
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions are met:
23 *
24 * Redistributions of source code must retain the above copyright notice, this
25 * list of conditions and the following disclaimer.
26 *
27 * Redistributions in binary form must reproduce the above copyright notice,
28 * this list of conditions and the following disclaimer in the documentation
29 * and/or other materials
30 * provided with the distribution.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
33 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
36 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGE.
43 */
44
45#include <linux/module.h>
46#include <linux/err.h>
47#include "ehca_classes.h"
48#include "ehca_tools.h"
49#include "ehca_qes.h"
50#include "ehca_iverbs.h"
51#include "hcp_if.h"
52
53#define MAX_MC_LID 0xFFFE
54#define MIN_MC_LID 0xC000 /* Multicast limits */
55#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
56#define EHCA_VALID_MULTICAST_LID(lid) \
57 (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
58
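
A quick user-space check of the two validity rules above; the sample GID bytes and LID are arbitrary (0xFF in the first GID byte marks an IB multicast GID, and multicast LIDs live in 0xC000-0xFFFE).

#include <stdio.h>
#include <stdint.h>

#define DEMO_MIN_MC_LID 0xC000
#define DEMO_MAX_MC_LID 0xFFFE
#define DEMO_VALID_MC_GID(gid) ((gid)[0] == 0xFF)
#define DEMO_VALID_MC_LID(lid) (((lid) >= DEMO_MIN_MC_LID) && ((lid) <= DEMO_MAX_MC_LID))

int main(void)
{
	uint8_t gid[16] = { 0xFF, 0x12, 0x60, 0x1B };	/* arbitrary multicast GID prefix */
	uint16_t lid = 0xC001;

	printf("gid ok=%d lid ok=%d\n",
	       DEMO_VALID_MC_GID(gid), DEMO_VALID_MC_LID(lid));
	/* prints: gid ok=1 lid ok=1 */
	return 0;
}
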
59int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
60{
61 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
62 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
63 ib_device);
64 union ib_gid my_gid;
65 u64 subnet_prefix, interface_id, h_ret;
66
67 if (ibqp->qp_type != IB_QPT_UD) {
68 ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
69 return -EINVAL;
70 }
71
72 if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
73 ehca_err(ibqp->device, "invalid multicast gid");
74 return -EINVAL;
75 } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
76 ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
77 return -EINVAL;
78 }
79
80 memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
81
82 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
83 interface_id = be64_to_cpu(my_gid.global.interface_id);
84 h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
85 my_qp->ipz_qp_handle,
86 my_qp->galpas.kernel,
87 lid, subnet_prefix, interface_id);
88 if (h_ret != H_SUCCESS)
89 ehca_err(ibqp->device,
90 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
91 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
92
93 return ehca2ib_return_code(h_ret);
94}
95
96int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
97{
98 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
99 struct ehca_shca *shca = container_of(ibqp->pd->device,
100 struct ehca_shca, ib_device);
101 union ib_gid my_gid;
102 u64 subnet_prefix, interface_id, h_ret;
103
104 if (ibqp->qp_type != IB_QPT_UD) {
105 ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
106 return -EINVAL;
107 }
108
109 if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
110 ehca_err(ibqp->device, "invalid multicast gid");
111 return -EINVAL;
112 } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
113 ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
114 return -EINVAL;
115 }
116
117 memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
118
119 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
120 interface_id = be64_to_cpu(my_gid.global.interface_id);
121 h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
122 my_qp->ipz_qp_handle,
123 my_qp->galpas.kernel,
124 lid, subnet_prefix, interface_id);
125 if (h_ret != H_SUCCESS)
126 ehca_err(ibqp->device,
127 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
128 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
129
130 return ehca2ib_return_code(h_ret);
131}
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
deleted file mode 100644
index 3367205e3160..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mrmw.c
+++ /dev/null
@@ -1,2202 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * MR/MW functions
5 *
6 * Authors: Dietmar Decker <ddecker@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#include <linux/slab.h>
44#include <rdma/ib_umem.h>
45
46#include "ehca_iverbs.h"
47#include "ehca_mrmw.h"
48#include "hcp_if.h"
49#include "hipz_hw.h"
50
51#define NUM_CHUNKS(length, chunk_size) \
52 (((length) + (chunk_size - 1)) / (chunk_size))
53
54/* max number of rpages (per hcall register_rpages) */
55#define MAX_RPAGES 512
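
NUM_CHUNKS is a plain round-up division, and together with MAX_RPAGES it determines how many register_rpages hcalls a registration needs. A small stand-alone illustration (the page counts are invented): 1000 HW pages need NUM_CHUNKS(1000, 512) = 2 hcalls, the second carrying the remaining 1000 % 512 = 488 pages.

#include <stdio.h>

#define DEMO_NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size - 1)) / (chunk_size))

int main(void)
{
	printf("%d\n", DEMO_NUM_CHUNKS(1000, 512));	/* 2 */
	printf("%d\n", DEMO_NUM_CHUNKS(1024, 512));	/* 2, last chunk exactly full */
	printf("%d\n", DEMO_NUM_CHUNKS(1, 512));	/* 1 */
	return 0;
}
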
56
57/* DMEM toleration management */
58#define EHCA_SECTSHIFT SECTION_SIZE_BITS
59#define EHCA_SECTSIZE (1UL << EHCA_SECTSHIFT)
60#define EHCA_HUGEPAGESHIFT 34
61#define EHCA_HUGEPAGE_SIZE (1UL << EHCA_HUGEPAGESHIFT)
62#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
63#define EHCA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
64#define EHCA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
65#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
66#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
67#define EHCA_TOP_MAP_SIZE (0x10000) /* currently fixed map size */
68#define EHCA_DIR_MAP_SIZE (0x10000)
69#define EHCA_ENT_MAP_SIZE (0x10000)
70#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
71
72static unsigned long ehca_mr_len;
73
74/*
75 * Memory map data structures
76 */
77struct ehca_dir_bmap {
78 u64 ent[EHCA_MAP_ENTRIES];
79};
80struct ehca_top_bmap {
81 struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
82};
83struct ehca_bmap {
84 struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
85};
86
87static struct ehca_bmap *ehca_bmap;
88
89static struct kmem_cache *mr_cache;
90static struct kmem_cache *mw_cache;
91
92enum ehca_mr_pgsize {
93 EHCA_MR_PGSIZE4K = 0x1000L,
94 EHCA_MR_PGSIZE64K = 0x10000L,
95 EHCA_MR_PGSIZE1M = 0x100000L,
96 EHCA_MR_PGSIZE16M = 0x1000000L
97};
98
99#define EHCA_MR_PGSHIFT4K 12
100#define EHCA_MR_PGSHIFT64K 16
101#define EHCA_MR_PGSHIFT1M 20
102#define EHCA_MR_PGSHIFT16M 24
103
104static u64 ehca_map_vaddr(void *caddr);
105
106static u32 ehca_encode_hwpage_size(u32 pgsize)
107{
108 int log = ilog2(pgsize);
109 WARN_ON(log < 12 || log > 24 || log & 3);
110 return (log - 12) / 4;
111}
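
ehca_encode_hwpage_size() relies on the four supported MR page sizes being 4 bits of log2 apart, so (log2(size) - 12) / 4 yields the codes 0..3. A stand-alone check of that arithmetic; the helper below open-codes ilog2() only so it builds outside the kernel.

#include <stdio.h>

static unsigned demo_encode_hwpage_size(unsigned long pgsize)
{
	int log = 0;

	while ((1UL << (log + 1)) <= pgsize)	/* poor man's ilog2() */
		log++;
	return (log - 12) / 4;
}

int main(void)
{
	printf("%u %u %u %u\n",
	       demo_encode_hwpage_size(0x1000),	/* 0: 4K  */
	       demo_encode_hwpage_size(0x10000),	/* 1: 64K */
	       demo_encode_hwpage_size(0x100000),	/* 2: 1M  */
	       demo_encode_hwpage_size(0x1000000));	/* 3: 16M */
	return 0;
}
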
112
113static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
114{
115 return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
116}
117
118static struct ehca_mr *ehca_mr_new(void)
119{
120 struct ehca_mr *me;
121
122 me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
123 if (me)
124 spin_lock_init(&me->mrlock);
125 else
126 ehca_gen_err("alloc failed");
127
128 return me;
129}
130
131static void ehca_mr_delete(struct ehca_mr *me)
132{
133 kmem_cache_free(mr_cache, me);
134}
135
136static struct ehca_mw *ehca_mw_new(void)
137{
138 struct ehca_mw *me;
139
140 me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
141 if (me)
142 spin_lock_init(&me->mwlock);
143 else
144 ehca_gen_err("alloc failed");
145
146 return me;
147}
148
149static void ehca_mw_delete(struct ehca_mw *me)
150{
151 kmem_cache_free(mw_cache, me);
152}
153
154/*----------------------------------------------------------------------*/
155
156struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
157{
158 struct ib_mr *ib_mr;
159 int ret;
160 struct ehca_mr *e_maxmr;
161 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
162 struct ehca_shca *shca =
163 container_of(pd->device, struct ehca_shca, ib_device);
164
165 if (shca->maxmr) {
166 e_maxmr = ehca_mr_new();
167 if (!e_maxmr) {
168 ehca_err(&shca->ib_device, "out of memory");
169 ib_mr = ERR_PTR(-ENOMEM);
170 goto get_dma_mr_exit0;
171 }
172
173 ret = ehca_reg_maxmr(shca, e_maxmr,
174 (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
175 mr_access_flags, e_pd,
176 &e_maxmr->ib.ib_mr.lkey,
177 &e_maxmr->ib.ib_mr.rkey);
178 if (ret) {
179 ehca_mr_delete(e_maxmr);
180 ib_mr = ERR_PTR(ret);
181 goto get_dma_mr_exit0;
182 }
183 ib_mr = &e_maxmr->ib.ib_mr;
184 } else {
185 ehca_err(&shca->ib_device, "no internal max-MR exist!");
186 ib_mr = ERR_PTR(-EINVAL);
187 goto get_dma_mr_exit0;
188 }
189
190get_dma_mr_exit0:
191 if (IS_ERR(ib_mr))
192 ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
193 PTR_ERR(ib_mr), pd, mr_access_flags);
194 return ib_mr;
195} /* end ehca_get_dma_mr() */
196
197/*----------------------------------------------------------------------*/
198
199struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
200 u64 virt, int mr_access_flags,
201 struct ib_udata *udata)
202{
203 struct ib_mr *ib_mr;
204 struct ehca_mr *e_mr;
205 struct ehca_shca *shca =
206 container_of(pd->device, struct ehca_shca, ib_device);
207 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
208 struct ehca_mr_pginfo pginfo;
209 int ret, page_shift;
210 u32 num_kpages;
211 u32 num_hwpages;
212 u64 hwpage_size;
213
214 if (!pd) {
215 ehca_gen_err("bad pd=%p", pd);
216 return ERR_PTR(-EFAULT);
217 }
218
219 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
220 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
221 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
222 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
223 /*
224 * Remote Write Access requires Local Write Access
225 * Remote Atomic Access requires Local Write Access
226 */
227 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
228 mr_access_flags);
229 ib_mr = ERR_PTR(-EINVAL);
230 goto reg_user_mr_exit0;
231 }
232
233 if (length == 0 || virt + length < virt) {
234 ehca_err(pd->device, "bad input values: length=%llx "
235 "virt_base=%llx", length, virt);
236 ib_mr = ERR_PTR(-EINVAL);
237 goto reg_user_mr_exit0;
238 }
239
240 e_mr = ehca_mr_new();
241 if (!e_mr) {
242 ehca_err(pd->device, "out of memory");
243 ib_mr = ERR_PTR(-ENOMEM);
244 goto reg_user_mr_exit0;
245 }
246
247 e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
248 mr_access_flags, 0);
249 if (IS_ERR(e_mr->umem)) {
250 ib_mr = (void *)e_mr->umem;
251 goto reg_user_mr_exit1;
252 }
253
254 if (e_mr->umem->page_size != PAGE_SIZE) {
255 ehca_err(pd->device, "page size not supported, "
256 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
257 ib_mr = ERR_PTR(-EINVAL);
258 goto reg_user_mr_exit2;
259 }
260
261 /* determine number of MR pages */
262 num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
263 /* select proper hw_pgsize */
264 page_shift = PAGE_SHIFT;
265 if (e_mr->umem->hugetlb) {
266 /* determine page_shift, clamp between 4K and 16M */
267 page_shift = (fls64(length - 1) + 3) & ~3;
268 page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
269 EHCA_MR_PGSHIFT16M);
270 }
271 hwpage_size = 1UL << page_shift;
272
273 /* now that we have the desired page size, shift until it's
274 * supported, too. 4K is always supported, so this terminates.
275 */
276 while (!(hwpage_size & shca->hca_cap_mr_pgsize))
277 hwpage_size >>= 4;
278
279reg_user_mr_fallback:
280 num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
281 /* register MR on HCA */
282 memset(&pginfo, 0, sizeof(pginfo));
283 pginfo.type = EHCA_MR_PGI_USER;
284 pginfo.hwpage_size = hwpage_size;
285 pginfo.num_kpages = num_kpages;
286 pginfo.num_hwpages = num_hwpages;
287 pginfo.u.usr.region = e_mr->umem;
288 pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
289 pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
290 ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
291 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
292 &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
293 if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
294 ehca_warn(pd->device, "failed to register mr "
295 "with hwpage_size=%llx", hwpage_size);
296 ehca_info(pd->device, "try to register mr with "
297 "kpage_size=%lx", PAGE_SIZE);
298 /*
299 * this means kpages are not contiguous for a hw page
300 * try kernel page size as fallback solution
301 */
302 hwpage_size = PAGE_SIZE;
303 goto reg_user_mr_fallback;
304 }
305 if (ret) {
306 ib_mr = ERR_PTR(ret);
307 goto reg_user_mr_exit2;
308 }
309
310 /* successful registration of all pages */
311 return &e_mr->ib.ib_mr;
312
313reg_user_mr_exit2:
314 ib_umem_release(e_mr->umem);
315reg_user_mr_exit1:
316 ehca_mr_delete(e_mr);
317reg_user_mr_exit0:
318 if (IS_ERR(ib_mr))
319 ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
320 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
321 return ib_mr;
322} /* end ehca_reg_user_mr() */
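
To summarise the page-size selection in ehca_reg_user_mr() above: pick a desired page shift for the region, clamped to the 4K..16M range in steps of 4 bits, then shift the size down until it appears in the adapter's supported-size bitmask (4K is always supported, so the loop terminates). A rough user-space sketch follows; the capability mask and region length are arbitrary examples, and the shift selection is simplified compared to the fls64()-based clamp in the driver.

#include <stdio.h>

int main(void)
{
	unsigned long long length = 0x2400000;		/* 36 MB region */
	unsigned long cap_mr_pgsize = 0x1000 | 0x10000;	/* example: 4K + 64K only */
	int page_shift;
	unsigned long hwpage_size;

	/* desired shift: grow in 4-bit steps toward the region size, cap at 16M */
	page_shift = 12;
	while (page_shift < 24 && (1ULL << page_shift) < length)
		page_shift += 4;
	hwpage_size = 1UL << page_shift;	/* 16M wanted here */

	while (!(hwpage_size & cap_mr_pgsize))	/* 4K always supported */
		hwpage_size >>= 4;

	printf("hwpage_size=0x%lx\n", hwpage_size);	/* 0x10000 (64K) */
	return 0;
}
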
323
324/*----------------------------------------------------------------------*/
325
326int ehca_dereg_mr(struct ib_mr *mr)
327{
328 int ret = 0;
329 u64 h_ret;
330 struct ehca_shca *shca =
331 container_of(mr->device, struct ehca_shca, ib_device);
332 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
333
334 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
335 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
336 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
337 ret = -EINVAL;
338 goto dereg_mr_exit0;
339 } else if (e_mr == shca->maxmr) {
340 /* should be impossible, however reject to be sure */
341 ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
342 "shca->maxmr=%p mr->lkey=%x",
343 mr, shca->maxmr, mr->lkey);
344 ret = -EINVAL;
345 goto dereg_mr_exit0;
346 }
347
348 /* TODO: BUSY: MR still has bound window(s) */
349 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
350 if (h_ret != H_SUCCESS) {
351 ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
352 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
353 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
354 e_mr->ipz_mr_handle.handle, mr->lkey);
355 ret = ehca2ib_return_code(h_ret);
356 goto dereg_mr_exit0;
357 }
358
359 if (e_mr->umem)
360 ib_umem_release(e_mr->umem);
361
362 /* successful deregistration */
363 ehca_mr_delete(e_mr);
364
365dereg_mr_exit0:
366 if (ret)
367 ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
368 return ret;
369} /* end ehca_dereg_mr() */
370
371/*----------------------------------------------------------------------*/
372
373struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
374{
375 struct ib_mw *ib_mw;
376 u64 h_ret;
377 struct ehca_mw *e_mw;
378 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
379 struct ehca_shca *shca =
380 container_of(pd->device, struct ehca_shca, ib_device);
381 struct ehca_mw_hipzout_parms hipzout;
382
383 if (type != IB_MW_TYPE_1)
384 return ERR_PTR(-EINVAL);
385
386 e_mw = ehca_mw_new();
387 if (!e_mw) {
388 ib_mw = ERR_PTR(-ENOMEM);
389 goto alloc_mw_exit0;
390 }
391
392 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
393 e_pd->fw_pd, &hipzout);
394 if (h_ret != H_SUCCESS) {
395 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
396 "shca=%p hca_hndl=%llx mw=%p",
397 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
398 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
399 goto alloc_mw_exit1;
400 }
401 /* successful MW allocation */
402 e_mw->ipz_mw_handle = hipzout.handle;
403 e_mw->ib_mw.rkey = hipzout.rkey;
404 return &e_mw->ib_mw;
405
406alloc_mw_exit1:
407 ehca_mw_delete(e_mw);
408alloc_mw_exit0:
409 if (IS_ERR(ib_mw))
410 ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
411 return ib_mw;
412} /* end ehca_alloc_mw() */
413
414/*----------------------------------------------------------------------*/
415
416int ehca_dealloc_mw(struct ib_mw *mw)
417{
418 u64 h_ret;
419 struct ehca_shca *shca =
420 container_of(mw->device, struct ehca_shca, ib_device);
421 struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
422
423 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
424 if (h_ret != H_SUCCESS) {
425 ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
426 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
427 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
428 e_mw->ipz_mw_handle.handle);
429 return ehca2ib_return_code(h_ret);
430 }
431 /* successful deallocation */
432 ehca_mw_delete(e_mw);
433 return 0;
434} /* end ehca_dealloc_mw() */
435
436/*----------------------------------------------------------------------*/
437
438struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
439 int mr_access_flags,
440 struct ib_fmr_attr *fmr_attr)
441{
442 struct ib_fmr *ib_fmr;
443 struct ehca_shca *shca =
444 container_of(pd->device, struct ehca_shca, ib_device);
445 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
446 struct ehca_mr *e_fmr;
447 int ret;
448 u32 tmp_lkey, tmp_rkey;
449 struct ehca_mr_pginfo pginfo;
450 u64 hw_pgsize;
451
452 /* check other parameters */
453 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
454 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
455 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
456 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
457 /*
458 * Remote Write Access requires Local Write Access
459 * Remote Atomic Access requires Local Write Access
460 */
461 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
462 mr_access_flags);
463 ib_fmr = ERR_PTR(-EINVAL);
464 goto alloc_fmr_exit0;
465 }
466 if (mr_access_flags & IB_ACCESS_MW_BIND) {
467 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
468 mr_access_flags);
469 ib_fmr = ERR_PTR(-EINVAL);
470 goto alloc_fmr_exit0;
471 }
472 if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
473 ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
474 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
475 fmr_attr->max_pages, fmr_attr->max_maps,
476 fmr_attr->page_shift);
477 ib_fmr = ERR_PTR(-EINVAL);
478 goto alloc_fmr_exit0;
479 }
480
481 hw_pgsize = 1 << fmr_attr->page_shift;
482 if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
483 ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
484 fmr_attr->page_shift);
485 ib_fmr = ERR_PTR(-EINVAL);
486 goto alloc_fmr_exit0;
487 }
488
489 e_fmr = ehca_mr_new();
490 if (!e_fmr) {
491 ib_fmr = ERR_PTR(-ENOMEM);
492 goto alloc_fmr_exit0;
493 }
494 e_fmr->flags |= EHCA_MR_FLAG_FMR;
495
496 /* register MR on HCA */
497 memset(&pginfo, 0, sizeof(pginfo));
498 pginfo.hwpage_size = hw_pgsize;
499 /*
500 * pginfo.num_hwpages==0, i.e. register_rpages() will not be called
501 * but deferred to map_phys_fmr()
502 */
503 ret = ehca_reg_mr(shca, e_fmr, NULL,
504 fmr_attr->max_pages * (1 << fmr_attr->page_shift),
505 mr_access_flags, e_pd, &pginfo,
506 &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
507 if (ret) {
508 ib_fmr = ERR_PTR(ret);
509 goto alloc_fmr_exit1;
510 }
511
512 /* successful */
513 e_fmr->hwpage_size = hw_pgsize;
514 e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
515 e_fmr->fmr_max_pages = fmr_attr->max_pages;
516 e_fmr->fmr_max_maps = fmr_attr->max_maps;
517 e_fmr->fmr_map_cnt = 0;
518 return &e_fmr->ib.ib_fmr;
519
520alloc_fmr_exit1:
521 ehca_mr_delete(e_fmr);
522alloc_fmr_exit0:
523 return ib_fmr;
524} /* end ehca_alloc_fmr() */
525
526/*----------------------------------------------------------------------*/
527
528int ehca_map_phys_fmr(struct ib_fmr *fmr,
529 u64 *page_list,
530 int list_len,
531 u64 iova)
532{
533 int ret;
534 struct ehca_shca *shca =
535 container_of(fmr->device, struct ehca_shca, ib_device);
536 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
537 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
538 struct ehca_mr_pginfo pginfo;
539 u32 tmp_lkey, tmp_rkey;
540
541 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
542 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
543 e_fmr, e_fmr->flags);
544 ret = -EINVAL;
545 goto map_phys_fmr_exit0;
546 }
547 ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
548 if (ret)
549 goto map_phys_fmr_exit0;
550 if (iova % e_fmr->fmr_page_size) {
551 /* only whole-numbered pages */
552 ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
553 iova, e_fmr->fmr_page_size);
554 ret = -EINVAL;
555 goto map_phys_fmr_exit0;
556 }
557 if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
558 /* HCAD does not limit the maps, however trace this anyway */
559 ehca_info(fmr->device, "map limit exceeded, fmr=%p "
560 "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
561 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
562 }
563
564 memset(&pginfo, 0, sizeof(pginfo));
565 pginfo.type = EHCA_MR_PGI_FMR;
566 pginfo.num_kpages = list_len;
567 pginfo.hwpage_size = e_fmr->hwpage_size;
568 pginfo.num_hwpages =
569 list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
570 pginfo.u.fmr.page_list = page_list;
571 pginfo.next_hwpage =
572 (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
573 pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
574
575 ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
576 list_len * e_fmr->fmr_page_size,
577 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
578 if (ret)
579 goto map_phys_fmr_exit0;
580
581 /* successful reregistration */
582 e_fmr->fmr_map_cnt++;
583 e_fmr->ib.ib_fmr.lkey = tmp_lkey;
584 e_fmr->ib.ib_fmr.rkey = tmp_rkey;
585 return 0;
586
587map_phys_fmr_exit0:
588 if (ret)
589 ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
590 "iova=%llx", ret, fmr, page_list, list_len, iova);
591 return ret;
592} /* end ehca_map_phys_fmr() */
593
594/*----------------------------------------------------------------------*/
595
596int ehca_unmap_fmr(struct list_head *fmr_list)
597{
598 int ret = 0;
599 struct ib_fmr *ib_fmr;
600 struct ehca_shca *shca = NULL;
601 struct ehca_shca *prev_shca;
602 struct ehca_mr *e_fmr;
603 u32 num_fmr = 0;
604 u32 unmap_fmr_cnt = 0;
605
606 /* check all FMR belong to same SHCA, and check internal flag */
607 list_for_each_entry(ib_fmr, fmr_list, list) {
608 prev_shca = shca;
609 shca = container_of(ib_fmr->device, struct ehca_shca,
610 ib_device);
611 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
612 if ((shca != prev_shca) && prev_shca) {
613 ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
614 "prev_shca=%p e_fmr=%p",
615 shca, prev_shca, e_fmr);
616 ret = -EINVAL;
617 goto unmap_fmr_exit0;
618 }
619 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
620 ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
621 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
622 ret = -EINVAL;
623 goto unmap_fmr_exit0;
624 }
625 num_fmr++;
626 }
627
628 /* loop over all FMRs to unmap */
629 list_for_each_entry(ib_fmr, fmr_list, list) {
630 unmap_fmr_cnt++;
631 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
632 shca = container_of(ib_fmr->device, struct ehca_shca,
633 ib_device);
634 ret = ehca_unmap_one_fmr(shca, e_fmr);
635 if (ret) {
636 /* unmap failed, stop unmapping of rest of FMRs */
637 ehca_err(&shca->ib_device, "unmap of one FMR failed, "
638 "stop rest, e_fmr=%p num_fmr=%x "
639 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
640 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
641 goto unmap_fmr_exit0;
642 }
643 }
644
645unmap_fmr_exit0:
646 if (ret)
647 ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
648 ret, fmr_list, num_fmr, unmap_fmr_cnt);
649 return ret;
650} /* end ehca_unmap_fmr() */
651
652/*----------------------------------------------------------------------*/
653
654int ehca_dealloc_fmr(struct ib_fmr *fmr)
655{
656 int ret;
657 u64 h_ret;
658 struct ehca_shca *shca =
659 container_of(fmr->device, struct ehca_shca, ib_device);
660 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
661
662 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
663 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
664 e_fmr, e_fmr->flags);
665 ret = -EINVAL;
666 goto free_fmr_exit0;
667 }
668
669 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
670 if (h_ret != H_SUCCESS) {
671 ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
672 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
673 h_ret, e_fmr, shca->ipz_hca_handle.handle,
674 e_fmr->ipz_mr_handle.handle, fmr->lkey);
675 ret = ehca2ib_return_code(h_ret);
676 goto free_fmr_exit0;
677 }
678 /* successful deregistration */
679 ehca_mr_delete(e_fmr);
680 return 0;
681
682free_fmr_exit0:
683 if (ret)
684 ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
685 return ret;
686} /* end ehca_dealloc_fmr() */
687
688/*----------------------------------------------------------------------*/
689
690static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
691 struct ehca_mr *e_mr,
692 struct ehca_mr_pginfo *pginfo);
693
694int ehca_reg_mr(struct ehca_shca *shca,
695 struct ehca_mr *e_mr,
696 u64 *iova_start,
697 u64 size,
698 int acl,
699 struct ehca_pd *e_pd,
700 struct ehca_mr_pginfo *pginfo,
701 u32 *lkey, /*OUT*/
702 u32 *rkey, /*OUT*/
703 enum ehca_reg_type reg_type)
704{
705 int ret;
706 u64 h_ret;
707 u32 hipz_acl;
708 struct ehca_mr_hipzout_parms hipzout;
709
710 ehca_mrmw_map_acl(acl, &hipz_acl);
711 ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
712 if (ehca_use_hp_mr == 1)
713 hipz_acl |= 0x00000001;
714
715 h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
716 (u64)iova_start, size, hipz_acl,
717 e_pd->fw_pd, &hipzout);
718 if (h_ret != H_SUCCESS) {
719 ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
720 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
721 ret = ehca2ib_return_code(h_ret);
722 goto ehca_reg_mr_exit0;
723 }
724
725 e_mr->ipz_mr_handle = hipzout.handle;
726
727 if (reg_type == EHCA_REG_BUSMAP_MR)
728 ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
729 else if (reg_type == EHCA_REG_MR)
730 ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
731 else
732 ret = -EINVAL;
733
734 if (ret)
735 goto ehca_reg_mr_exit1;
736
737 /* successful registration */
738 e_mr->num_kpages = pginfo->num_kpages;
739 e_mr->num_hwpages = pginfo->num_hwpages;
740 e_mr->hwpage_size = pginfo->hwpage_size;
741 e_mr->start = iova_start;
742 e_mr->size = size;
743 e_mr->acl = acl;
744 *lkey = hipzout.lkey;
745 *rkey = hipzout.rkey;
746 return 0;
747
748ehca_reg_mr_exit1:
749 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
750 if (h_ret != H_SUCCESS) {
751 ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
752 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
753 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
754 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
755 hipzout.lkey, pginfo, pginfo->num_kpages,
756 pginfo->num_hwpages, ret);
757 ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
758 "not recoverable");
759 }
760ehca_reg_mr_exit0:
761 if (ret)
762 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
763 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
764 "num_kpages=%llx num_hwpages=%llx",
765 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
766 pginfo->num_kpages, pginfo->num_hwpages);
767 return ret;
768} /* end ehca_reg_mr() */
769
770/*----------------------------------------------------------------------*/
771
772int ehca_reg_mr_rpages(struct ehca_shca *shca,
773 struct ehca_mr *e_mr,
774 struct ehca_mr_pginfo *pginfo)
775{
776 int ret = 0;
777 u64 h_ret;
778 u32 rnum;
779 u64 rpage;
780 u32 i;
781 u64 *kpage;
782
783 if (!pginfo->num_hwpages) /* in case of fmr */
784 return 0;
785
786 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
787 if (!kpage) {
788 ehca_err(&shca->ib_device, "kpage alloc failed");
789 ret = -ENOMEM;
790 goto ehca_reg_mr_rpages_exit0;
791 }
792
793 /* max MAX_RPAGES ehca mr pages per register call */
794 for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
795
796 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
797 rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
798 if (rnum == 0)
799 rnum = MAX_RPAGES; /* last shot is full */
800 } else
801 rnum = MAX_RPAGES;
802
803 ret = ehca_set_pagebuf(pginfo, rnum, kpage);
804 if (ret) {
805 ehca_err(&shca->ib_device, "ehca_set_pagebuf "
806 "bad rc, ret=%i rnum=%x kpage=%p",
807 ret, rnum, kpage);
808 goto ehca_reg_mr_rpages_exit1;
809 }
810
811 if (rnum > 1) {
812 rpage = __pa(kpage);
813 if (!rpage) {
814 ehca_err(&shca->ib_device, "kpage=%p i=%x",
815 kpage, i);
816 ret = -EFAULT;
817 goto ehca_reg_mr_rpages_exit1;
818 }
819 } else
820 rpage = *kpage;
821
822 h_ret = hipz_h_register_rpage_mr(
823 shca->ipz_hca_handle, e_mr,
824 ehca_encode_hwpage_size(pginfo->hwpage_size),
825 0, rpage, rnum);
826
827 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
828 /*
829 * check for 'registration complete'==H_SUCCESS
830 * and for 'page registered'==H_PAGE_REGISTERED
831 */
832 if (h_ret != H_SUCCESS) {
833 ehca_err(&shca->ib_device, "last "
834 "hipz_reg_rpage_mr failed, h_ret=%lli "
835 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
836 " lkey=%x", h_ret, e_mr, i,
837 shca->ipz_hca_handle.handle,
838 e_mr->ipz_mr_handle.handle,
839 e_mr->ib.ib_mr.lkey);
840 ret = ehca2ib_return_code(h_ret);
841 break;
842 } else
843 ret = 0;
844 } else if (h_ret != H_PAGE_REGISTERED) {
845 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
846 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
847 "mr_hndl=%llx", h_ret, e_mr, i,
848 e_mr->ib.ib_mr.lkey,
849 shca->ipz_hca_handle.handle,
850 e_mr->ipz_mr_handle.handle);
851 ret = ehca2ib_return_code(h_ret);
852 break;
853 } else
854 ret = 0;
855 } /* end for(i) */
856
857
858ehca_reg_mr_rpages_exit1:
859 ehca_free_fw_ctrlblock(kpage);
860ehca_reg_mr_rpages_exit0:
861 if (ret)
862 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
863 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
864 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
865 return ret;
866} /* end ehca_reg_mr_rpages() */
867
868/*----------------------------------------------------------------------*/
869
870inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
871 struct ehca_mr *e_mr,
872 u64 *iova_start,
873 u64 size,
874 u32 acl,
875 struct ehca_pd *e_pd,
876 struct ehca_mr_pginfo *pginfo,
877 u32 *lkey, /*OUT*/
878 u32 *rkey) /*OUT*/
879{
880 int ret;
881 u64 h_ret;
882 u32 hipz_acl;
883 u64 *kpage;
884 u64 rpage;
885 struct ehca_mr_pginfo pginfo_save;
886 struct ehca_mr_hipzout_parms hipzout;
887
888 ehca_mrmw_map_acl(acl, &hipz_acl);
889 ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
890
891 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
892 if (!kpage) {
893 ehca_err(&shca->ib_device, "kpage alloc failed");
894 ret = -ENOMEM;
895 goto ehca_rereg_mr_rereg1_exit0;
896 }
897
898 pginfo_save = *pginfo;
899 ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
900 if (ret) {
901 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
902 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
903 "kpage=%p", e_mr, pginfo, pginfo->type,
904 pginfo->num_kpages, pginfo->num_hwpages, kpage);
905 goto ehca_rereg_mr_rereg1_exit1;
906 }
907 rpage = __pa(kpage);
908 if (!rpage) {
909 ehca_err(&shca->ib_device, "kpage=%p", kpage);
910 ret = -EFAULT;
911 goto ehca_rereg_mr_rereg1_exit1;
912 }
913 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
914 (u64)iova_start, size, hipz_acl,
915 e_pd->fw_pd, rpage, &hipzout);
916 if (h_ret != H_SUCCESS) {
917 /*
918 * reregistration unsuccessful, try it again with the 3 hCalls,
919 * e.g. this is required in case of H_MR_CONDITION
920 * (MW bound or MR is shared)
921 */
922 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
923 "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
924 *pginfo = pginfo_save;
925 ret = -EAGAIN;
926 } else if ((u64 *)hipzout.vaddr != iova_start) {
927 ehca_err(&shca->ib_device, "PHYP changed iova_start in "
928 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
929 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
930 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
931 e_mr->ib.ib_mr.lkey, hipzout.lkey);
932 ret = -EFAULT;
933 } else {
934 /*
935 * successful reregistration
936 * note: start and start_out are identical for eServer HCAs
937 */
938 e_mr->num_kpages = pginfo->num_kpages;
939 e_mr->num_hwpages = pginfo->num_hwpages;
940 e_mr->hwpage_size = pginfo->hwpage_size;
941 e_mr->start = iova_start;
942 e_mr->size = size;
943 e_mr->acl = acl;
944 *lkey = hipzout.lkey;
945 *rkey = hipzout.rkey;
946 }
947
948ehca_rereg_mr_rereg1_exit1:
949 ehca_free_fw_ctrlblock(kpage);
950ehca_rereg_mr_rereg1_exit0:
951 if ( ret && (ret != -EAGAIN) )
952 ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
953 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
954 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
955 pginfo->num_hwpages);
956 return ret;
957} /* end ehca_rereg_mr_rereg1() */
958
959/*----------------------------------------------------------------------*/
960
961int ehca_rereg_mr(struct ehca_shca *shca,
962 struct ehca_mr *e_mr,
963 u64 *iova_start,
964 u64 size,
965 int acl,
966 struct ehca_pd *e_pd,
967 struct ehca_mr_pginfo *pginfo,
968 u32 *lkey,
969 u32 *rkey)
970{
971 int ret = 0;
972 u64 h_ret;
973 int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
974 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
975
976 /* first determine reregistration hCall(s) */
977 if ((pginfo->num_hwpages > MAX_RPAGES) ||
978 (e_mr->num_hwpages > MAX_RPAGES) ||
979 (pginfo->num_hwpages > e_mr->num_hwpages)) {
980 ehca_dbg(&shca->ib_device, "Rereg3 case, "
981 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
982 pginfo->num_hwpages, e_mr->num_hwpages);
983 rereg_1_hcall = 0;
984 rereg_3_hcall = 1;
985 }
986
987 if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
988 rereg_1_hcall = 0;
989 rereg_3_hcall = 1;
990 e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
991 ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
992 e_mr);
993 }
994
995 if (rereg_1_hcall) {
996 ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
997 acl, e_pd, pginfo, lkey, rkey);
998 if (ret) {
999 if (ret == -EAGAIN)
1000 rereg_3_hcall = 1;
1001 else
1002 goto ehca_rereg_mr_exit0;
1003 }
1004 }
1005
1006 if (rereg_3_hcall) {
1007 struct ehca_mr save_mr;
1008
1009 /* first deregister old MR */
1010 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1011 if (h_ret != H_SUCCESS) {
1012 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1013 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
1014 "mr->lkey=%x",
1015 h_ret, e_mr, shca->ipz_hca_handle.handle,
1016 e_mr->ipz_mr_handle.handle,
1017 e_mr->ib.ib_mr.lkey);
1018 ret = ehca2ib_return_code(h_ret);
1019 goto ehca_rereg_mr_exit0;
1020 }
1021 /* clean ehca_mr_t, without changing struct ib_mr and lock */
1022 save_mr = *e_mr;
1023 ehca_mr_deletenew(e_mr);
1024
1025 /* set some MR values */
1026 e_mr->flags = save_mr.flags;
1027 e_mr->hwpage_size = save_mr.hwpage_size;
1028 e_mr->fmr_page_size = save_mr.fmr_page_size;
1029 e_mr->fmr_max_pages = save_mr.fmr_max_pages;
1030 e_mr->fmr_max_maps = save_mr.fmr_max_maps;
1031 e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
1032
1033 ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
1034 e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
1035 if (ret) {
1036 u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
1037 memcpy(&e_mr->flags, &(save_mr.flags),
1038 sizeof(struct ehca_mr) - offset);
1039 goto ehca_rereg_mr_exit0;
1040 }
1041 }
1042
1043ehca_rereg_mr_exit0:
1044 if (ret)
1045 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
1046 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
1047 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
1048 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1049 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
1050 rereg_1_hcall, rereg_3_hcall);
1051 return ret;
1052} /* end ehca_rereg_mr() */
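For illustration only: a standalone sketch of the up-front path selection that ehca_rereg_mr() performs above. The single hipz_h_reregister_pmr hCall (Rereg1) is only attempted when neither the old nor the new mapping exceeds MAX_RPAGES, the region does not grow, and the MR is not the internal max-MR; even then, an -EAGAIN from Rereg1 still falls back to the free-plus-register (3-hCall) path. MAX_RPAGES is assumed to be 512 here, which may not match the real header value.

#include <stdio.h>
#include <stdint.h>

#define MAX_RPAGES 512	/* assumed value, for illustration only */

/* 1: the 3-hCall path (free + register) must be used,
 * 0: the single reregister hCall may be tried first */
static int needs_rereg3(uint64_t new_hwpages, uint64_t old_hwpages,
			int is_maxmr)
{
	if (new_hwpages > MAX_RPAGES ||
	    old_hwpages > MAX_RPAGES ||
	    new_hwpages > old_hwpages ||
	    is_maxmr)
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", needs_rereg3(100, 200, 0));	/* 0: Rereg1 possible */
	printf("%d\n", needs_rereg3(300, 200, 0));	/* 1: region grows */
	printf("%d\n", needs_rereg3(1000, 2000, 0));	/* 1: too many pages */
	return 0;
}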
1053
1054/*----------------------------------------------------------------------*/
1055
1056int ehca_unmap_one_fmr(struct ehca_shca *shca,
1057 struct ehca_mr *e_fmr)
1058{
1059 int ret = 0;
1060 u64 h_ret;
1061 struct ehca_pd *e_pd =
1062 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1063 struct ehca_mr save_fmr;
1064 u32 tmp_lkey, tmp_rkey;
1065 struct ehca_mr_pginfo pginfo;
1066 struct ehca_mr_hipzout_parms hipzout;
1067 struct ehca_mr save_mr;
1068
1069 if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
1070 /*
1071 * note: after using rereg hcall with len=0,
1072 * rereg hcall must be used again for registering pages
1073 */
1074 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
1075 0, 0, e_pd->fw_pd, 0, &hipzout);
1076 if (h_ret == H_SUCCESS) {
1077 /* successful reregistration */
1078 e_fmr->start = NULL;
1079 e_fmr->size = 0;
1080 tmp_lkey = hipzout.lkey;
1081 tmp_rkey = hipzout.rkey;
1082 return 0;
1083 }
1084 /*
1085	 * This should not happen: the length was checked above,
1086	 * FMRs are not shared, and no MW can be bound to an FMR.
1087 */
1088 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1089 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
1090 "mr_hndl=%llx lkey=%x lkey_out=%x",
1091 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1092 e_fmr->ipz_mr_handle.handle,
1093 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
1094 /* try free and rereg */
1095 }
1096
1097 /* first free old FMR */
1098 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1099 if (h_ret != H_SUCCESS) {
1100 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1101 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
1102 "lkey=%x",
1103 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1104 e_fmr->ipz_mr_handle.handle,
1105 e_fmr->ib.ib_fmr.lkey);
1106 ret = ehca2ib_return_code(h_ret);
1107 goto ehca_unmap_one_fmr_exit0;
1108 }
1109 /* clean ehca_mr_t, without changing lock */
1110 save_fmr = *e_fmr;
1111 ehca_mr_deletenew(e_fmr);
1112
1113 /* set some MR values */
1114 e_fmr->flags = save_fmr.flags;
1115 e_fmr->hwpage_size = save_fmr.hwpage_size;
1116 e_fmr->fmr_page_size = save_fmr.fmr_page_size;
1117 e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
1118 e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
1119 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1120 e_fmr->acl = save_fmr.acl;
1121
1122 memset(&pginfo, 0, sizeof(pginfo));
1123 pginfo.type = EHCA_MR_PGI_FMR;
1124 ret = ehca_reg_mr(shca, e_fmr, NULL,
1125 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1126 e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1127 &tmp_rkey, EHCA_REG_MR);
1128 if (ret) {
1129 u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
1130		memcpy(&e_fmr->flags, &(save_fmr.flags),
1131 sizeof(struct ehca_mr) - offset);
1132 }
1133
1134ehca_unmap_one_fmr_exit0:
1135 if (ret)
1136 ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
1137 "fmr_max_pages=%x",
1138 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
1139 return ret;
1140} /* end ehca_unmap_one_fmr() */
1141
1142/*----------------------------------------------------------------------*/
1143
1144int ehca_reg_smr(struct ehca_shca *shca,
1145 struct ehca_mr *e_origmr,
1146 struct ehca_mr *e_newmr,
1147 u64 *iova_start,
1148 int acl,
1149 struct ehca_pd *e_pd,
1150 u32 *lkey, /*OUT*/
1151 u32 *rkey) /*OUT*/
1152{
1153 int ret = 0;
1154 u64 h_ret;
1155 u32 hipz_acl;
1156 struct ehca_mr_hipzout_parms hipzout;
1157
1158 ehca_mrmw_map_acl(acl, &hipz_acl);
1159 ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
1160
1161 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1162 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1163 &hipzout);
1164 if (h_ret != H_SUCCESS) {
1165 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
1166 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1167 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
1168 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1169 shca->ipz_hca_handle.handle,
1170 e_origmr->ipz_mr_handle.handle,
1171 e_origmr->ib.ib_mr.lkey);
1172 ret = ehca2ib_return_code(h_ret);
1173 goto ehca_reg_smr_exit0;
1174 }
1175 /* successful registration */
1176 e_newmr->num_kpages = e_origmr->num_kpages;
1177 e_newmr->num_hwpages = e_origmr->num_hwpages;
1178 e_newmr->hwpage_size = e_origmr->hwpage_size;
1179 e_newmr->start = iova_start;
1180 e_newmr->size = e_origmr->size;
1181 e_newmr->acl = acl;
1182 e_newmr->ipz_mr_handle = hipzout.handle;
1183 *lkey = hipzout.lkey;
1184 *rkey = hipzout.rkey;
1185 return 0;
1186
1187ehca_reg_smr_exit0:
1188 if (ret)
1189 ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
1190 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
1191 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
1192 return ret;
1193} /* end ehca_reg_smr() */
1194
1195/*----------------------------------------------------------------------*/
1196static inline void *ehca_calc_sectbase(int top, int dir, int idx)
1197{
1198 unsigned long ret = idx;
1199 ret |= dir << EHCA_DIR_INDEX_SHIFT;
1200 ret |= top << EHCA_TOP_INDEX_SHIFT;
1201 return __va(ret << SECTION_SIZE_BITS);
1202}
1203
1204#define ehca_bmap_valid(entry) \
1205 ((u64)entry != (u64)EHCA_INVAL_ADDR)
1206
1207static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
1208 struct ehca_shca *shca, struct ehca_mr *mr,
1209 struct ehca_mr_pginfo *pginfo)
1210{
1211 u64 h_ret = 0;
1212 unsigned long page = 0;
1213 u64 rpage = __pa(kpage);
1214 int page_count;
1215
1216 void *sectbase = ehca_calc_sectbase(top, dir, idx);
1217 if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
1218		ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
1219			 "section start address is not aligned "
1220			 "to hwpage_size");
1221 }
1222 page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
1223
1224 while (page < page_count) {
1225 u64 rnum;
1226 for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
1227 rnum++) {
1228 void *pg = sectbase + ((page++) * pginfo->hwpage_size);
1229 kpage[rnum] = __pa(pg);
1230 }
1231
1232 h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
1233 ehca_encode_hwpage_size(pginfo->hwpage_size),
1234 0, rpage, rnum);
1235
1236 if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
1237 ehca_err(&shca->ib_device, "register_rpage_mr failed");
1238 return h_ret;
1239 }
1240 }
1241 return h_ret;
1242}
1243
1244static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
1245 struct ehca_shca *shca, struct ehca_mr *mr,
1246 struct ehca_mr_pginfo *pginfo)
1247{
1248 u64 hret = H_SUCCESS;
1249 int idx;
1250
1251 for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
1252 if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
1253 continue;
1254
1255 hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
1256 pginfo);
1257 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
1258 return hret;
1259 }
1260 return hret;
1261}
1262
1263static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
1264 struct ehca_mr *mr,
1265 struct ehca_mr_pginfo *pginfo)
1266{
1267 u64 hret = H_SUCCESS;
1268 int dir;
1269
1270 for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
1271 if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
1272 continue;
1273
1274 hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
1275 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
1276 return hret;
1277 }
1278 return hret;
1279}
1280
1281/* register internal max-MR to internal SHCA */
1282int ehca_reg_internal_maxmr(
1283 struct ehca_shca *shca,
1284 struct ehca_pd *e_pd,
1285 struct ehca_mr **e_maxmr) /*OUT*/
1286{
1287 int ret;
1288 struct ehca_mr *e_mr;
1289 u64 *iova_start;
1290 u64 size_maxmr;
1291 struct ehca_mr_pginfo pginfo;
1292 u32 num_kpages;
1293 u32 num_hwpages;
1294 u64 hw_pgsize;
1295
1296 if (!ehca_bmap) {
1297 ret = -EFAULT;
1298 goto ehca_reg_internal_maxmr_exit0;
1299 }
1300
1301 e_mr = ehca_mr_new();
1302 if (!e_mr) {
1303 ehca_err(&shca->ib_device, "out of memory");
1304 ret = -ENOMEM;
1305 goto ehca_reg_internal_maxmr_exit0;
1306 }
1307 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
1308
1309 /* register internal max-MR on HCA */
1310 size_maxmr = ehca_mr_len;
1311 iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
1312 num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
1313 PAGE_SIZE);
1314 hw_pgsize = ehca_get_max_hwpage_size(shca);
1315 num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
1316 hw_pgsize);
1317
1318 memset(&pginfo, 0, sizeof(pginfo));
1319 pginfo.type = EHCA_MR_PGI_PHYS;
1320 pginfo.num_kpages = num_kpages;
1321 pginfo.num_hwpages = num_hwpages;
1322 pginfo.hwpage_size = hw_pgsize;
1323 pginfo.u.phy.addr = 0;
1324 pginfo.u.phy.size = size_maxmr;
1325
1326 ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
1327 &pginfo, &e_mr->ib.ib_mr.lkey,
1328 &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
1329 if (ret) {
1330 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1331 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
1332 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
1333 num_kpages, num_hwpages);
1334 goto ehca_reg_internal_maxmr_exit1;
1335 }
1336
1337 /* successful registration of all pages */
1338 e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
1339 e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
1340 e_mr->ib.ib_mr.uobject = NULL;
1341 atomic_inc(&(e_pd->ib_pd.usecnt));
1342 *e_maxmr = e_mr;
1343 return 0;
1344
1345ehca_reg_internal_maxmr_exit1:
1346 ehca_mr_delete(e_mr);
1347ehca_reg_internal_maxmr_exit0:
1348 if (ret)
1349 ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
1350 ret, shca, e_pd, e_maxmr);
1351 return ret;
1352} /* end ehca_reg_internal_maxmr() */
1353
1354/*----------------------------------------------------------------------*/
1355
1356int ehca_reg_maxmr(struct ehca_shca *shca,
1357 struct ehca_mr *e_newmr,
1358 u64 *iova_start,
1359 int acl,
1360 struct ehca_pd *e_pd,
1361 u32 *lkey,
1362 u32 *rkey)
1363{
1364 u64 h_ret;
1365 struct ehca_mr *e_origmr = shca->maxmr;
1366 u32 hipz_acl;
1367 struct ehca_mr_hipzout_parms hipzout;
1368
1369 ehca_mrmw_map_acl(acl, &hipz_acl);
1370 ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
1371
1372 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1373 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1374 &hipzout);
1375 if (h_ret != H_SUCCESS) {
1376 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
1377 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
1378 h_ret, e_origmr, shca->ipz_hca_handle.handle,
1379 e_origmr->ipz_mr_handle.handle,
1380 e_origmr->ib.ib_mr.lkey);
1381 return ehca2ib_return_code(h_ret);
1382 }
1383 /* successful registration */
1384 e_newmr->num_kpages = e_origmr->num_kpages;
1385 e_newmr->num_hwpages = e_origmr->num_hwpages;
1386 e_newmr->hwpage_size = e_origmr->hwpage_size;
1387 e_newmr->start = iova_start;
1388 e_newmr->size = e_origmr->size;
1389 e_newmr->acl = acl;
1390 e_newmr->ipz_mr_handle = hipzout.handle;
1391 *lkey = hipzout.lkey;
1392 *rkey = hipzout.rkey;
1393 return 0;
1394} /* end ehca_reg_maxmr() */
1395
1396/*----------------------------------------------------------------------*/
1397
1398int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
1399{
1400 int ret;
1401 struct ehca_mr *e_maxmr;
1402 struct ib_pd *ib_pd;
1403
1404 if (!shca->maxmr) {
1405 ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
1406 ret = -EINVAL;
1407 goto ehca_dereg_internal_maxmr_exit0;
1408 }
1409
1410 e_maxmr = shca->maxmr;
1411 ib_pd = e_maxmr->ib.ib_mr.pd;
1412 shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
1413
1414 ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
1415 if (ret) {
1416 ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
1417 "ret=%i e_maxmr=%p shca=%p lkey=%x",
1418 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
1419 shca->maxmr = e_maxmr;
1420 goto ehca_dereg_internal_maxmr_exit0;
1421 }
1422
1423 atomic_dec(&ib_pd->usecnt);
1424
1425ehca_dereg_internal_maxmr_exit0:
1426 if (ret)
1427 ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
1428 ret, shca, shca->maxmr);
1429 return ret;
1430} /* end ehca_dereg_internal_maxmr() */
1431
1432/*----------------------------------------------------------------------*/
1433
1434/* check the page list passed to the map-FMR verb for validity */
1435int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1436 u64 *page_list,
1437 int list_len)
1438{
1439 u32 i;
1440 u64 *page;
1441
1442 if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1443 ehca_gen_err("bad list_len, list_len=%x "
1444 "e_fmr->fmr_max_pages=%x fmr=%p",
1445 list_len, e_fmr->fmr_max_pages, e_fmr);
1446 return -EINVAL;
1447 }
1448
1449 /* each page must be aligned */
1450 page = page_list;
1451 for (i = 0; i < list_len; i++) {
1452 if (*page % e_fmr->fmr_page_size) {
1453 ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
1454 "fmr_page_size=%x", i, *page, page, e_fmr,
1455 e_fmr->fmr_page_size);
1456 return -EINVAL;
1457 }
1458 page++;
1459 }
1460
1461 return 0;
1462} /* end ehca_fmr_check_page_list() */
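For illustration only: the check above reduces to a non-empty list that fits within the FMR's maximum page count and whose entries are all multiples of the FMR page size. A minimal standalone sketch of the same test, assuming a hypothetical 4 KiB FMR page size:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

/* mirror of the validity test, with illustrative parameters */
static int check_page_list(const uint64_t *page_list, int list_len,
			   int fmr_max_pages, uint64_t fmr_page_size)
{
	int i;

	if (list_len == 0 || list_len > fmr_max_pages)
		return -EINVAL;		/* bad list length */

	for (i = 0; i < list_len; i++)
		if (page_list[i] % fmr_page_size)
			return -EINVAL;	/* page address not aligned */

	return 0;
}

int main(void)
{
	uint64_t good[] = { 0x10000, 0x11000 };	/* both 4 KiB aligned */
	uint64_t bad[]  = { 0x10000, 0x11080 };	/* second entry misaligned */

	printf("good list -> %d\n", check_page_list(good, 2, 16, 0x1000));
	printf("bad list  -> %d\n", check_page_list(bad, 2, 16, 0x1000));
	return 0;
}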
1463
1464/*----------------------------------------------------------------------*/
1465
1466/* PAGE_SIZE >= pginfo->hwpage_size */
1467static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
1468 u32 number,
1469 u64 *kpage)
1470{
1471 int ret = 0;
1472 u64 pgaddr;
1473 u32 j = 0;
1474 int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
1475 struct scatterlist **sg = &pginfo->u.usr.next_sg;
1476
1477 while (*sg != NULL) {
1478 pgaddr = page_to_pfn(sg_page(*sg))
1479 << PAGE_SHIFT;
1480 *kpage = pgaddr + (pginfo->next_hwpage *
1481 pginfo->hwpage_size);
1482 if (!(*kpage)) {
1483 ehca_gen_err("pgaddr=%llx "
1484 "sg_dma_address=%llx "
1485 "entry=%llx next_hwpage=%llx",
1486 pgaddr, (u64)sg_dma_address(*sg),
1487 pginfo->u.usr.next_nmap,
1488 pginfo->next_hwpage);
1489 return -EFAULT;
1490 }
1491 (pginfo->hwpage_cnt)++;
1492 (pginfo->next_hwpage)++;
1493 kpage++;
1494 if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
1495 (pginfo->kpage_cnt)++;
1496 (pginfo->u.usr.next_nmap)++;
1497 pginfo->next_hwpage = 0;
1498 *sg = sg_next(*sg);
1499 }
1500 j++;
1501 if (j >= number)
1502 break;
1503 }
1504
1505 return ret;
1506}
1507
1508/*
1509 * check the given pages for a contiguous layout;
1510 * the last page address is returned in prev_pgaddr for further checks
1511 */
1512static int ehca_check_kpages_per_ate(struct scatterlist **sg,
1513 int num_pages,
1514 u64 *prev_pgaddr)
1515{
1516 for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
1517 u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
1518 if (ehca_debug_level >= 3)
1519 ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
1520 *(u64 *)__va(pgaddr));
1521 if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
1522 ehca_gen_err("uncontiguous page found pgaddr=%llx "
1523 "prev_pgaddr=%llx entries_left_in_hwpage=%x",
1524 pgaddr, *prev_pgaddr, num_pages);
1525 return -EINVAL;
1526 }
1527 *prev_pgaddr = pgaddr;
1528 }
1529 return 0;
1530}
1531
1532/* PAGE_SIZE < pginfo->hwpage_size */
1533static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1534 u32 number,
1535 u64 *kpage)
1536{
1537 int ret = 0;
1538 u64 pgaddr, prev_pgaddr;
1539 u32 j = 0;
1540 int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
1541 int nr_kpages = kpages_per_hwpage;
1542 struct scatterlist **sg = &pginfo->u.usr.next_sg;
1543
1544 while (*sg != NULL) {
1545
1546 if (nr_kpages == kpages_per_hwpage) {
1547 pgaddr = (page_to_pfn(sg_page(*sg))
1548 << PAGE_SHIFT);
1549 *kpage = pgaddr;
1550 if (!(*kpage)) {
1551 ehca_gen_err("pgaddr=%llx entry=%llx",
1552 pgaddr, pginfo->u.usr.next_nmap);
1553 ret = -EFAULT;
1554 return ret;
1555 }
1556 /*
1557 * The first page in a hwpage must be aligned;
1558 * the first MR page is exempt from this rule.
1559 */
1560 if (pgaddr & (pginfo->hwpage_size - 1)) {
1561 if (pginfo->hwpage_cnt) {
1562 ehca_gen_err(
1563 "invalid alignment "
1564 "pgaddr=%llx entry=%llx "
1565 "mr_pgsize=%llx",
1566 pgaddr, pginfo->u.usr.next_nmap,
1567 pginfo->hwpage_size);
1568 ret = -EFAULT;
1569 return ret;
1570 }
1571 /* first MR page */
1572 pginfo->kpage_cnt =
1573 (pgaddr &
1574 (pginfo->hwpage_size - 1)) >>
1575 PAGE_SHIFT;
1576 nr_kpages -= pginfo->kpage_cnt;
1577 *kpage = pgaddr &
1578 ~(pginfo->hwpage_size - 1);
1579 }
1580 if (ehca_debug_level >= 3) {
1581 u64 val = *(u64 *)__va(pgaddr);
1582 ehca_gen_dbg("kpage=%llx page=%llx "
1583 "value=%016llx",
1584 *kpage, pgaddr, val);
1585 }
1586 prev_pgaddr = pgaddr;
1587 *sg = sg_next(*sg);
1588 pginfo->kpage_cnt++;
1589 pginfo->u.usr.next_nmap++;
1590 nr_kpages--;
1591 if (!nr_kpages)
1592 goto next_kpage;
1593 continue;
1594 }
1595
1596 ret = ehca_check_kpages_per_ate(sg, nr_kpages,
1597 &prev_pgaddr);
1598 if (ret)
1599 return ret;
1600 pginfo->kpage_cnt += nr_kpages;
1601 pginfo->u.usr.next_nmap += nr_kpages;
1602
1603next_kpage:
1604 nr_kpages = kpages_per_hwpage;
1605 (pginfo->hwpage_cnt)++;
1606 kpage++;
1607 j++;
1608 if (j >= number)
1609 break;
1610 }
1611
1612 return ret;
1613}
1614
1615static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
1616 u32 number, u64 *kpage)
1617{
1618 int ret = 0;
1619 u64 addr = pginfo->u.phy.addr;
1620 u64 size = pginfo->u.phy.size;
1621 u64 num_hw, offs_hw;
1622 u32 i = 0;
1623
1624 num_hw = NUM_CHUNKS((addr % pginfo->hwpage_size) + size,
1625 pginfo->hwpage_size);
1626 offs_hw = (addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size;
1627
1628 while (pginfo->next_hwpage < offs_hw + num_hw) {
1629 /* sanity check */
1630 if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
1631 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
1632 ehca_gen_err("kpage_cnt >= num_kpages, "
1633 "kpage_cnt=%llx num_kpages=%llx "
1634 "hwpage_cnt=%llx "
1635 "num_hwpages=%llx i=%x",
1636 pginfo->kpage_cnt,
1637 pginfo->num_kpages,
1638 pginfo->hwpage_cnt,
1639 pginfo->num_hwpages, i);
1640 return -EFAULT;
1641 }
1642 *kpage = (addr & ~(pginfo->hwpage_size - 1)) +
1643 (pginfo->next_hwpage * pginfo->hwpage_size);
1644		if (!(*kpage) && addr) {
1645 ehca_gen_err("addr=%llx size=%llx "
1646 "next_hwpage=%llx", addr,
1647 size, pginfo->next_hwpage);
1648 return -EFAULT;
1649 }
1650 (pginfo->hwpage_cnt)++;
1651 (pginfo->next_hwpage)++;
1652 if (PAGE_SIZE >= pginfo->hwpage_size) {
1653 if (pginfo->next_hwpage %
1654 (PAGE_SIZE / pginfo->hwpage_size) == 0)
1655 (pginfo->kpage_cnt)++;
1656 } else
1657 pginfo->kpage_cnt += pginfo->hwpage_size /
1658 PAGE_SIZE;
1659 kpage++;
1660 i++;
1661 if (i >= number) break;
1662 }
1663 if (pginfo->next_hwpage >= offs_hw + num_hw) {
1664 pginfo->next_hwpage = 0;
1665 }
1666
1667 return ret;
1668}
1669
1670static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
1671 u32 number, u64 *kpage)
1672{
1673 int ret = 0;
1674 u64 *fmrlist;
1675 u32 i;
1676
1677 /* loop over desired page_list entries */
1678 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
1679 for (i = 0; i < number; i++) {
1680 *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
1681 pginfo->next_hwpage * pginfo->hwpage_size;
1682		if (!(*kpage)) {
1683 ehca_gen_err("*fmrlist=%llx fmrlist=%p "
1684 "next_listelem=%llx next_hwpage=%llx",
1685 *fmrlist, fmrlist,
1686 pginfo->u.fmr.next_listelem,
1687 pginfo->next_hwpage);
1688 return -EFAULT;
1689 }
1690 (pginfo->hwpage_cnt)++;
1691 if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
1692 if (pginfo->next_hwpage %
1693 (pginfo->u.fmr.fmr_pgsize /
1694 pginfo->hwpage_size) == 0) {
1695 (pginfo->kpage_cnt)++;
1696 (pginfo->u.fmr.next_listelem)++;
1697 fmrlist++;
1698 pginfo->next_hwpage = 0;
1699 } else
1700 (pginfo->next_hwpage)++;
1701 } else {
1702 unsigned int cnt_per_hwpage = pginfo->hwpage_size /
1703 pginfo->u.fmr.fmr_pgsize;
1704 unsigned int j;
1705 u64 prev = *kpage;
1706			/* check if the addresses are contiguous */
1707 for (j = 1; j < cnt_per_hwpage; j++) {
1708 u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);
1709 if (prev + pginfo->u.fmr.fmr_pgsize != p) {
1710 ehca_gen_err("uncontiguous fmr pages "
1711 "found prev=%llx p=%llx "
1712 "idx=%x", prev, p, i + j);
1713 return -EINVAL;
1714 }
1715 prev = p;
1716 }
1717 pginfo->kpage_cnt += cnt_per_hwpage;
1718 pginfo->u.fmr.next_listelem += cnt_per_hwpage;
1719 fmrlist += cnt_per_hwpage;
1720 }
1721 kpage++;
1722 }
1723 return ret;
1724}
1725
1726/* setup page buffer from page info */
1727int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
1728 u32 number,
1729 u64 *kpage)
1730{
1731 int ret;
1732
1733 switch (pginfo->type) {
1734 case EHCA_MR_PGI_PHYS:
1735 ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
1736 break;
1737 case EHCA_MR_PGI_USER:
1738 ret = PAGE_SIZE >= pginfo->hwpage_size ?
1739 ehca_set_pagebuf_user1(pginfo, number, kpage) :
1740 ehca_set_pagebuf_user2(pginfo, number, kpage);
1741 break;
1742 case EHCA_MR_PGI_FMR:
1743 ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
1744 break;
1745 default:
1746 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1747 ret = -EFAULT;
1748 break;
1749 }
1750 return ret;
1751} /* end ehca_set_pagebuf() */
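For illustration only: ehca_set_pagebuf() fills at most `number` translation entries per call, so callers such as ehca_reg_mr_rpages() earlier in this file drive it in chunks of at most MAX_RPAGES entries, issuing one firmware registration hCall per chunk. A standalone sketch of that chunking loop, with an assumed MAX_RPAGES of 512 and a hypothetical page count:

#include <stdio.h>
#include <stdint.h>

#define MAX_RPAGES 512	/* assumed chunk limit per hCall, for illustration */

int main(void)
{
	uint64_t num_hwpages = 1300;	/* hypothetical MR size in hw pages */
	uint64_t done = 0;

	while (done < num_hwpages) {
		uint64_t chunk = num_hwpages - done;

		if (chunk > MAX_RPAGES)
			chunk = MAX_RPAGES;

		/* the driver would fill kpage[] here via ehca_set_pagebuf()
		 * and pass the chunk to one firmware registration call */
		printf("register %llu hw pages starting at index %llu\n",
		       (unsigned long long)chunk, (unsigned long long)done);
		done += chunk;
	}
	return 0;
}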
1752
1753/*----------------------------------------------------------------------*/
1754
1755/*
1756 * check whether the MR is a max-MR, i.e. covers the whole of memory;
1757 * returns 1 if it is a max-MR, else 0
1758 */
1759int ehca_mr_is_maxmr(u64 size,
1760 u64 *iova_start)
1761{
1762	/* an MR is treated as a max-MR only if it meets the following: */
1763 if ((size == ehca_mr_len) &&
1764 (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
1765 ehca_gen_dbg("this is a max-MR");
1766 return 1;
1767 } else
1768 return 0;
1769} /* end ehca_mr_is_maxmr() */
1770
1771/*----------------------------------------------------------------------*/
1772
1773/* map IB access control to hipz access control; used for both MR and MW */
1774void ehca_mrmw_map_acl(int ib_acl,
1775 u32 *hipz_acl)
1776{
1777 *hipz_acl = 0;
1778 if (ib_acl & IB_ACCESS_REMOTE_READ)
1779 *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
1780 if (ib_acl & IB_ACCESS_REMOTE_WRITE)
1781 *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
1782 if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
1783 *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
1784 if (ib_acl & IB_ACCESS_LOCAL_WRITE)
1785 *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
1786 if (ib_acl & IB_ACCESS_MW_BIND)
1787 *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
1788} /* end ehca_mrmw_map_acl() */
1789
1790/*----------------------------------------------------------------------*/
1791
1792/* sets page size in hipz access control for MR/MW. */
1793void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
1794{
1795 *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
1796} /* end ehca_mrmw_set_pgsize_hipz_acl() */
1797
1798/*----------------------------------------------------------------------*/
1799
1800/*
1801 * reverse-map hipz access control back to IB access control;
1802 * used for both MR and MW
1803 */
1804void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
1805 int *ib_acl) /*OUT*/
1806{
1807 *ib_acl = 0;
1808 if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
1809 *ib_acl |= IB_ACCESS_REMOTE_READ;
1810 if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
1811 *ib_acl |= IB_ACCESS_REMOTE_WRITE;
1812 if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
1813 *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
1814 if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
1815 *ib_acl |= IB_ACCESS_LOCAL_WRITE;
1816 if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
1817 *ib_acl |= IB_ACCESS_MW_BIND;
1818} /* end ehca_mrmw_reverse_map_acl() */
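For illustration only: the forward map in ehca_mrmw_map_acl() and the reverse map above are inverses of each other on the five IB access bits they handle. The sketch below redefines both the IB-side and the firmware-side bits locally so it builds stand-alone; the firmware-side values are placeholders and are not the real HIPZ_ACCESSCTRL_* definitions from hipz_hw.h.

#include <stdio.h>

/* local stand-ins for the IB access flags */
#define IB_LOCAL_WRITE		(1 << 0)
#define IB_REMOTE_WRITE		(1 << 1)
#define IB_REMOTE_READ		(1 << 2)
#define IB_REMOTE_ATOMIC	(1 << 3)
#define IB_MW_BIND		(1 << 4)

/* placeholder firmware-side bits, assumed for illustration only */
#define FW_R_READ	(1 << 8)
#define FW_R_WRITE	(1 << 9)
#define FW_R_ATOMIC	(1 << 10)
#define FW_L_WRITE	(1 << 11)
#define FW_MW_BIND	(1 << 12)

static unsigned map_acl(int ib_acl)
{
	unsigned fw = 0;

	if (ib_acl & IB_REMOTE_READ)
		fw |= FW_R_READ;
	if (ib_acl & IB_REMOTE_WRITE)
		fw |= FW_R_WRITE;
	if (ib_acl & IB_REMOTE_ATOMIC)
		fw |= FW_R_ATOMIC;
	if (ib_acl & IB_LOCAL_WRITE)
		fw |= FW_L_WRITE;
	if (ib_acl & IB_MW_BIND)
		fw |= FW_MW_BIND;
	return fw;
}

static int reverse_map_acl(unsigned fw)
{
	int ib_acl = 0;

	if (fw & FW_R_READ)
		ib_acl |= IB_REMOTE_READ;
	if (fw & FW_R_WRITE)
		ib_acl |= IB_REMOTE_WRITE;
	if (fw & FW_R_ATOMIC)
		ib_acl |= IB_REMOTE_ATOMIC;
	if (fw & FW_L_WRITE)
		ib_acl |= IB_LOCAL_WRITE;
	if (fw & FW_MW_BIND)
		ib_acl |= IB_MW_BIND;
	return ib_acl;
}

int main(void)
{
	int acl = IB_LOCAL_WRITE | IB_REMOTE_READ;

	/* round trip through both maps gives back the original flags */
	printf("round trip ok: %d\n", reverse_map_acl(map_acl(acl)) == acl);
	return 0;
}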
1819
1820
1821/*----------------------------------------------------------------------*/
1822
1823/*
1824 * MR destructor and constructor
1825 * used by the reregister-MR verb; resets all fields of struct ehca_mr to 0,
1826 * except struct ib_mr and the spinlock
1827 */
1828void ehca_mr_deletenew(struct ehca_mr *mr)
1829{
1830 mr->flags = 0;
1831 mr->num_kpages = 0;
1832 mr->num_hwpages = 0;
1833 mr->acl = 0;
1834 mr->start = NULL;
1835 mr->fmr_page_size = 0;
1836 mr->fmr_max_pages = 0;
1837 mr->fmr_max_maps = 0;
1838 mr->fmr_map_cnt = 0;
1839 memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
1840 memset(&mr->galpas, 0, sizeof(mr->galpas));
1841} /* end ehca_mr_deletenew() */
1842
1843int ehca_init_mrmw_cache(void)
1844{
1845 mr_cache = kmem_cache_create("ehca_cache_mr",
1846 sizeof(struct ehca_mr), 0,
1847 SLAB_HWCACHE_ALIGN,
1848 NULL);
1849 if (!mr_cache)
1850 return -ENOMEM;
1851 mw_cache = kmem_cache_create("ehca_cache_mw",
1852 sizeof(struct ehca_mw), 0,
1853 SLAB_HWCACHE_ALIGN,
1854 NULL);
1855 if (!mw_cache) {
1856 kmem_cache_destroy(mr_cache);
1857 mr_cache = NULL;
1858 return -ENOMEM;
1859 }
1860 return 0;
1861}
1862
1863void ehca_cleanup_mrmw_cache(void)
1864{
1865 kmem_cache_destroy(mr_cache);
1866 kmem_cache_destroy(mw_cache);
1867}
1868
1869static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
1870 int dir)
1871{
1872 if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
1873 ehca_top_bmap->dir[dir] =
1874 kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
1875 if (!ehca_top_bmap->dir[dir])
1876 return -ENOMEM;
1877 /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
1878 memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
1879 }
1880 return 0;
1881}
1882
1883static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
1884{
1885 if (!ehca_bmap_valid(ehca_bmap->top[top])) {
1886 ehca_bmap->top[top] =
1887 kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
1888 if (!ehca_bmap->top[top])
1889 return -ENOMEM;
1890 /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
1891 memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
1892 }
1893 return ehca_init_top_bmap(ehca_bmap->top[top], dir);
1894}
1895
1896static inline int ehca_calc_index(unsigned long i, unsigned long s)
1897{
1898 return (i >> s) & EHCA_INDEX_MASK;
1899}
1900
1901void ehca_destroy_busmap(void)
1902{
1903 int top, dir;
1904
1905 if (!ehca_bmap)
1906 return;
1907
1908 for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
1909 if (!ehca_bmap_valid(ehca_bmap->top[top]))
1910 continue;
1911 for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
1912 if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
1913 continue;
1914
1915 kfree(ehca_bmap->top[top]->dir[dir]);
1916 }
1917
1918 kfree(ehca_bmap->top[top]);
1919 }
1920
1921 kfree(ehca_bmap);
1922 ehca_bmap = NULL;
1923}
1924
1925static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
1926{
1927 unsigned long i, start_section, end_section;
1928 int top, dir, idx;
1929
1930 if (!nr_pages)
1931 return 0;
1932
1933 if (!ehca_bmap) {
1934 ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
1935 if (!ehca_bmap)
1936 return -ENOMEM;
1937 /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
1938 memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
1939 }
1940
1941 start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
1942 end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
1943 for (i = start_section; i < end_section; i++) {
1944 int ret;
1945 top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
1946 dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
1947 idx = i & EHCA_INDEX_MASK;
1948
1949 ret = ehca_init_bmap(ehca_bmap, top, dir);
1950 if (ret) {
1951 ehca_destroy_busmap();
1952 return ret;
1953 }
1954 ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
1955 ehca_mr_len += EHCA_SECTSIZE;
1956 }
1957 return 0;
1958}
1959
1960static int ehca_is_hugepage(unsigned long pfn)
1961{
1962 int page_order;
1963
1964 if (pfn & EHCA_HUGEPAGE_PFN_MASK)
1965 return 0;
1966
1967 page_order = compound_order(pfn_to_page(pfn));
1968 if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
1969 return 0;
1970
1971 return 1;
1972}
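For illustration only: ehca_is_hugepage() above accepts a pfn as the start of a 16 GB hugepage only if it is aligned to the hugepage boundary and the compound page order matches. The alignment half of that test as a standalone sketch, assuming a 64 KiB base page size (the compound-order check needs kernel context and is omitted):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT_ASSUMED	16	/* assumed 64 KiB base pages */
#define HUGEPAGE_SHIFT		34	/* 16 GiB hugepages */
#define HUGEPAGE_PFN_MASK	((1ULL << (HUGEPAGE_SHIFT - PAGE_SHIFT_ASSUMED)) - 1)

int main(void)
{
	uint64_t aligned_pfn = 1ULL << 20;	/* multiple of 2^18 pfns */
	uint64_t odd_pfn = (1ULL << 20) + 3;	/* not on a 16 GiB boundary */

	printf("pfn %#llx starts a hugepage: %d\n",
	       (unsigned long long)aligned_pfn,
	       !(aligned_pfn & HUGEPAGE_PFN_MASK));
	printf("pfn %#llx starts a hugepage: %d\n",
	       (unsigned long long)odd_pfn,
	       !(odd_pfn & HUGEPAGE_PFN_MASK));
	return 0;
}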
1973
1974static int ehca_create_busmap_callback(unsigned long initial_pfn,
1975 unsigned long total_nr_pages, void *arg)
1976{
1977 int ret;
1978 unsigned long pfn, start_pfn, end_pfn, nr_pages;
1979
1980 if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
1981 return ehca_update_busmap(initial_pfn, total_nr_pages);
1982
1983 /* Given chunk is >= 16GB -> check for hugepages */
1984 start_pfn = initial_pfn;
1985 end_pfn = initial_pfn + total_nr_pages;
1986 pfn = start_pfn;
1987
1988 while (pfn < end_pfn) {
1989 if (ehca_is_hugepage(pfn)) {
1990 /* Add mem found in front of the hugepage */
1991 nr_pages = pfn - start_pfn;
1992 ret = ehca_update_busmap(start_pfn, nr_pages);
1993 if (ret)
1994 return ret;
1995 /* Skip the hugepage */
1996 pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
1997 start_pfn = pfn;
1998 } else
1999 pfn += (EHCA_SECTSIZE / PAGE_SIZE);
2000 }
2001
2002 /* Add mem found behind the hugepage(s) */
2003 nr_pages = pfn - start_pfn;
2004 return ehca_update_busmap(start_pfn, nr_pages);
2005}
2006
2007int ehca_create_busmap(void)
2008{
2009 int ret;
2010
2011 ehca_mr_len = 0;
2012 ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
2013 ehca_create_busmap_callback);
2014 return ret;
2015}
2016
2017static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
2018 struct ehca_mr *e_mr,
2019 struct ehca_mr_pginfo *pginfo)
2020{
2021 int top;
2022	u64 hret = H_SUCCESS, *kpage;
2023
2024 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
2025 if (!kpage) {
2026 ehca_err(&shca->ib_device, "kpage alloc failed");
2027 return -ENOMEM;
2028 }
2029 for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
2030 if (!ehca_bmap_valid(ehca_bmap->top[top]))
2031 continue;
2032 hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
2033 if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
2034 break;
2035 }
2036
2037 ehca_free_fw_ctrlblock(kpage);
2038
2039 if (hret == H_SUCCESS)
2040 return 0; /* Everything is fine */
2041 else {
2042 ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
2043 "h_ret=%lli e_mr=%p top=%x lkey=%x "
2044 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
2045 e_mr->ib.ib_mr.lkey,
2046 shca->ipz_hca_handle.handle,
2047 e_mr->ipz_mr_handle.handle);
2048 return ehca2ib_return_code(hret);
2049 }
2050}
2051
2052static u64 ehca_map_vaddr(void *caddr)
2053{
2054 int top, dir, idx;
2055 unsigned long abs_addr, offset;
2056 u64 entry;
2057
2058 if (!ehca_bmap)
2059 return EHCA_INVAL_ADDR;
2060
2061 abs_addr = __pa(caddr);
2062 top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
2063 if (!ehca_bmap_valid(ehca_bmap->top[top]))
2064 return EHCA_INVAL_ADDR;
2065
2066 dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
2067 if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
2068 return EHCA_INVAL_ADDR;
2069
2070 idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
2071
2072 entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
2073 if (ehca_bmap_valid(entry)) {
2074 offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
2075 return entry | offset;
2076 } else
2077 return EHCA_INVAL_ADDR;
2078}
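For illustration only: ehca_map_vaddr() above resolves a kernel virtual address by carving (top, dir, idx) indices and a section offset out of the physical address and walking the three-level bus map built by ehca_update_busmap(). The shift widths in this standalone sketch are assumptions chosen only to make the arithmetic concrete; the real EHCA_SECTSHIFT, EHCA_*_INDEX_SHIFT and EHCA_INDEX_MASK values live elsewhere in the driver and may differ.

#include <stdio.h>
#include <stdint.h>

#define SECTSHIFT	24	/* assumed 16 MiB sections */
#define INDEX_BITS	8	/* assumed 256 entries per map level */
#define INDEX_MASK	((1ULL << INDEX_BITS) - 1)
#define DIR_SHIFT	(SECTSHIFT + INDEX_BITS)
#define TOP_SHIFT	(SECTSHIFT + 2 * INDEX_BITS)

int main(void)
{
	uint64_t abs_addr = 0x123456789aULL;	/* hypothetical physical address */

	unsigned top = (abs_addr >> TOP_SHIFT) & INDEX_MASK;
	unsigned dir = (abs_addr >> DIR_SHIFT) & INDEX_MASK;
	unsigned idx = (abs_addr >> SECTSHIFT) & INDEX_MASK;
	uint64_t offset = abs_addr & ((1ULL << SECTSHIFT) - 1);

	/* the driver would now look up bmap->top[top]->dir[dir]->ent[idx]
	 * and OR the section offset into the stored bus address */
	printf("top=%u dir=%u idx=%u offset=%#llx\n",
	       top, dir, idx, (unsigned long long)offset);
	return 0;
}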
2079
2080static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2081{
2082 return dma_addr == EHCA_INVAL_ADDR;
2083}
2084
2085static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
2086 size_t size, enum dma_data_direction direction)
2087{
2088 if (cpu_addr)
2089 return ehca_map_vaddr(cpu_addr);
2090 else
2091 return EHCA_INVAL_ADDR;
2092}
2093
2094static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
2095 enum dma_data_direction direction)
2096{
2097 /* This is only a stub; nothing to be done here */
2098}
2099
2100static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
2101 unsigned long offset, size_t size,
2102 enum dma_data_direction direction)
2103{
2104 u64 addr;
2105
2106 if (offset + size > PAGE_SIZE)
2107 return EHCA_INVAL_ADDR;
2108
2109 addr = ehca_map_vaddr(page_address(page));
2110 if (!ehca_dma_mapping_error(dev, addr))
2111 addr += offset;
2112
2113 return addr;
2114}
2115
2116static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
2117 enum dma_data_direction direction)
2118{
2119 /* This is only a stub; nothing to be done here */
2120}
2121
2122static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
2123 int nents, enum dma_data_direction direction)
2124{
2125 struct scatterlist *sg;
2126 int i;
2127
2128 for_each_sg(sgl, sg, nents, i) {
2129 u64 addr;
2130 addr = ehca_map_vaddr(sg_virt(sg));
2131 if (ehca_dma_mapping_error(dev, addr))
2132 return 0;
2133
2134 sg->dma_address = addr;
2135 sg->dma_length = sg->length;
2136 }
2137 return nents;
2138}
2139
2140static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
2141 int nents, enum dma_data_direction direction)
2142{
2143 /* This is only a stub; nothing to be done here */
2144}
2145
2146static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
2147 size_t size,
2148 enum dma_data_direction dir)
2149{
2150 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
2151}
2152
2153static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
2154 size_t size,
2155 enum dma_data_direction dir)
2156{
2157 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
2158}
2159
2160static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
2161 u64 *dma_handle, gfp_t flag)
2162{
2163 struct page *p;
2164 void *addr = NULL;
2165 u64 dma_addr;
2166
2167 p = alloc_pages(flag, get_order(size));
2168 if (p) {
2169 addr = page_address(p);
2170 dma_addr = ehca_map_vaddr(addr);
2171 if (ehca_dma_mapping_error(dev, dma_addr)) {
2172 free_pages((unsigned long)addr, get_order(size));
2173 return NULL;
2174 }
2175 if (dma_handle)
2176 *dma_handle = dma_addr;
2177 return addr;
2178 }
2179 return NULL;
2180}
2181
2182static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
2183 void *cpu_addr, u64 dma_handle)
2184{
2185 if (cpu_addr && size)
2186 free_pages((unsigned long)cpu_addr, get_order(size));
2187}
2188
2189
2190struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
2191 .mapping_error = ehca_dma_mapping_error,
2192 .map_single = ehca_dma_map_single,
2193 .unmap_single = ehca_dma_unmap_single,
2194 .map_page = ehca_dma_map_page,
2195 .unmap_page = ehca_dma_unmap_page,
2196 .map_sg = ehca_dma_map_sg,
2197 .unmap_sg = ehca_dma_unmap_sg,
2198 .sync_single_for_cpu = ehca_dma_sync_single_for_cpu,
2199 .sync_single_for_device = ehca_dma_sync_single_for_device,
2200 .alloc_coherent = ehca_dma_alloc_coherent,
2201 .free_coherent = ehca_dma_free_coherent,
2202};
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h
deleted file mode 100644
index 52bfa95697f7..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mrmw.h
+++ /dev/null
@@ -1,127 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * MR/MW declarations and inline functions
5 *
6 * Authors: Dietmar Decker <ddecker@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef _EHCA_MRMW_H_
43#define _EHCA_MRMW_H_
44
45enum ehca_reg_type {
46 EHCA_REG_MR,
47 EHCA_REG_BUSMAP_MR
48};
49
50int ehca_reg_mr(struct ehca_shca *shca,
51 struct ehca_mr *e_mr,
52 u64 *iova_start,
53 u64 size,
54 int acl,
55 struct ehca_pd *e_pd,
56 struct ehca_mr_pginfo *pginfo,
57 u32 *lkey,
58 u32 *rkey,
59 enum ehca_reg_type reg_type);
60
61int ehca_reg_mr_rpages(struct ehca_shca *shca,
62 struct ehca_mr *e_mr,
63 struct ehca_mr_pginfo *pginfo);
64
65int ehca_rereg_mr(struct ehca_shca *shca,
66 struct ehca_mr *e_mr,
67 u64 *iova_start,
68 u64 size,
69 int mr_access_flags,
70 struct ehca_pd *e_pd,
71 struct ehca_mr_pginfo *pginfo,
72 u32 *lkey,
73 u32 *rkey);
74
75int ehca_unmap_one_fmr(struct ehca_shca *shca,
76 struct ehca_mr *e_fmr);
77
78int ehca_reg_smr(struct ehca_shca *shca,
79 struct ehca_mr *e_origmr,
80 struct ehca_mr *e_newmr,
81 u64 *iova_start,
82 int acl,
83 struct ehca_pd *e_pd,
84 u32 *lkey,
85 u32 *rkey);
86
87int ehca_reg_internal_maxmr(struct ehca_shca *shca,
88 struct ehca_pd *e_pd,
89 struct ehca_mr **maxmr);
90
91int ehca_reg_maxmr(struct ehca_shca *shca,
92 struct ehca_mr *e_newmr,
93 u64 *iova_start,
94 int acl,
95 struct ehca_pd *e_pd,
96 u32 *lkey,
97 u32 *rkey);
98
99int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
100
101int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
102 u64 *page_list,
103 int list_len);
104
105int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
106 u32 number,
107 u64 *kpage);
108
109int ehca_mr_is_maxmr(u64 size,
110 u64 *iova_start);
111
112void ehca_mrmw_map_acl(int ib_acl,
113 u32 *hipz_acl);
114
115void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl);
116
117void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
118 int *ib_acl);
119
120void ehca_mr_deletenew(struct ehca_mr *mr);
121
122int ehca_create_busmap(void);
123
124void ehca_destroy_busmap(void);
125
126extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
127#endif /*_EHCA_MRMW_H_*/
diff --git a/drivers/staging/rdma/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c
deleted file mode 100644
index 2a8aae411941..000000000000
--- a/drivers/staging/rdma/ehca/ehca_pd.c
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * PD functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 *
8 * Copyright (c) 2005 IBM Corporation
9 *
10 * All rights reserved.
11 *
12 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
13 * BSD.
14 *
15 * OpenIB BSD License
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are met:
19 *
20 * Redistributions of source code must retain the above copyright notice, this
21 * list of conditions and the following disclaimer.
22 *
23 * Redistributions in binary form must reproduce the above copyright notice,
24 * this list of conditions and the following disclaimer in the documentation
25 * and/or other materials
26 * provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
35 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
36 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#include <linux/slab.h>
42
43#include "ehca_tools.h"
44#include "ehca_iverbs.h"
45
46static struct kmem_cache *pd_cache;
47
48struct ib_pd *ehca_alloc_pd(struct ib_device *device,
49 struct ib_ucontext *context, struct ib_udata *udata)
50{
51 struct ehca_pd *pd;
52 int i;
53
54 pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
55 if (!pd) {
56 ehca_err(device, "device=%p context=%p out of memory",
57 device, context);
58 return ERR_PTR(-ENOMEM);
59 }
60
61 for (i = 0; i < 2; i++) {
62 INIT_LIST_HEAD(&pd->free[i]);
63 INIT_LIST_HEAD(&pd->full[i]);
64 }
65 mutex_init(&pd->lock);
66
67 /*
 68	 * Kernel PD: context == NULL
 69	 * User   PD: context != NULL
70 */
71 if (!context) {
72 /*
 73		 * After init, kernel PDs always reuse
 74		 * the one created in ehca_shca_reopen()
75 */
76 struct ehca_shca *shca = container_of(device, struct ehca_shca,
77 ib_device);
78 pd->fw_pd.value = shca->pd->fw_pd.value;
79 } else
80 pd->fw_pd.value = (u64)pd;
81
82 return &pd->ib_pd;
83}
84
85int ehca_dealloc_pd(struct ib_pd *pd)
86{
87 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
88 int i, leftovers = 0;
89 struct ipz_small_queue_page *page, *tmp;
90
91 for (i = 0; i < 2; i++) {
92 list_splice(&my_pd->full[i], &my_pd->free[i]);
93 list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
94 leftovers = 1;
95 free_page(page->page);
96 kmem_cache_free(small_qp_cache, page);
97 }
98 }
99
100 if (leftovers)
101 ehca_warn(pd->device,
102 "Some small queue pages were not freed");
103
104 kmem_cache_free(pd_cache, my_pd);
105
106 return 0;
107}
108
109int ehca_init_pd_cache(void)
110{
111 pd_cache = kmem_cache_create("ehca_cache_pd",
112 sizeof(struct ehca_pd), 0,
113 SLAB_HWCACHE_ALIGN,
114 NULL);
115 if (!pd_cache)
116 return -ENOMEM;
117 return 0;
118}
119
120void ehca_cleanup_pd_cache(void)
121{
122 kmem_cache_destroy(pd_cache);
123}
diff --git a/drivers/staging/rdma/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h
deleted file mode 100644
index 90c4efa67586..000000000000
--- a/drivers/staging/rdma/ehca/ehca_qes.h
+++ /dev/null
@@ -1,260 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Hardware request structures
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 * Christoph Raisch <raisch@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43
44#ifndef _EHCA_QES_H_
45#define _EHCA_QES_H_
46
47#include "ehca_tools.h"
48
49/* virtual scatter gather entry to specify remote addresses with length */
50struct ehca_vsgentry {
51 u64 vaddr;
52 u32 lkey;
53 u32 length;
54};
55
56#define GRH_FLAG_MASK EHCA_BMASK_IBM( 7, 7)
57#define GRH_IPVERSION_MASK EHCA_BMASK_IBM( 0, 3)
58#define GRH_TCLASS_MASK EHCA_BMASK_IBM( 4, 12)
59#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13, 31)
60#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32, 47)
61#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48, 55)
62#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56, 63)
63
64/*
65 * Unreliable Datagram Address Vector Format
66 * see IBTA Vol1 chapter 8.3 Global Routing Header
67 */
68struct ehca_ud_av {
69 u8 sl;
70 u8 lnh;
71 u16 dlid;
72 u8 reserved1;
73 u8 reserved2;
74 u8 reserved3;
75 u8 slid_path_bits;
76 u8 reserved4;
77 u8 ipd;
78 u8 reserved5;
79 u8 pmtu;
80 u32 reserved6;
81 u64 reserved7;
82 union {
83 struct {
84 u64 word_0; /* always set to 6 */
 85			/* should be 0x1B for IB transport */
86 u64 word_1;
87 u64 word_2;
88 u64 word_3;
89 u64 word_4;
90 } grh;
91 struct {
92 u32 wd_0;
93 u32 wd_1;
94 /* DWord_1 --> SGID */
95
96 u32 sgid_wd3;
97 u32 sgid_wd2;
98
99 u32 sgid_wd1;
100 u32 sgid_wd0;
101 /* DWord_3 --> DGID */
102
103 u32 dgid_wd3;
104 u32 dgid_wd2;
105
106 u32 dgid_wd1;
107 u32 dgid_wd0;
108 } grh_l;
109 };
110};
111
112/* maximum number of sg entries allowed in a WQE */
113#define MAX_WQE_SG_ENTRIES 252
114
115#define WQE_OPTYPE_SEND 0x80
116#define WQE_OPTYPE_RDMAREAD 0x40
117#define WQE_OPTYPE_RDMAWRITE 0x20
118#define WQE_OPTYPE_CMPSWAP 0x10
119#define WQE_OPTYPE_FETCHADD 0x08
120#define WQE_OPTYPE_BIND 0x04
121
122#define WQE_WRFLAG_REQ_SIGNAL_COM 0x80
123#define WQE_WRFLAG_FENCE 0x40
124#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
125#define WQE_WRFLAG_SOLIC_EVENT 0x10
126
127#define WQEF_CACHE_HINT 0x80
128#define WQEF_CACHE_HINT_RD_WR 0x40
129#define WQEF_TIMED_WQE 0x20
130#define WQEF_PURGE 0x08
131#define WQEF_HIGH_NIBBLE 0xF0
132
133#define MW_BIND_ACCESSCTRL_R_WRITE 0x40
134#define MW_BIND_ACCESSCTRL_R_READ 0x20
135#define MW_BIND_ACCESSCTRL_R_ATOMIC 0x10
136
137struct ehca_wqe {
138 u64 work_request_id;
139 u8 optype;
140 u8 wr_flag;
141 u16 pkeyi;
142 u8 wqef;
143 u8 nr_of_data_seg;
144 u16 wqe_provided_slid;
145 u32 destination_qp_number;
146 u32 resync_psn_sqp;
147 u32 local_ee_context_qkey;
148 u32 immediate_data;
149 union {
150 struct {
151 u64 remote_virtual_address;
152 u32 rkey;
153 u32 reserved;
154 u64 atomic_1st_op_dma_len;
155 u64 atomic_2nd_op;
156 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
157
158 } nud;
159 struct {
160 u64 ehca_ud_av_ptr;
161 u64 reserved1;
162 u64 reserved2;
163 u64 reserved3;
164 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
165 } ud_avp;
166 struct {
167 struct ehca_ud_av ud_av;
168 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES -
169 2];
170 } ud_av;
171 struct {
172 u64 reserved0;
173 u64 reserved1;
174 u64 reserved2;
175 u64 reserved3;
176 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
177 } all_rcv;
178
179 struct {
180 u64 reserved;
181 u32 rkey;
182 u32 old_rkey;
183 u64 reserved1;
184 u64 reserved2;
185 u64 virtual_address;
186 u32 reserved3;
187 u32 length;
188 u32 reserved4;
189 u16 reserved5;
190 u8 reserved6;
191 u8 lr_ctl;
192 u32 lkey;
193 u32 reserved7;
194 u64 reserved8;
195 u64 reserved9;
196 u64 reserved10;
197 u64 reserved11;
198 } bind;
199 struct {
200 u64 reserved12;
201 u64 reserved13;
202 u32 size;
203 u32 start;
204 } inline_data;
205 } u;
206
207};
208
209#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0)
210#define WC_IMM_DATA EHCA_BMASK_IBM(1, 1)
211#define WC_GRH_PRESENT EHCA_BMASK_IBM(2, 2)
212#define WC_SE_BIT EHCA_BMASK_IBM(3, 3)
213#define WC_STATUS_ERROR_BIT 0x80000000
214#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
215#define WC_STATUS_PURGE_BIT 0x10
216#define WC_SEND_RECEIVE_BIT 0x80
217
218struct ehca_cqe {
219 u64 work_request_id;
220 u8 optype;
221 u8 w_completion_flags;
222 u16 reserved1;
223 u32 nr_bytes_transferred;
224 u32 immediate_data;
225 u32 local_qp_number;
226 u8 freed_resource_count;
227 u8 service_level;
228 u16 wqe_count;
229 u32 qp_token;
230 u32 qkey_ee_token;
231 u32 remote_qp_number;
232 u16 dlid;
233 u16 rlid;
234 u16 reserved2;
235 u16 pkey_index;
236 u32 cqe_timestamp;
237 u32 wqe_timestamp;
238 u8 wqe_timestamp_valid;
239 u8 reserved3;
240 u8 reserved4;
241 u8 cqe_flags;
242 u32 status;
243};
244
245struct ehca_eqe {
246 u64 entry;
247};
248
249struct ehca_mrte {
250 u64 starting_va;
251 u64 length; /* length of memory region in bytes*/
252 u32 pd;
253 u8 key_instance;
254 u8 pagesize;
255 u8 mr_control;
256 u8 local_remote_access_ctrl;
257 u8 reserved[0x20 - 0x18];
258 u64 at_pointer[4];
259};
260#endif /*_EHCA_QES_H_*/
diff --git a/drivers/staging/rdma/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c
deleted file mode 100644
index 896c01f810f6..000000000000
--- a/drivers/staging/rdma/ehca/ehca_qp.c
+++ /dev/null
@@ -1,2256 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * QP functions
5 *
6 * Authors: Joachim Fenkes <fenkes@de.ibm.com>
7 * Stefan Roscher <stefan.roscher@de.ibm.com>
8 * Waleri Fomin <fomin@de.ibm.com>
9 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
10 * Reinhard Ernst <rernst@de.ibm.com>
11 * Heiko J Schick <schickhj@de.ibm.com>
12 *
13 * Copyright (c) 2005 IBM Corporation
14 *
15 * All rights reserved.
16 *
17 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
18 * BSD.
19 *
20 * OpenIB BSD License
21 *
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions are met:
24 *
25 * Redistributions of source code must retain the above copyright notice, this
26 * list of conditions and the following disclaimer.
27 *
28 * Redistributions in binary form must reproduce the above copyright notice,
29 * this list of conditions and the following disclaimer in the documentation
30 * and/or other materials
31 * provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
34 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
37 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
40 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
41 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46#include <linux/slab.h>
47
48#include "ehca_classes.h"
49#include "ehca_tools.h"
50#include "ehca_qes.h"
51#include "ehca_iverbs.h"
52#include "hcp_if.h"
53#include "hipz_fns.h"
54
55static struct kmem_cache *qp_cache;
56
57/*
58 * attributes not supported by query qp
59 */
60#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
61 IB_QP_EN_SQD_ASYNC_NOTIFY)
62
63/*
64 * ehca (internal) qp state values
65 */
66enum ehca_qp_state {
67 EHCA_QPS_RESET = 1,
68 EHCA_QPS_INIT = 2,
69 EHCA_QPS_RTR = 3,
70 EHCA_QPS_RTS = 5,
71 EHCA_QPS_SQD = 6,
72 EHCA_QPS_SQE = 8,
73 EHCA_QPS_ERR = 128
74};
75
76/*
77 * qp state transitions as defined by IB Arch Rel 1.1 page 431
78 */
79enum ib_qp_statetrans {
80 IB_QPST_ANY2RESET,
81 IB_QPST_ANY2ERR,
82 IB_QPST_RESET2INIT,
83 IB_QPST_INIT2RTR,
84 IB_QPST_INIT2INIT,
85 IB_QPST_RTR2RTS,
86 IB_QPST_RTS2SQD,
87 IB_QPST_RTS2RTS,
88 IB_QPST_SQD2RTS,
89 IB_QPST_SQE2RTS,
90 IB_QPST_SQD2SQD,
91 IB_QPST_MAX /* nr of transitions, this must be last!!! */
92};
93
94/*
95 * ib2ehca_qp_state maps IB to ehca qp_state
96 * returns ehca qp state corresponding to given ib qp state
97 */
98static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
99{
100 switch (ib_qp_state) {
101 case IB_QPS_RESET:
102 return EHCA_QPS_RESET;
103 case IB_QPS_INIT:
104 return EHCA_QPS_INIT;
105 case IB_QPS_RTR:
106 return EHCA_QPS_RTR;
107 case IB_QPS_RTS:
108 return EHCA_QPS_RTS;
109 case IB_QPS_SQD:
110 return EHCA_QPS_SQD;
111 case IB_QPS_SQE:
112 return EHCA_QPS_SQE;
113 case IB_QPS_ERR:
114 return EHCA_QPS_ERR;
115 default:
116 ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
117 return -EINVAL;
118 }
119}
120
121/*
122 * ehca2ib_qp_state maps ehca to IB qp_state
123 * returns ib qp state corresponding to given ehca qp state
124 */
125static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
126 ehca_qp_state)
127{
128 switch (ehca_qp_state) {
129 case EHCA_QPS_RESET:
130 return IB_QPS_RESET;
131 case EHCA_QPS_INIT:
132 return IB_QPS_INIT;
133 case EHCA_QPS_RTR:
134 return IB_QPS_RTR;
135 case EHCA_QPS_RTS:
136 return IB_QPS_RTS;
137 case EHCA_QPS_SQD:
138 return IB_QPS_SQD;
139 case EHCA_QPS_SQE:
140 return IB_QPS_SQE;
141 case EHCA_QPS_ERR:
142 return IB_QPS_ERR;
143 default:
144 ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
145 return -EINVAL;
146 }
147}
148
149/*
150 * ehca_qp_type used as index for req_attr and opt_attr of
151 * struct ehca_modqp_statetrans
152 */
153enum ehca_qp_type {
154 QPT_RC = 0,
155 QPT_UC = 1,
156 QPT_UD = 2,
157 QPT_SQP = 3,
158 QPT_MAX
159};
160
161/*
162 * ib2ehcaqptype maps Ib to ehca qp_type
163 * returns ehca qp type corresponding to ib qp type
164 */
165static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
166{
167 switch (ibqptype) {
168 case IB_QPT_SMI:
169 case IB_QPT_GSI:
170 return QPT_SQP;
171 case IB_QPT_RC:
172 return QPT_RC;
173 case IB_QPT_UC:
174 return QPT_UC;
175 case IB_QPT_UD:
176 return QPT_UD;
177 default:
178 ehca_gen_err("Invalid ibqptype=%x", ibqptype);
179 return -EINVAL;
180 }
181}
182
183static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
184 int ib_tostate)
185{
186 int index = -EINVAL;
187 switch (ib_tostate) {
188 case IB_QPS_RESET:
189 index = IB_QPST_ANY2RESET;
190 break;
191 case IB_QPS_INIT:
192 switch (ib_fromstate) {
193 case IB_QPS_RESET:
194 index = IB_QPST_RESET2INIT;
195 break;
196 case IB_QPS_INIT:
197 index = IB_QPST_INIT2INIT;
198 break;
199 }
200 break;
201 case IB_QPS_RTR:
202 if (ib_fromstate == IB_QPS_INIT)
203 index = IB_QPST_INIT2RTR;
204 break;
205 case IB_QPS_RTS:
206 switch (ib_fromstate) {
207 case IB_QPS_RTR:
208 index = IB_QPST_RTR2RTS;
209 break;
210 case IB_QPS_RTS:
211 index = IB_QPST_RTS2RTS;
212 break;
213 case IB_QPS_SQD:
214 index = IB_QPST_SQD2RTS;
215 break;
216 case IB_QPS_SQE:
217 index = IB_QPST_SQE2RTS;
218 break;
219 }
220 break;
221 case IB_QPS_SQD:
222 if (ib_fromstate == IB_QPS_RTS)
223 index = IB_QPST_RTS2SQD;
224 break;
225 case IB_QPS_SQE:
226 break;
227 case IB_QPS_ERR:
228 index = IB_QPST_ANY2ERR;
229 break;
230 default:
231 break;
232 }
233 return index;
234}
235
236/*
237 * ibqptype2servicetype returns hcp service type corresponding to given
238 * ib qp type used by create_qp()
239 */
240static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
241{
242 switch (ibqptype) {
243 case IB_QPT_SMI:
244 case IB_QPT_GSI:
245 return ST_UD;
246 case IB_QPT_RC:
247 return ST_RC;
248 case IB_QPT_UC:
249 return ST_UC;
250 case IB_QPT_UD:
251 return ST_UD;
252 case IB_QPT_RAW_IPV6:
253 return -EINVAL;
254 case IB_QPT_RAW_ETHERTYPE:
255 return -EINVAL;
256 default:
257 ehca_gen_err("Invalid ibqptype=%x", ibqptype);
258 return -EINVAL;
259 }
260}
261
262/*
263 * init userspace queue info from ipz_queue data
264 */
265static inline void queue2resp(struct ipzu_queue_resp *resp,
266 struct ipz_queue *queue)
267{
268 resp->qe_size = queue->qe_size;
269 resp->act_nr_of_sg = queue->act_nr_of_sg;
270 resp->queue_length = queue->queue_length;
271 resp->pagesize = queue->pagesize;
272 resp->toggle_state = queue->toggle_state;
273 resp->offset = queue->offset;
274}
275
276/*
277 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
278 */
279static inline int init_qp_queue(struct ehca_shca *shca,
280 struct ehca_pd *pd,
281 struct ehca_qp *my_qp,
282 struct ipz_queue *queue,
283 int q_type,
284 u64 expected_hret,
285 struct ehca_alloc_queue_parms *parms,
286 int wqe_size)
287{
288 int ret, cnt, ipz_rc, nr_q_pages;
289 void *vpage;
290 u64 rpage, h_ret;
291 struct ib_device *ib_dev = &shca->ib_device;
292 struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
293
294 if (!parms->queue_size)
295 return 0;
296
297 if (parms->is_small) {
298 nr_q_pages = 1;
299 ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
300 128 << parms->page_size,
301 wqe_size, parms->act_nr_sges, 1);
302 } else {
303 nr_q_pages = parms->queue_size;
304 ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
305 EHCA_PAGESIZE, wqe_size,
306 parms->act_nr_sges, 0);
307 }
308
309 if (!ipz_rc) {
310 ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
311 ipz_rc);
312 return -EBUSY;
313 }
314
315 /* register queue pages */
316 for (cnt = 0; cnt < nr_q_pages; cnt++) {
317 vpage = ipz_qpageit_get_inc(queue);
318 if (!vpage) {
319 ehca_err(ib_dev, "ipz_qpageit_get_inc() "
320 "failed p_vpage= %p", vpage);
321 ret = -EINVAL;
322 goto init_qp_queue1;
323 }
324 rpage = __pa(vpage);
325
326 h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
327 my_qp->ipz_qp_handle,
328 NULL, 0, q_type,
329 rpage, parms->is_small ? 0 : 1,
330 my_qp->galpas.kernel);
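 /*
  * On the last queue page the hcall must return expected_hret:
  * H_SUCCESS for the final queue of this QP, or H_PAGE_REGISTERED
  * when another queue of the same QP is registered afterwards
  * (see the callers in internal_create_qp()). All earlier pages
  * must return H_PAGE_REGISTERED.
  */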
331 if (cnt == (nr_q_pages - 1)) { /* last page! */
332 if (h_ret != expected_hret) {
333 ehca_err(ib_dev, "hipz_qp_register_rpage() "
334 "h_ret=%lli", h_ret);
335 ret = ehca2ib_return_code(h_ret);
336 goto init_qp_queue1;
337 }
338 vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
339 if (vpage) {
340 ehca_err(ib_dev, "ipz_qpageit_get_inc() "
341 "should not succeed vpage=%p", vpage);
342 ret = -EINVAL;
343 goto init_qp_queue1;
344 }
345 } else {
346 if (h_ret != H_PAGE_REGISTERED) {
347 ehca_err(ib_dev, "hipz_qp_register_rpage() "
348 "h_ret=%lli", h_ret);
349 ret = ehca2ib_return_code(h_ret);
350 goto init_qp_queue1;
351 }
352 }
353 }
354
355 ipz_qeit_reset(queue);
356
357 return 0;
358
359init_qp_queue1:
360 ipz_queue_dtor(pd, queue);
361 return ret;
362}
363
364static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
365{
366 if (is_llqp)
367 return 128 << act_nr_sge;
368 else
369 return offsetof(struct ehca_wqe,
370 u.nud.sg_list[act_nr_sge]);
371}
372
373static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
374 int req_nr_sge, int is_llqp)
375{
376 u32 wqe_size, q_size;
377 int act_nr_sge = req_nr_sge;
378
379 if (!is_llqp)
380 /* round up #SGEs so WQE size is a power of 2 */
381 for (act_nr_sge = 4; act_nr_sge <= 252;
382 act_nr_sge = 4 + 2 * act_nr_sge)
383 if (act_nr_sge >= req_nr_sge)
384 break;
385
386 wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
387 q_size = wqe_size * (queue->max_wr + 1);
388
389 if (q_size <= 512)
390 queue->page_size = 2;
391 else if (q_size <= 1024)
392 queue->page_size = 3;
393 else
394 queue->page_size = 0;
395
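 /*
  * page_size is a firmware encoding: a small queue uses pages of
  * 128 << page_size bytes (2 -> 512 bytes, 3 -> 1 KB, see
  * init_qp_queue()); page_size 0 means a regular EHCA_PAGESIZE queue.
  */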
396 queue->is_small = (queue->page_size != 0);
397}
398
399/* needs to be called with cq->spinlock held */
400void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
401{
402 struct list_head *list, *node;
403
404 /* TODO: support low latency QPs */
405 if (qp->ext_type == EQPT_LLQP)
406 return;
407
408 if (on_sq) {
409 list = &qp->send_cq->sqp_err_list;
410 node = &qp->sq_err_node;
411 } else {
412 list = &qp->recv_cq->rqp_err_list;
413 node = &qp->rq_err_node;
414 }
415
416 if (list_empty(node))
417 list_add_tail(node, list);
418
419 return;
420}
421
422static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
423{
424 unsigned long flags;
425
426 spin_lock_irqsave(&cq->spinlock, flags);
427
428 if (!list_empty(node))
429 list_del_init(node);
430
431 spin_unlock_irqrestore(&cq->spinlock, flags);
432}
433
434static void reset_queue_map(struct ehca_queue_map *qmap)
435{
436 int i;
437
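 /*
  * Start out with an "empty" map: tail points just before the first
  * entry and every slot is marked as already reported, so that no
  * flush CQEs are generated for WQEs that were never posted.
  */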
438 qmap->tail = qmap->entries - 1;
439 qmap->left_to_poll = 0;
440 qmap->next_wqe_idx = 0;
441 for (i = 0; i < qmap->entries; i++) {
442 qmap->map[i].reported = 1;
443 qmap->map[i].cqe_req = 0;
444 }
445}
446
447/*
448 * Create an ib_qp struct that is either a QP or an SRQ, depending on
449 * the value of the is_srq parameter. If init_attr and srq_init_attr share
450 * fields, the field out of init_attr is used.
451 */
452static struct ehca_qp *internal_create_qp(
453 struct ib_pd *pd,
454 struct ib_qp_init_attr *init_attr,
455 struct ib_srq_init_attr *srq_init_attr,
456 struct ib_udata *udata, int is_srq)
457{
458 struct ehca_qp *my_qp, *my_srq = NULL;
459 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
460 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
461 ib_device);
462 struct ib_ucontext *context = NULL;
463 u64 h_ret;
464 int is_llqp = 0, has_srq = 0, is_user = 0;
465 int qp_type, max_send_sge, max_recv_sge, ret;
466
467 /* h_call's out parameters */
468 struct ehca_alloc_qp_parms parms;
469 u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
470 unsigned long flags;
471
472 if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
473 ehca_err(pd->device, "Unable to create QP, max number of %i "
474 "QPs reached.", shca->max_num_qps);
475 ehca_err(pd->device, "To increase the maximum number of QPs "
476 "use the number_of_qps module parameter.\n");
477 return ERR_PTR(-ENOSPC);
478 }
479
480 if (init_attr->create_flags) {
481 atomic_dec(&shca->num_qps);
482 return ERR_PTR(-EINVAL);
483 }
484
485 memset(&parms, 0, sizeof(parms));
486 qp_type = init_attr->qp_type;
487
488 if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
489 init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
490 ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
491 init_attr->sq_sig_type);
492 atomic_dec(&shca->num_qps);
493 return ERR_PTR(-EINVAL);
494 }
495
496 /* save LLQP info */
497 if (qp_type & 0x80) {
498 is_llqp = 1;
499 parms.ext_type = EQPT_LLQP;
500 parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
501 }
502 qp_type &= 0x1F;
503 init_attr->qp_type &= 0x1F;
504
505 /* handle SRQ base QPs */
506 if (init_attr->srq) {
507 my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
508
509 if (qp_type == IB_QPT_UC) {
510 ehca_err(pd->device, "UC with SRQ not supported");
511 atomic_dec(&shca->num_qps);
512 return ERR_PTR(-EINVAL);
513 }
514
515 has_srq = 1;
516 parms.ext_type = EQPT_SRQBASE;
517 parms.srq_qpn = my_srq->real_qp_num;
518 }
519
520 if (is_llqp && has_srq) {
521 ehca_err(pd->device, "LLQPs can't have an SRQ");
522 atomic_dec(&shca->num_qps);
523 return ERR_PTR(-EINVAL);
524 }
525
526 /* handle SRQs */
527 if (is_srq) {
528 parms.ext_type = EQPT_SRQ;
529 parms.srq_limit = srq_init_attr->attr.srq_limit;
530 if (init_attr->cap.max_recv_sge > 3) {
531 ehca_err(pd->device, "no more than three SGEs "
532 "supported for SRQ pd=%p max_sge=%x",
533 pd, init_attr->cap.max_recv_sge);
534 atomic_dec(&shca->num_qps);
535 return ERR_PTR(-EINVAL);
536 }
537 }
538
539 /* check QP type */
540 if (qp_type != IB_QPT_UD &&
541 qp_type != IB_QPT_UC &&
542 qp_type != IB_QPT_RC &&
543 qp_type != IB_QPT_SMI &&
544 qp_type != IB_QPT_GSI) {
545 ehca_err(pd->device, "wrong QP Type=%x", qp_type);
546 atomic_dec(&shca->num_qps);
547 return ERR_PTR(-EINVAL);
548 }
549
550 if (is_llqp) {
551 switch (qp_type) {
552 case IB_QPT_RC:
553 if ((init_attr->cap.max_send_wr > 255) ||
554 (init_attr->cap.max_recv_wr > 255)) {
555 ehca_err(pd->device,
556 "Invalid Number of max_sq_wr=%x "
557 "or max_rq_wr=%x for RC LLQP",
558 init_attr->cap.max_send_wr,
559 init_attr->cap.max_recv_wr);
560 atomic_dec(&shca->num_qps);
561 return ERR_PTR(-EINVAL);
562 }
563 break;
564 case IB_QPT_UD:
565 if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
566 ehca_err(pd->device, "UD LLQP not supported "
567 "by this adapter");
568 atomic_dec(&shca->num_qps);
569 return ERR_PTR(-ENOSYS);
570 }
571 if (!(init_attr->cap.max_send_sge <= 5
572 && init_attr->cap.max_send_sge >= 1
573 && init_attr->cap.max_recv_sge <= 5
574 && init_attr->cap.max_recv_sge >= 1)) {
575 ehca_err(pd->device,
576 "Invalid Number of max_send_sge=%x "
577 "or max_recv_sge=%x for UD LLQP",
578 init_attr->cap.max_send_sge,
579 init_attr->cap.max_recv_sge);
580 atomic_dec(&shca->num_qps);
581 return ERR_PTR(-EINVAL);
582 } else if (init_attr->cap.max_send_wr > 255) {
583 ehca_err(pd->device,
584 "Invalid Number of "
585 "max_send_wr=%x for UD QP_TYPE=%x",
586 init_attr->cap.max_send_wr, qp_type);
587 atomic_dec(&shca->num_qps);
588 return ERR_PTR(-EINVAL);
589 }
590 break;
591 default:
592 ehca_err(pd->device, "unsupported LL QP Type=%x",
593 qp_type);
594 atomic_dec(&shca->num_qps);
595 return ERR_PTR(-EINVAL);
596 }
597 } else {
598 int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
599 || qp_type == IB_QPT_GSI) ? 250 : 252;
600
601 if (init_attr->cap.max_send_sge > max_sge
602 || init_attr->cap.max_recv_sge > max_sge) {
603 ehca_err(pd->device, "Invalid number of SGEs requested "
604 "send_sge=%x recv_sge=%x max_sge=%x",
605 init_attr->cap.max_send_sge,
606 init_attr->cap.max_recv_sge, max_sge);
607 atomic_dec(&shca->num_qps);
608 return ERR_PTR(-EINVAL);
609 }
610 }
611
612 my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
613 if (!my_qp) {
614 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
615 atomic_dec(&shca->num_qps);
616 return ERR_PTR(-ENOMEM);
617 }
618
619 if (pd->uobject && udata) {
620 is_user = 1;
621 context = pd->uobject->context;
622 }
623
624 atomic_set(&my_qp->nr_events, 0);
625 init_waitqueue_head(&my_qp->wait_completion);
626 spin_lock_init(&my_qp->spinlock_s);
627 spin_lock_init(&my_qp->spinlock_r);
628 my_qp->qp_type = qp_type;
629 my_qp->ext_type = parms.ext_type;
630 my_qp->state = IB_QPS_RESET;
631
632 if (init_attr->recv_cq)
633 my_qp->recv_cq =
634 container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
635 if (init_attr->send_cq)
636 my_qp->send_cq =
637 container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
638
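 /*
  * Allocate a driver-private token (< 0x2000000) for this QP in
  * ehca_qp_idr; the token is passed to firmware via parms.token (and
  * parms.srq_token for SRQ base QPs) and allows the QP to be looked
  * up again later.
  */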
639 idr_preload(GFP_KERNEL);
640 write_lock_irqsave(&ehca_qp_idr_lock, flags);
641
642 ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
643 if (ret >= 0)
644 my_qp->token = ret;
645
646 write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
647 idr_preload_end();
648 if (ret < 0) {
649 if (ret == -ENOSPC) {
650 ret = -EINVAL;
651 ehca_err(pd->device, "Invalid number of qp");
652 } else {
653 ret = -ENOMEM;
654 ehca_err(pd->device, "Can't allocate new idr entry.");
655 }
656 goto create_qp_exit0;
657 }
658
659 if (has_srq)
660 parms.srq_token = my_qp->token;
661
662 parms.servicetype = ibqptype2servicetype(qp_type);
663 if (parms.servicetype < 0) {
664 ret = -EINVAL;
665 ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
666 goto create_qp_exit1;
667 }
668
669 /* Always signal by WQE so we can hide circ. WQEs */
670 parms.sigtype = HCALL_SIGT_BY_WQE;
671
672 /* UD_AV CIRCUMVENTION */
673 max_send_sge = init_attr->cap.max_send_sge;
674 max_recv_sge = init_attr->cap.max_recv_sge;
675 if (parms.servicetype == ST_UD && !is_llqp) {
676 max_send_sge += 2;
677 max_recv_sge += 2;
678 }
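 /*
  * UD_AV circumvention: two extra SGEs are requested for non-LL UD
  * queues (apparently to make room for address-vector data); they are
  * hidden from the consumer again by the adjustments below and in
  * ehca_query_qp().
  */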
679
680 parms.token = my_qp->token;
681 parms.eq_handle = shca->eq.ipz_eq_handle;
682 parms.pd = my_pd->fw_pd;
683 if (my_qp->send_cq)
684 parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
685 if (my_qp->recv_cq)
686 parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
687
688 parms.squeue.max_wr = init_attr->cap.max_send_wr;
689 parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
690 parms.squeue.max_sge = max_send_sge;
691 parms.rqueue.max_sge = max_recv_sge;
692
693 /* RC QPs need one more SWQE for unsolicited ack circumvention */
694 if (qp_type == IB_QPT_RC)
695 parms.squeue.max_wr++;
696
697 if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
698 if (HAS_SQ(my_qp))
699 ehca_determine_small_queue(
700 &parms.squeue, max_send_sge, is_llqp);
701 if (HAS_RQ(my_qp))
702 ehca_determine_small_queue(
703 &parms.rqueue, max_recv_sge, is_llqp);
704 parms.qp_storage =
705 (parms.squeue.is_small || parms.rqueue.is_small);
706 }
707
708 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
709 if (h_ret != H_SUCCESS) {
710 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
711 h_ret);
712 ret = ehca2ib_return_code(h_ret);
713 goto create_qp_exit1;
714 }
715
716 ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
717 my_qp->ipz_qp_handle = parms.qp_handle;
718 my_qp->galpas = parms.galpas;
719
720 swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
721 rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
722
723 switch (qp_type) {
724 case IB_QPT_RC:
725 if (is_llqp) {
726 parms.squeue.act_nr_sges = 1;
727 parms.rqueue.act_nr_sges = 1;
728 }
729 /* hide the extra WQE */
730 parms.squeue.act_nr_wqes--;
731 break;
732 case IB_QPT_UD:
733 case IB_QPT_GSI:
734 case IB_QPT_SMI:
735 /* UD circumvention */
736 if (is_llqp) {
737 parms.squeue.act_nr_sges = 1;
738 parms.rqueue.act_nr_sges = 1;
739 } else {
740 parms.squeue.act_nr_sges -= 2;
741 parms.rqueue.act_nr_sges -= 2;
742 }
743
744 if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
745 parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
746 parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
747 parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
748 parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
749 ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
750 }
751
752 break;
753
754 default:
755 break;
756 }
757
758 /* initialize r/squeue and register queue pages */
759 if (HAS_SQ(my_qp)) {
760 ret = init_qp_queue(
761 shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
762 HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
763 &parms.squeue, swqe_size);
764 if (ret) {
765 ehca_err(pd->device, "Couldn't initialize squeue "
766 "and pages ret=%i", ret);
767 goto create_qp_exit2;
768 }
769
770 if (!is_user) {
771 my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
772 my_qp->ipz_squeue.qe_size;
773 my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
774 sizeof(struct ehca_qmap_entry));
775 if (!my_qp->sq_map.map) {
 ret = -ENOMEM;
776 ehca_err(pd->device, "Couldn't allocate squeue "
777 "map ret=%i", ret);
778 goto create_qp_exit3;
779 }
780 INIT_LIST_HEAD(&my_qp->sq_err_node);
781 /* to avoid the generation of bogus flush CQEs */
782 reset_queue_map(&my_qp->sq_map);
783 }
784 }
785
786 if (HAS_RQ(my_qp)) {
787 ret = init_qp_queue(
788 shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
789 H_SUCCESS, &parms.rqueue, rwqe_size);
790 if (ret) {
791 ehca_err(pd->device, "Couldn't initialize rqueue "
792 "and pages ret=%i", ret);
793 goto create_qp_exit4;
794 }
795 if (!is_user) {
796 my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
797 my_qp->ipz_rqueue.qe_size;
798 my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
799 sizeof(struct ehca_qmap_entry));
800 if (!my_qp->rq_map.map) {
 ret = -ENOMEM;
801 ehca_err(pd->device, "Couldn't allocate rqueue "
802 "map ret=%i", ret);
803 goto create_qp_exit5;
804 }
805 INIT_LIST_HEAD(&my_qp->rq_err_node);
806 /* to avoid the generation of bogus flush CQEs */
807 reset_queue_map(&my_qp->rq_map);
808 }
809 } else if (init_attr->srq && !is_user) {
810 /* this is a base QP, use the queue map of the SRQ */
811 my_qp->rq_map = my_srq->rq_map;
812 INIT_LIST_HEAD(&my_qp->rq_err_node);
813
814 my_qp->ipz_rqueue = my_srq->ipz_rqueue;
815 }
816
817 if (is_srq) {
818 my_qp->ib_srq.pd = &my_pd->ib_pd;
819 my_qp->ib_srq.device = my_pd->ib_pd.device;
820
821 my_qp->ib_srq.srq_context = init_attr->qp_context;
822 my_qp->ib_srq.event_handler = init_attr->event_handler;
823 } else {
824 my_qp->ib_qp.qp_num = ib_qp_num;
825 my_qp->ib_qp.pd = &my_pd->ib_pd;
826 my_qp->ib_qp.device = my_pd->ib_pd.device;
827
828 my_qp->ib_qp.recv_cq = init_attr->recv_cq;
829 my_qp->ib_qp.send_cq = init_attr->send_cq;
830
831 my_qp->ib_qp.qp_type = qp_type;
832 my_qp->ib_qp.srq = init_attr->srq;
833
834 my_qp->ib_qp.qp_context = init_attr->qp_context;
835 my_qp->ib_qp.event_handler = init_attr->event_handler;
836 }
837
838 init_attr->cap.max_inline_data = 0; /* not supported yet */
839 init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
840 init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
841 init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
842 init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
843 my_qp->init_attr = *init_attr;
844
845 if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
846 shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
847 &my_qp->ib_qp;
848 if (ehca_nr_ports < 0) {
849 /* alloc array to cache subsequent modify qp parms
850 * for autodetect mode
851 */
852 my_qp->mod_qp_parm =
853 kzalloc(EHCA_MOD_QP_PARM_MAX *
854 sizeof(*my_qp->mod_qp_parm),
855 GFP_KERNEL);
856 if (!my_qp->mod_qp_parm) {
 ret = -ENOMEM;
857 ehca_err(pd->device,
858 "Could not alloc mod_qp_parm");
859 goto create_qp_exit5;
860 }
861 }
862 }
863
864 /* NOTE: define_aqp0() not supported yet */
865 if (qp_type == IB_QPT_GSI) {
866 h_ret = ehca_define_sqp(shca, my_qp, init_attr);
867 if (h_ret != H_SUCCESS) {
868 kfree(my_qp->mod_qp_parm);
869 my_qp->mod_qp_parm = NULL;
870 /* the QP pointer is no longer valid */
871 shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
872 NULL;
873 ret = ehca2ib_return_code(h_ret);
874 goto create_qp_exit6;
875 }
876 }
877
878 if (my_qp->send_cq) {
879 ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
880 if (ret) {
881 ehca_err(pd->device,
882 "Couldn't assign qp to send_cq ret=%i", ret);
883 goto create_qp_exit7;
884 }
885 }
886
887 /* copy queues, galpa data to user space */
888 if (context && udata) {
889 struct ehca_create_qp_resp resp;
890 memset(&resp, 0, sizeof(resp));
891
892 resp.qp_num = my_qp->real_qp_num;
893 resp.token = my_qp->token;
894 resp.qp_type = my_qp->qp_type;
895 resp.ext_type = my_qp->ext_type;
896 resp.qkey = my_qp->qkey;
897 resp.real_qp_num = my_qp->real_qp_num;
898
899 if (HAS_SQ(my_qp))
900 queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
901 if (HAS_RQ(my_qp))
902 queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
903 resp.fw_handle_ofs = (u32)
904 (my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));
905
906 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
907 ehca_err(pd->device, "Copy to udata failed");
908 ret = -EINVAL;
909 goto create_qp_exit8;
910 }
911 }
912
913 return my_qp;
914
915create_qp_exit8:
916 ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
917
918create_qp_exit7:
919 kfree(my_qp->mod_qp_parm);
920
921create_qp_exit6:
922 if (HAS_RQ(my_qp) && !is_user)
923 vfree(my_qp->rq_map.map);
924
925create_qp_exit5:
926 if (HAS_RQ(my_qp))
927 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
928
929create_qp_exit4:
930 if (HAS_SQ(my_qp) && !is_user)
931 vfree(my_qp->sq_map.map);
932
933create_qp_exit3:
934 if (HAS_SQ(my_qp))
935 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
936
937create_qp_exit2:
938 hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
939
940create_qp_exit1:
941 write_lock_irqsave(&ehca_qp_idr_lock, flags);
942 idr_remove(&ehca_qp_idr, my_qp->token);
943 write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
944
945create_qp_exit0:
946 kmem_cache_free(qp_cache, my_qp);
947 atomic_dec(&shca->num_qps);
948 return ERR_PTR(ret);
949}
950
951struct ib_qp *ehca_create_qp(struct ib_pd *pd,
952 struct ib_qp_init_attr *qp_init_attr,
953 struct ib_udata *udata)
954{
955 struct ehca_qp *ret;
956
957 ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
958 return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
959}
960
961static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
962 struct ib_uobject *uobject);
963
964struct ib_srq *ehca_create_srq(struct ib_pd *pd,
965 struct ib_srq_init_attr *srq_init_attr,
966 struct ib_udata *udata)
967{
968 struct ib_qp_init_attr qp_init_attr;
969 struct ehca_qp *my_qp;
970 struct ib_srq *ret;
971 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
972 ib_device);
973 struct hcp_modify_qp_control_block *mqpcb;
974 u64 hret, update_mask;
975
976 if (srq_init_attr->srq_type != IB_SRQT_BASIC)
977 return ERR_PTR(-ENOSYS);
978
979 /* For common attributes, internal_create_qp() takes its info
980 * out of qp_init_attr, so copy all common attrs there.
981 */
982 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
983 qp_init_attr.event_handler = srq_init_attr->event_handler;
984 qp_init_attr.qp_context = srq_init_attr->srq_context;
985 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
986 qp_init_attr.qp_type = IB_QPT_RC;
987 qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
988 qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;
989
990 my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
991 if (IS_ERR(my_qp))
992 return (struct ib_srq *)my_qp;
993
994 /* copy back return values */
995 srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
996 srq_init_attr->attr.max_sge = 3;
997
998 /* drive SRQ into RTR state */
999 mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1000 if (!mqpcb) {
1001 ehca_err(pd->device, "Could not get zeroed page for mqpcb "
1002 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
1003 ret = ERR_PTR(-ENOMEM);
1004 goto create_srq1;
1005 }
1006
1007 mqpcb->qp_state = EHCA_QPS_INIT;
1008 mqpcb->prim_phys_port = 1;
1009 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
1010 hret = hipz_h_modify_qp(shca->ipz_hca_handle,
1011 my_qp->ipz_qp_handle,
1012 &my_qp->pf,
1013 update_mask,
1014 mqpcb, my_qp->galpas.kernel);
1015 if (hret != H_SUCCESS) {
1016 ehca_err(pd->device, "Could not modify SRQ to INIT "
1017 "ehca_qp=%p qp_num=%x h_ret=%lli",
1018 my_qp, my_qp->real_qp_num, hret);
1019 goto create_srq2;
1020 }
1021
1022 mqpcb->qp_enable = 1;
1023 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
1024 hret = hipz_h_modify_qp(shca->ipz_hca_handle,
1025 my_qp->ipz_qp_handle,
1026 &my_qp->pf,
1027 update_mask,
1028 mqpcb, my_qp->galpas.kernel);
1029 if (hret != H_SUCCESS) {
1030 ehca_err(pd->device, "Could not enable SRQ "
1031 "ehca_qp=%p qp_num=%x h_ret=%lli",
1032 my_qp, my_qp->real_qp_num, hret);
1033 goto create_srq2;
1034 }
1035
1036 mqpcb->qp_state = EHCA_QPS_RTR;
1037 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
1038 hret = hipz_h_modify_qp(shca->ipz_hca_handle,
1039 my_qp->ipz_qp_handle,
1040 &my_qp->pf,
1041 update_mask,
1042 mqpcb, my_qp->galpas.kernel);
1043 if (hret != H_SUCCESS) {
1044 ehca_err(pd->device, "Could not modify SRQ to RTR "
1045 "ehca_qp=%p qp_num=%x h_ret=%lli",
1046 my_qp, my_qp->real_qp_num, hret);
1047 goto create_srq2;
1048 }
1049
1050 ehca_free_fw_ctrlblock(mqpcb);
1051
1052 return &my_qp->ib_srq;
1053
1054create_srq2:
1055 ret = ERR_PTR(ehca2ib_return_code(hret));
1056 ehca_free_fw_ctrlblock(mqpcb);
1057
1058create_srq1:
1059 internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);
1060
1061 return ret;
1062}
1063
1064/*
1065 * prepare_sqe_rts() is called by internal_modify_qp() at the sqe -> rts
1066 * transition. It sets the purge bit of the bad wqe and of all subsequent
1067 * wqes to avoid re-entering sqe, and returns the total number of bad wqes in bad_wqe_cnt.
1068 */
1069static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
1070 int *bad_wqe_cnt)
1071{
1072 u64 h_ret;
1073 struct ipz_queue *squeue;
1074 void *bad_send_wqe_p, *bad_send_wqe_v;
1075 u64 q_ofs;
1076 struct ehca_wqe *wqe;
1077 int qp_num = my_qp->ib_qp.qp_num;
1078
1079 /* get send wqe pointer */
1080 h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
1081 my_qp->ipz_qp_handle, &my_qp->pf,
1082 &bad_send_wqe_p, NULL, 2);
1083 if (h_ret != H_SUCCESS) {
1084 ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
1085 " ehca_qp=%p qp_num=%x h_ret=%lli",
1086 my_qp, qp_num, h_ret);
1087 return ehca2ib_return_code(h_ret);
1088 }
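 /* convert the "real" address to an absolute one by clearing bit 63 */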
1089 bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
1090 ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
1091 qp_num, bad_send_wqe_p);
1092 /* convert wqe pointer to vadr */
1093 bad_send_wqe_v = __va((u64)bad_send_wqe_p);
1094 if (ehca_debug_level >= 2)
1095 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
1096 squeue = &my_qp->ipz_squeue;
1097 if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
1098 ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
1099 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
1100 return -EFAULT;
1101 }
1102
1103 /* loop sets wqe's purge bit */
1104 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
1105 *bad_wqe_cnt = 0;
1106 while (wqe->optype != 0xff && wqe->wqef != 0xff) {
1107 if (ehca_debug_level >= 2)
1108 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
1109 wqe->nr_of_data_seg = 0; /* suppress data access */
1110 wqe->wqef = WQEF_PURGE; /* WQE to be purged */
1111 q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
1112 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
1113 *bad_wqe_cnt = (*bad_wqe_cnt)+1;
1114 }
1115 /*
1116 * the bad wqe will be reprocessed and ignored when poll_cq() is called,
1117 * i.e. the nr of wqes with flush error status is one less
1118 */
1119 ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
1120 qp_num, (*bad_wqe_cnt)-1);
1121 wqe->wqef = 0;
1122
1123 return 0;
1124}
1125
1126static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
1127 struct ehca_queue_map *qmap)
1128{
1129 void *wqe_v;
1130 u64 q_ofs;
1131 u32 wqe_idx;
1132 unsigned int tail_idx;
1133
1134 /* convert real to abs address */
1135 wqe_p = wqe_p & (~(1UL << 63));
1136
1137 wqe_v = __va(wqe_p);
1138
1139 if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
1140 ehca_gen_err("Invalid offset for calculating left cqes "
1141 "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
1142 return -EFAULT;
1143 }
1144
1145 tail_idx = next_index(qmap->tail, qmap->entries);
1146 wqe_idx = q_ofs / ipz_queue->qe_size;
1147
1148 /* check all processed wqes, whether a cqe is requested or not */
1149 while (tail_idx != wqe_idx) {
1150 if (qmap->map[tail_idx].cqe_req)
1151 qmap->left_to_poll++;
1152 tail_idx = next_index(tail_idx, qmap->entries);
1153 }
1154 /* save index in queue, where we have to start flushing */
1155 qmap->next_wqe_idx = wqe_idx;
1156 return 0;
1157}
1158
1159static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
1160{
1161 u64 h_ret;
1162 void *send_wqe_p, *recv_wqe_p;
1163 int ret;
1164 unsigned long flags;
1165 int qp_num = my_qp->ib_qp.qp_num;
1166
1167 /* this hcall is not supported on base QPs */
1168 if (my_qp->ext_type != EQPT_SRQBASE) {
1169 /* get send and receive wqe pointer */
1170 h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
1171 my_qp->ipz_qp_handle, &my_qp->pf,
1172 &send_wqe_p, &recv_wqe_p, 4);
1173 if (h_ret != H_SUCCESS) {
1174 ehca_err(&shca->ib_device, "disable_and_get_wqe() "
1175 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
1176 my_qp, qp_num, h_ret);
1177 return ehca2ib_return_code(h_ret);
1178 }
1179
1180 /*
1181 * acquire the lock to ensure that nobody is polling the cq;
1182 * otherwise the qmap->tail pointer could be read in an
1183 * inconsistent state.
1184 */
1185 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
1186 ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
1187 &my_qp->sq_map);
1188 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
1189 if (ret)
1190 return ret;
1191
1192
1193 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
1194 ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
1195 &my_qp->rq_map);
1196 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
1197 if (ret)
1198 return ret;
1199 } else {
1200 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
1201 my_qp->sq_map.left_to_poll = 0;
1202 my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
1203 my_qp->sq_map.entries);
1204 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
1205
1206 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
1207 my_qp->rq_map.left_to_poll = 0;
1208 my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
1209 my_qp->rq_map.entries);
1210 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
1211 }
1212
1213 /* this ensures flush cqes are generated only for pending wqes */
1214 if ((my_qp->sq_map.left_to_poll == 0) &&
1215 (my_qp->rq_map.left_to_poll == 0)) {
1216 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
1217 ehca_add_to_err_list(my_qp, 1);
1218 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
1219
1220 if (HAS_RQ(my_qp)) {
1221 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
1222 ehca_add_to_err_list(my_qp, 0);
1223 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
1224 flags);
1225 }
1226 }
1227
1228 return 0;
1229}
1230
1231/*
1232 * internal_modify_qp with circumvention to handle aqp0 properly
1233 * smi_reset2init indicates if this is an internal reset-to-init call for
1234 * smi. This flag must always be zero if called from ehca_modify_qp()!
1235 * This internal function was introduced to avoid recursion into ehca_modify_qp()!
1236 */
1237static int internal_modify_qp(struct ib_qp *ibqp,
1238 struct ib_qp_attr *attr,
1239 int attr_mask, int smi_reset2init)
1240{
1241 enum ib_qp_state qp_cur_state, qp_new_state;
1242 int cnt, qp_attr_idx, ret = 0;
1243 enum ib_qp_statetrans statetrans;
1244 struct hcp_modify_qp_control_block *mqpcb;
1245 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
1246 struct ehca_shca *shca =
1247 container_of(ibqp->pd->device, struct ehca_shca, ib_device);
1248 u64 update_mask;
1249 u64 h_ret;
1250 int bad_wqe_cnt = 0;
1251 int is_user = 0;
1252 int squeue_locked = 0;
1253 unsigned long flags = 0;
1254
1255 /* do query_qp to obtain current attr values */
1256 mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
1257 if (!mqpcb) {
1258 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
1259 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
1260 return -ENOMEM;
1261 }
1262
1263 h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
1264 my_qp->ipz_qp_handle,
1265 &my_qp->pf,
1266 mqpcb, my_qp->galpas.kernel);
1267 if (h_ret != H_SUCCESS) {
1268 ehca_err(ibqp->device, "hipz_h_query_qp() failed "
1269 "ehca_qp=%p qp_num=%x h_ret=%lli",
1270 my_qp, ibqp->qp_num, h_ret);
1271 ret = ehca2ib_return_code(h_ret);
1272 goto modify_qp_exit1;
1273 }
1274 if (ibqp->uobject)
1275 is_user = 1;
1276
1277 qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
1278
1279 if (qp_cur_state == -EINVAL) { /* invalid qp state */
1280 ret = -EINVAL;
1281 ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
1282 "ehca_qp=%p qp_num=%x",
1283 mqpcb->qp_state, my_qp, ibqp->qp_num);
1284 goto modify_qp_exit1;
1285 }
1286 /*
1287 * circumvention to set aqp0 initial state to init
1288 * as expected by IB spec
1289 */
1290 if (smi_reset2init == 0 &&
1291 ibqp->qp_type == IB_QPT_SMI &&
1292 qp_cur_state == IB_QPS_RESET &&
1293 (attr_mask & IB_QP_STATE) &&
1294 attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
1295 struct ib_qp_attr smiqp_attr = {
1296 .qp_state = IB_QPS_INIT,
1297 .port_num = my_qp->init_attr.port_num,
1298 .pkey_index = 0,
1299 .qkey = 0
1300 };
1301 int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
1302 IB_QP_PKEY_INDEX | IB_QP_QKEY;
1303 int smirc = internal_modify_qp(
1304 ibqp, &smiqp_attr, smiqp_attr_mask, 1);
1305 if (smirc) {
1306 ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
1307 "ehca_modify_qp() rc=%i", smirc);
1308 ret = H_PARAMETER;
1309 goto modify_qp_exit1;
1310 }
1311 qp_cur_state = IB_QPS_INIT;
1312 ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
1313 }
1314 /* is the transmitted current state equal to the "real" current state? */
1315 if ((attr_mask & IB_QP_CUR_STATE) &&
1316 qp_cur_state != attr->cur_qp_state) {
1317 ret = -EINVAL;
1318 ehca_err(ibqp->device,
1319 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
1320 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
1321 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
1322 goto modify_qp_exit1;
1323 }
1324
1325 ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
1326 "new qp_state=%x attribute_mask=%x",
1327 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
1328
1329 qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
1330 if (!smi_reset2init &&
1331 !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
1332 attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
1333 ret = -EINVAL;
1334 ehca_err(ibqp->device,
1335 "Invalid qp transition new_state=%x cur_state=%x "
1336 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
1337 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
1338 goto modify_qp_exit1;
1339 }
1340
1341 mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
1342 if (mqpcb->qp_state)
1343 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
1344 else {
1345 ret = -EINVAL;
1346 ehca_err(ibqp->device, "Invalid new qp state=%x "
1347 "ehca_qp=%p qp_num=%x",
1348 qp_new_state, my_qp, ibqp->qp_num);
1349 goto modify_qp_exit1;
1350 }
1351
1352 /* retrieve state transition struct to get req and opt attrs */
1353 statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
1354 if (statetrans < 0) {
1355 ret = -EINVAL;
1356 ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
1357 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
1358 "qp_num=%x", qp_cur_state, qp_new_state,
1359 statetrans, my_qp, ibqp->qp_num);
1360 goto modify_qp_exit1;
1361 }
1362
1363 qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);
1364
1365 if (qp_attr_idx < 0) {
1366 ret = qp_attr_idx;
1367 ehca_err(ibqp->device,
1368 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
1369 ibqp->qp_type, my_qp, ibqp->qp_num);
1370 goto modify_qp_exit1;
1371 }
1372
1373 ehca_dbg(ibqp->device,
1374 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
1375 my_qp, ibqp->qp_num, statetrans);
1376
1377 /* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
1378 * in non-LL UD QPs.
1379 */
1380 if ((my_qp->qp_type == IB_QPT_UD) &&
1381 (my_qp->ext_type != EQPT_LLQP) &&
1382 (statetrans == IB_QPST_INIT2RTR) &&
1383 (shca->hw_level >= 0x22)) {
1384 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
1385 mqpcb->send_grh_flag = 1;
1386 }
1387
1388 /* sqe -> rts: set purge bit of bad wqe before actual trans */
1389 if ((my_qp->qp_type == IB_QPT_UD ||
1390 my_qp->qp_type == IB_QPT_GSI ||
1391 my_qp->qp_type == IB_QPT_SMI) &&
1392 statetrans == IB_QPST_SQE2RTS) {
1393 /* mark next free wqe if kernel */
1394 if (!ibqp->uobject) {
1395 struct ehca_wqe *wqe;
1396 /* lock send queue */
1397 spin_lock_irqsave(&my_qp->spinlock_s, flags);
1398 squeue_locked = 1;
1399 /* mark next free wqe */
1400 wqe = (struct ehca_wqe *)
1401 ipz_qeit_get(&my_qp->ipz_squeue);
1402 wqe->optype = wqe->wqef = 0xff;
1403 ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
1404 ibqp->qp_num, wqe);
1405 }
1406 ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
1407 if (ret) {
1408 ehca_err(ibqp->device, "prepare_sqe_rts() failed "
1409 "ehca_qp=%p qp_num=%x ret=%i",
1410 my_qp, ibqp->qp_num, ret);
1411 goto modify_qp_exit2;
1412 }
1413 }
1414
1415 /*
1416 * enable RDMA_Atomic_Control on reset->init for reliable connections;
1417 * this is necessary since gen2 does not provide that flag,
1418 * but pHyp requires it
1419 */
1420 if (statetrans == IB_QPST_RESET2INIT &&
1421 (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
1422 mqpcb->rdma_atomic_ctrl = 3;
1423 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
1424 }
1425 /* circumvention: pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
1426 if (statetrans == IB_QPST_INIT2RTR &&
1427 (ibqp->qp_type == IB_QPT_UC) &&
1428 !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
1429 mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
1430 update_mask |=
1431 EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
1432 }
1433
1434 if (attr_mask & IB_QP_PKEY_INDEX) {
1435 if (attr->pkey_index >= 16) {
1436 ret = -EINVAL;
1437 ehca_err(ibqp->device, "Invalid pkey_index=%x. "
1438 "ehca_qp=%p qp_num=%x max_pkey_index=f",
1439 attr->pkey_index, my_qp, ibqp->qp_num);
1440 goto modify_qp_exit2;
1441 }
1442 mqpcb->prim_p_key_idx = attr->pkey_index;
1443 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
1444 }
1445 if (attr_mask & IB_QP_PORT) {
1446 struct ehca_sport *sport;
1447 struct ehca_qp *aqp1;
1448 if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
1449 ret = -EINVAL;
1450 ehca_err(ibqp->device, "Invalid port=%x. "
1451 "ehca_qp=%p qp_num=%x num_ports=%x",
1452 attr->port_num, my_qp, ibqp->qp_num,
1453 shca->num_ports);
1454 goto modify_qp_exit2;
1455 }
1456 sport = &shca->sport[attr->port_num - 1];
1457 if (!sport->ibqp_sqp[IB_QPT_GSI]) {
1458 /* should not occur */
1459 ret = -EFAULT;
1460 ehca_err(ibqp->device, "AQP1 was not created for "
1461 "port=%x", attr->port_num);
1462 goto modify_qp_exit2;
1463 }
1464 aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
1465 struct ehca_qp, ib_qp);
1466 if (ibqp->qp_type != IB_QPT_GSI &&
1467 ibqp->qp_type != IB_QPT_SMI &&
1468 aqp1->mod_qp_parm) {
1469 /*
1470 * firmware will reject this modify_qp() because
1471 * port is not activated/initialized fully
1472 */
1473 ret = -EFAULT;
1474 ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
1475 "either port is being activated (try again) "
1476 "or cabling issue", attr->port_num);
1477 goto modify_qp_exit2;
1478 }
1479 mqpcb->prim_phys_port = attr->port_num;
1480 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
1481 }
1482 if (attr_mask & IB_QP_QKEY) {
1483 mqpcb->qkey = attr->qkey;
1484 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
1485 }
1486 if (attr_mask & IB_QP_AV) {
1487 mqpcb->dlid = attr->ah_attr.dlid;
1488 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
1489 mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
1490 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
1491 mqpcb->service_level = attr->ah_attr.sl;
1492 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
1493
1494 if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
1495 attr->ah_attr.static_rate,
1496 &mqpcb->max_static_rate)) {
1497 ret = -EINVAL;
1498 goto modify_qp_exit2;
1499 }
1500 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
1501
1502 /*
1503 * Always supply the GRH flag, even if it's zero, to give the
1504 * hypervisor a clear "yes" or "no" instead of a "perhaps"
1505 */
1506 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
1507
1508 /*
1509 * only if GRH is TRUE may we supply SOURCE_GID_IDX
1510 * and DEST_GID; otherwise pHyp will return H_ATTR_PARM!!!
1511 */
1512 if (attr->ah_attr.ah_flags == IB_AH_GRH) {
1513 mqpcb->send_grh_flag = 1;
1514
1515 mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
1516 update_mask |=
1517 EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
1518
1519 for (cnt = 0; cnt < 16; cnt++)
1520 mqpcb->dest_gid.byte[cnt] =
1521 attr->ah_attr.grh.dgid.raw[cnt];
1522
1523 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
1524 mqpcb->flow_label = attr->ah_attr.grh.flow_label;
1525 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
1526 mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
1527 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
1528 mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
1529 update_mask |=
1530 EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
1531 }
1532 }
1533
1534 if (attr_mask & IB_QP_PATH_MTU) {
1535 /* store log2(MTU) */
1536 my_qp->mtu_shift = attr->path_mtu + 7;
1537 mqpcb->path_mtu = attr->path_mtu;
1538 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
1539 }
1540 if (attr_mask & IB_QP_TIMEOUT) {
1541 mqpcb->timeout = attr->timeout;
1542 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
1543 }
1544 if (attr_mask & IB_QP_RETRY_CNT) {
1545 mqpcb->retry_count = attr->retry_cnt;
1546 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
1547 }
1548 if (attr_mask & IB_QP_RNR_RETRY) {
1549 mqpcb->rnr_retry_count = attr->rnr_retry;
1550 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
1551 }
1552 if (attr_mask & IB_QP_RQ_PSN) {
1553 mqpcb->receive_psn = attr->rq_psn;
1554 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
1555 }
1556 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1557 mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
1558 attr->max_dest_rd_atomic : 2;
1559 update_mask |=
1560 EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
1561 }
1562 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1563 mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
1564 attr->max_rd_atomic : 2;
1565 update_mask |=
1566 EHCA_BMASK_SET
1567 (MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
1568 }
1569 if (attr_mask & IB_QP_ALT_PATH) {
1570 if (attr->alt_port_num < 1
1571 || attr->alt_port_num > shca->num_ports) {
1572 ret = -EINVAL;
1573 ehca_err(ibqp->device, "Invalid alt_port=%x. "
1574 "ehca_qp=%p qp_num=%x num_ports=%x",
1575 attr->alt_port_num, my_qp, ibqp->qp_num,
1576 shca->num_ports);
1577 goto modify_qp_exit2;
1578 }
1579 mqpcb->alt_phys_port = attr->alt_port_num;
1580
1581 if (attr->alt_pkey_index >= 16) {
1582 ret = -EINVAL;
1583 ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
1584 "ehca_qp=%p qp_num=%x max_pkey_index=f",
1585 attr->pkey_index, my_qp, ibqp->qp_num);
1586 goto modify_qp_exit2;
1587 }
1588 mqpcb->alt_p_key_idx = attr->alt_pkey_index;
1589
1590 mqpcb->timeout_al = attr->alt_timeout;
1591 mqpcb->dlid_al = attr->alt_ah_attr.dlid;
1592 mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
1593 mqpcb->service_level_al = attr->alt_ah_attr.sl;
1594
1595 if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
1596 attr->alt_ah_attr.static_rate,
1597 &mqpcb->max_static_rate_al)) {
1598 ret = -EINVAL;
1599 goto modify_qp_exit2;
1600 }
1601
1602 /* OpenIB doesn't support alternate retry counts - copy them */
1603 mqpcb->retry_count_al = mqpcb->retry_count;
1604 mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;
1605
1606 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
1607 | EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
1608 | EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
1609 | EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
1610 | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
1611 | EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
1612 | EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
1613 | EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
1614 | EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);
1615
1616 /*
1617 * Always supply the GRH flag, even if it's zero, to give the
1618 * hypervisor a clear "yes" or "no" instead of a "perhaps"
1619 */
1620 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
1621
1622 /*
1623 * only if GRH is TRUE may we supply SOURCE_GID_IDX
1624 * and DEST_GID; otherwise pHyp will return H_ATTR_PARM!!!
1625 */
1626 if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
1627 mqpcb->send_grh_flag_al = 1;
1628
1629 for (cnt = 0; cnt < 16; cnt++)
1630 mqpcb->dest_gid_al.byte[cnt] =
1631 attr->alt_ah_attr.grh.dgid.raw[cnt];
1632 mqpcb->source_gid_idx_al =
1633 attr->alt_ah_attr.grh.sgid_index;
1634 mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
1635 mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
1636 mqpcb->traffic_class_al =
1637 attr->alt_ah_attr.grh.traffic_class;
1638
1639 update_mask |=
1640 EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
1641 | EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
1642 | EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
1643 | EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
1644 EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
1645 }
1646 }
1647
1648 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1649 mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
1650 update_mask |=
1651 EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
1652 }
1653
1654 if (attr_mask & IB_QP_SQ_PSN) {
1655 mqpcb->send_psn = attr->sq_psn;
1656 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
1657 }
1658
1659 if (attr_mask & IB_QP_DEST_QPN) {
1660 mqpcb->dest_qp_nr = attr->dest_qp_num;
1661 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
1662 }
1663
1664 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1665 if (attr->path_mig_state != IB_MIG_REARM
1666 && attr->path_mig_state != IB_MIG_MIGRATED) {
1667 ret = -EINVAL;
1668 ehca_err(ibqp->device, "Invalid mig_state=%x",
1669 attr->path_mig_state);
1670 goto modify_qp_exit2;
1671 }
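 /*
  * the firmware encodes path_migration_state as the IB value plus one;
  * ehca_query_qp() subtracts one again when reporting it back
  */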
1672 mqpcb->path_migration_state = attr->path_mig_state + 1;
1673 if (attr->path_mig_state == IB_MIG_REARM)
1674 my_qp->mig_armed = 1;
1675 update_mask |=
1676 EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
1677 }
1678
1679 if (attr_mask & IB_QP_CAP) {
1680 mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
1681 update_mask |=
1682 EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
1683 mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
1684 update_mask |=
1685 EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
1686 /* no support for max_send/recv_sge yet */
1687 }
1688
1689 if (ehca_debug_level >= 2)
1690 ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
1691
1692 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
1693 my_qp->ipz_qp_handle,
1694 &my_qp->pf,
1695 update_mask,
1696 mqpcb, my_qp->galpas.kernel);
1697
1698 if (h_ret != H_SUCCESS) {
1699 ret = ehca2ib_return_code(h_ret);
1700 ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
1701 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
1702 goto modify_qp_exit2;
1703 }
1704
1705 if ((my_qp->qp_type == IB_QPT_UD ||
1706 my_qp->qp_type == IB_QPT_GSI ||
1707 my_qp->qp_type == IB_QPT_SMI) &&
1708 statetrans == IB_QPST_SQE2RTS) {
1709 /* ring doorbell to reprocess wqes */
1710 iosync(); /* serialize GAL register access */
1711 hipz_update_sqa(my_qp, bad_wqe_cnt-1);
1712 ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
1713 }
1714
1715 if (statetrans == IB_QPST_RESET2INIT ||
1716 statetrans == IB_QPST_INIT2INIT) {
1717 mqpcb->qp_enable = 1;
1718 mqpcb->qp_state = EHCA_QPS_INIT;
1719 update_mask = 0;
1720 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
1721
1722 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
1723 my_qp->ipz_qp_handle,
1724 &my_qp->pf,
1725 update_mask,
1726 mqpcb,
1727 my_qp->galpas.kernel);
1728
1729 if (h_ret != H_SUCCESS) {
1730 ret = ehca2ib_return_code(h_ret);
1731 ehca_err(ibqp->device, "ENABLE in context of "
1732 "RESET_2_INIT failed! Maybe you didn't get "
1733 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
1734 h_ret, my_qp, ibqp->qp_num);
1735 goto modify_qp_exit2;
1736 }
1737 }
1738 if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
1739 && !is_user) {
1740 ret = check_for_left_cqes(my_qp, shca);
1741 if (ret)
1742 goto modify_qp_exit2;
1743 }
1744
1745 if (statetrans == IB_QPST_ANY2RESET) {
1746 ipz_qeit_reset(&my_qp->ipz_rqueue);
1747 ipz_qeit_reset(&my_qp->ipz_squeue);
1748
1749 if (qp_cur_state == IB_QPS_ERR && !is_user) {
1750 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
1751
1752 if (HAS_RQ(my_qp))
1753 del_from_err_list(my_qp->recv_cq,
1754 &my_qp->rq_err_node);
1755 }
1756 if (!is_user)
1757 reset_queue_map(&my_qp->sq_map);
1758
1759 if (HAS_RQ(my_qp) && !is_user)
1760 reset_queue_map(&my_qp->rq_map);
1761 }
1762
1763 if (attr_mask & IB_QP_QKEY)
1764 my_qp->qkey = attr->qkey;
1765
1766modify_qp_exit2:
1767 if (squeue_locked) { /* this means: sqe -> rts */
1768 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
1769 my_qp->sqerr_purgeflag = 1;
1770 }
1771
1772modify_qp_exit1:
1773 ehca_free_fw_ctrlblock(mqpcb);
1774
1775 return ret;
1776}
1777
1778int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1779 struct ib_udata *udata)
1780{
1781 int ret = 0;
1782
1783 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
1784 ib_device);
1785 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
1786
1787 /* The if-block below caches the qp_attr to be modified for GSI and SMI
1788 * qps during initialization by ib_mad. When the respective port
1789 * is activated, i.e. we got a PORT_ACTIVE event, we replay the
1790 * cached sequence of modify calls, see ehca_recover_sqp() below.
1791 * Why this is required:
1792 * 1) If only one port is connected, older code required that port
1793 * one be connected and the module option nr_ports=1 be given by
1794 * the user, which is very inconvenient for the end user.
1795 * 2) Firmware accepts modify_qp() only once the respective port has
1796 * become active. Older code had a 30 sec wait loop in create_qp()/
1797 * define_aqp1(), which is not appropriate in practice. This
1798 * code removes that wait loop, see define_aqp1(), and always
1799 * reports all ports to ib_mad and to users. Only activated ports
1800 * will then be usable by the users.
1801 */
1802 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1803 int port = my_qp->init_attr.port_num;
1804 struct ehca_sport *sport = &shca->sport[port - 1];
1805 unsigned long flags;
1806 spin_lock_irqsave(&sport->mod_sqp_lock, flags);
1807 /* cache qp_attr only during init */
1808 if (my_qp->mod_qp_parm) {
1809 struct ehca_mod_qp_parm *p;
1810 if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
1811 ehca_err(&shca->ib_device,
1812 "mod_qp_parm overflow state=%x port=%x"
1813 " type=%x", attr->qp_state,
1814 my_qp->init_attr.port_num,
1815 ibqp->qp_type);
1816 spin_unlock_irqrestore(&sport->mod_sqp_lock,
1817 flags);
1818 return -EINVAL;
1819 }
1820 p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
1821 p->mask = attr_mask;
1822 p->attr = *attr;
1823 my_qp->mod_qp_parm_idx++;
1824 ehca_dbg(&shca->ib_device,
1825 "Saved qp_attr for state=%x port=%x type=%x",
1826 attr->qp_state, my_qp->init_attr.port_num,
1827 ibqp->qp_type);
1828 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
1829 goto out;
1830 }
1831 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
1832 }
1833
1834 ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
1835
1836out:
1837 if ((ret == 0) && (attr_mask & IB_QP_STATE))
1838 my_qp->state = attr->qp_state;
1839
1840 return ret;
1841}
1842
1843void ehca_recover_sqp(struct ib_qp *sqp)
1844{
1845 struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
1846 int port = my_sqp->init_attr.port_num;
1847 struct ib_qp_attr attr;
1848 struct ehca_mod_qp_parm *qp_parm;
1849 int i, qp_parm_idx, ret;
1850 unsigned long flags, wr_cnt;
1851
1852 if (!my_sqp->mod_qp_parm)
1853 return;
1854 ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
1855
1856 qp_parm = my_sqp->mod_qp_parm;
1857 qp_parm_idx = my_sqp->mod_qp_parm_idx;
1858 for (i = 0; i < qp_parm_idx; i++) {
1859 attr = qp_parm[i].attr;
1860 ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
1861 if (ret) {
1862 ehca_err(sqp->device, "Could not modify SQP port=%x "
1863 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
1864 goto free_qp_parm;
1865 }
1866 ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
1867 port, sqp->qp_num, attr.qp_state);
1868 }
1869
1870 /* re-trigger posted recv wrs */
1871 wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
1872 my_sqp->ipz_rqueue.qe_size;
1873 if (wr_cnt) {
1874 spin_lock_irqsave(&my_sqp->spinlock_r, flags);
1875 hipz_update_rqa(my_sqp, wr_cnt);
1876 spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
1877 ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
1878 port, sqp->qp_num, wr_cnt);
1879 }
1880
1881free_qp_parm:
1882 kfree(qp_parm);
1883 /* this prevents subsequent calls to modify_qp() from caching qp_attr */
1884 my_sqp->mod_qp_parm = NULL;
1885}
1886
1887int ehca_query_qp(struct ib_qp *qp,
1888 struct ib_qp_attr *qp_attr,
1889 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1890{
1891 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
1892 struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
1893 ib_device);
1894 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
1895 struct hcp_modify_qp_control_block *qpcb;
1896 int cnt, ret = 0;
1897 u64 h_ret;
1898
1899 if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
1900 ehca_err(qp->device, "Invalid attribute mask "
1901 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
1902 my_qp, qp->qp_num, qp_attr_mask);
1903 return -EINVAL;
1904 }
1905
1906 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1907 if (!qpcb) {
1908 ehca_err(qp->device, "Out of memory for qpcb "
1909 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
1910 return -ENOMEM;
1911 }
1912
1913 h_ret = hipz_h_query_qp(adapter_handle,
1914 my_qp->ipz_qp_handle,
1915 &my_qp->pf,
1916 qpcb, my_qp->galpas.kernel);
1917
1918 if (h_ret != H_SUCCESS) {
1919 ret = ehca2ib_return_code(h_ret);
1920 ehca_err(qp->device, "hipz_h_query_qp() failed "
1921 "ehca_qp=%p qp_num=%x h_ret=%lli",
1922 my_qp, qp->qp_num, h_ret);
1923 goto query_qp_exit1;
1924 }
1925
1926 qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
1927 qp_attr->qp_state = qp_attr->cur_qp_state;
1928
1929 if (qp_attr->cur_qp_state == -EINVAL) {
1930 ret = -EINVAL;
1931 ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
1932 "ehca_qp=%p qp_num=%x",
1933 qpcb->qp_state, my_qp, qp->qp_num);
1934 goto query_qp_exit1;
1935 }
1936
1937 if (qp_attr->qp_state == IB_QPS_SQD)
1938 qp_attr->sq_draining = 1;
1939
1940 qp_attr->qkey = qpcb->qkey;
1941 qp_attr->path_mtu = qpcb->path_mtu;
1942 qp_attr->path_mig_state = qpcb->path_migration_state - 1;
1943 qp_attr->rq_psn = qpcb->receive_psn;
1944 qp_attr->sq_psn = qpcb->send_psn;
1945 qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
1946 qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
1947 qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
1948 /* UD_AV CIRCUMVENTION */
1949 if (my_qp->qp_type == IB_QPT_UD) {
1950 qp_attr->cap.max_send_sge =
1951 qpcb->actual_nr_sges_in_sq_wqe - 2;
1952 qp_attr->cap.max_recv_sge =
1953 qpcb->actual_nr_sges_in_rq_wqe - 2;
1954 } else {
1955 qp_attr->cap.max_send_sge =
1956 qpcb->actual_nr_sges_in_sq_wqe;
1957 qp_attr->cap.max_recv_sge =
1958 qpcb->actual_nr_sges_in_rq_wqe;
1959 }
1960
1961 qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
1962 qp_attr->dest_qp_num = qpcb->dest_qp_nr;
1963
1964 qp_attr->pkey_index = qpcb->prim_p_key_idx;
1965 qp_attr->port_num = qpcb->prim_phys_port;
1966 qp_attr->timeout = qpcb->timeout;
1967 qp_attr->retry_cnt = qpcb->retry_count;
1968 qp_attr->rnr_retry = qpcb->rnr_retry_count;
1969
1970 qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
1971 qp_attr->alt_port_num = qpcb->alt_phys_port;
1972 qp_attr->alt_timeout = qpcb->timeout_al;
1973
1974 qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
1975 qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;
1976
1977 /* primary av */
1978 qp_attr->ah_attr.sl = qpcb->service_level;
1979
1980 if (qpcb->send_grh_flag) {
1981 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1982 }
1983
1984 qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
1985 qp_attr->ah_attr.dlid = qpcb->dlid;
1986 qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
1987 qp_attr->ah_attr.port_num = qp_attr->port_num;
1988
1989 /* primary GRH */
1990 qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
1991 qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
1992 qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
1993 qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;
1994
1995 for (cnt = 0; cnt < 16; cnt++)
1996 qp_attr->ah_attr.grh.dgid.raw[cnt] =
1997 qpcb->dest_gid.byte[cnt];
1998
1999 /* alternate AV */
2000 qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
2001 if (qpcb->send_grh_flag_al) {
2002 qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
2003 }
2004
2005 qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
2006 qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
2007 qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;
2008
2009 /* alternate GRH */
2010 qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
2011 qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
2012 qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
2013 qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;
2014
2015 for (cnt = 0; cnt < 16; cnt++)
2016 qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
2017 qpcb->dest_gid_al.byte[cnt];
2018
2019 /* return init attributes given in ehca_create_qp */
2020 if (qp_init_attr)
2021 *qp_init_attr = my_qp->init_attr;
2022
2023 if (ehca_debug_level >= 2)
2024 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
2025
2026query_qp_exit1:
2027 ehca_free_fw_ctrlblock(qpcb);
2028
2029 return ret;
2030}
2031
2032int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
2033 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
2034{
2035 struct ehca_qp *my_qp =
2036 container_of(ibsrq, struct ehca_qp, ib_srq);
2037 struct ehca_shca *shca =
2038 container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
2039 struct hcp_modify_qp_control_block *mqpcb;
2040 u64 update_mask;
2041 u64 h_ret;
2042 int ret = 0;
2043
2044 mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
2045 if (!mqpcb) {
2046 ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
2047 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
2048 return -ENOMEM;
2049 }
2050
2051 update_mask = 0;
2052 if (attr_mask & IB_SRQ_LIMIT) {
2053 attr_mask &= ~IB_SRQ_LIMIT;
2054 update_mask |=
2055 EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
2056 | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
2057 mqpcb->curr_srq_limit = attr->srq_limit;
2058 mqpcb->qp_aff_asyn_ev_log_reg =
2059 EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
2060 }
2061
2062 /* by now, all bits in attr_mask should have been cleared */
2063 if (attr_mask) {
2064 ehca_err(ibsrq->device, "invalid attribute mask bits set "
2065 "attr_mask=%x", attr_mask);
2066 ret = -EINVAL;
2067 goto modify_srq_exit0;
2068 }
2069
2070 if (ehca_debug_level >= 2)
2071 ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
2072
2073 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
2074 NULL, update_mask, mqpcb,
2075 my_qp->galpas.kernel);
2076
2077 if (h_ret != H_SUCCESS) {
2078 ret = ehca2ib_return_code(h_ret);
2079 ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
2080 "ehca_qp=%p qp_num=%x",
2081 h_ret, my_qp, my_qp->real_qp_num);
2082 }
2083
2084modify_srq_exit0:
2085 ehca_free_fw_ctrlblock(mqpcb);
2086
2087 return ret;
2088}
2089
2090int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
2091{
2092 struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
2093 struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
2094 ib_device);
2095 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
2096 struct hcp_modify_qp_control_block *qpcb;
2097 int ret = 0;
2098 u64 h_ret;
2099
2100 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
2101 if (!qpcb) {
2102 ehca_err(srq->device, "Out of memory for qpcb "
2103 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
2104 return -ENOMEM;
2105 }
2106
2107 h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
2108 NULL, qpcb, my_qp->galpas.kernel);
2109
2110 if (h_ret != H_SUCCESS) {
2111 ret = ehca2ib_return_code(h_ret);
2112 ehca_err(srq->device, "hipz_h_query_qp() failed "
2113 "ehca_qp=%p qp_num=%x h_ret=%lli",
2114 my_qp, my_qp->real_qp_num, h_ret);
2115 goto query_srq_exit1;
2116 }
2117
2118 srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
2119 srq_attr->max_sge = 3;
2120 srq_attr->srq_limit = qpcb->curr_srq_limit;
2121
2122 if (ehca_debug_level >= 2)
2123 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
2124
2125query_srq_exit1:
2126 ehca_free_fw_ctrlblock(qpcb);
2127
2128 return ret;
2129}
2130
2131static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
2132 struct ib_uobject *uobject)
2133{
2134 struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
2135 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
2136 ib_pd);
2137 struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
2138 u32 qp_num = my_qp->real_qp_num;
2139 int ret;
2140 u64 h_ret;
2141 u8 port_num;
2142 int is_user = 0;
2143 enum ib_qp_type qp_type;
2144 unsigned long flags;
2145
2146 if (uobject) {
2147 is_user = 1;
2148 if (my_qp->mm_count_galpa ||
2149 my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
2150 ehca_err(dev, "Resources still referenced in "
2151 "user space qp_num=%x", qp_num);
2152 return -EINVAL;
2153 }
2154 }
2155
2156 if (my_qp->send_cq) {
2157 ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
2158 if (ret) {
2159 ehca_err(dev, "Couldn't unassign qp from "
2160 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
2161 qp_num, my_qp->send_cq->cq_number);
2162 return ret;
2163 }
2164 }
2165
2166 write_lock_irqsave(&ehca_qp_idr_lock, flags);
2167 idr_remove(&ehca_qp_idr, my_qp->token);
2168 write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
2169
2170 /*
2171 * SRQs will never get into an error list and do not have a recv_cq,
2172 * so we need to skip them here.
2173 */
2174 if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
2175 del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
2176
2177 if (HAS_SQ(my_qp) && !is_user)
2178 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
2179
2180 /* now wait until all pending events have completed */
2181 wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
2182
2183 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
2184 if (h_ret != H_SUCCESS) {
2185 ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
2186 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
2187 return ehca2ib_return_code(h_ret);
2188 }
2189
2190 port_num = my_qp->init_attr.port_num;
2191 qp_type = my_qp->init_attr.qp_type;
2192
2193 if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
2194 spin_lock_irqsave(&sport->mod_sqp_lock, flags);
2195 kfree(my_qp->mod_qp_parm);
2196 my_qp->mod_qp_parm = NULL;
2197 shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
2198 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
2199 }
2200
2201 /* no support for IB_QPT_SMI yet */
2202 if (qp_type == IB_QPT_GSI) {
2203 struct ib_event event;
2204 ehca_info(dev, "device %s: port %x is inactive.",
2205 shca->ib_device.name, port_num);
2206 event.device = &shca->ib_device;
2207 event.event = IB_EVENT_PORT_ERR;
2208 event.element.port_num = port_num;
2209 shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
2210 ib_dispatch_event(&event);
2211 }
2212
2213 if (HAS_RQ(my_qp)) {
2214 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
2215 if (!is_user)
2216 vfree(my_qp->rq_map.map);
2217 }
2218 if (HAS_SQ(my_qp)) {
2219 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
2220 if (!is_user)
2221 vfree(my_qp->sq_map.map);
2222 }
2223 kmem_cache_free(qp_cache, my_qp);
2224 atomic_dec(&shca->num_qps);
2225 return 0;
2226}
2227
2228int ehca_destroy_qp(struct ib_qp *qp)
2229{
2230 return internal_destroy_qp(qp->device,
2231 container_of(qp, struct ehca_qp, ib_qp),
2232 qp->uobject);
2233}
2234
2235int ehca_destroy_srq(struct ib_srq *srq)
2236{
2237 return internal_destroy_qp(srq->device,
2238 container_of(srq, struct ehca_qp, ib_srq),
2239 srq->uobject);
2240}
2241
2242int ehca_init_qp_cache(void)
2243{
2244 qp_cache = kmem_cache_create("ehca_cache_qp",
2245 sizeof(struct ehca_qp), 0,
2246 SLAB_HWCACHE_ALIGN,
2247 NULL);
2248 if (!qp_cache)
2249 return -ENOMEM;
2250 return 0;
2251}
2252
2253void ehca_cleanup_qp_cache(void)
2254{
2255 kmem_cache_destroy(qp_cache);
2256}
diff --git a/drivers/staging/rdma/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
deleted file mode 100644
index 11813b880e16..000000000000
--- a/drivers/staging/rdma/ehca/ehca_reqs.c
+++ /dev/null
@@ -1,953 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * post_send/recv, poll_cq, req_notify
5 *
6 * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
7 * Waleri Fomin <fomin@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 * Reinhard Ernst <rernst@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44
45#include "ehca_classes.h"
46#include "ehca_tools.h"
47#include "ehca_qes.h"
48#include "ehca_iverbs.h"
49#include "hcp_if.h"
50#include "hipz_fns.h"
51
52/* in RC traffic, insert an empty RDMA READ every this many packets */
53#define ACK_CIRC_THRESHOLD 2000000
54
55static u64 replace_wr_id(u64 wr_id, u16 idx)
56{
57 u64 ret;
58
59 ret = wr_id & ~QMAP_IDX_MASK;
60 ret |= idx & QMAP_IDX_MASK;
61
62 return ret;
63}
64
65static u16 get_app_wr_id(u64 wr_id)
66{
67 return wr_id & QMAP_IDX_MASK;
68}
69
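/*
 * Illustrative sketch, not part of the original driver: how the two helpers
 * above round-trip a work request id.  The low-order QMAP_IDX_MASK bits of
 * the 64-bit wr_id carry the queue-map index while the WQE is in flight;
 * the application's original low bits are kept in the qmap entry and put
 * back when the completion is reported (see ehca_poll_cq_one() below).
 */
static inline u64 example_wr_id_roundtrip(u64 app_wr_id, u16 sq_map_idx)
{
	/* what post_send stores in the hardware WQE */
	u64 hw_wr_id = replace_wr_id(app_wr_id, sq_map_idx);

	/* what poll_cq hands back: the hardware wr_id with the app's bits restored */
	return replace_wr_id(hw_wr_id, get_app_wr_id(app_wr_id));
	/* the result equals app_wr_id again */
}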
70static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
71 struct ehca_wqe *wqe_p,
72 struct ib_recv_wr *recv_wr,
73 u32 rq_map_idx)
74{
75 u8 cnt_ds;
76 if (unlikely((recv_wr->num_sge < 0) ||
77 (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
78 ehca_gen_err("Invalid number of WQE SGE. "
79 "num_sqe=%x max_nr_of_sg=%x",
80 recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
81 return -EINVAL; /* invalid SG list length */
82 }
83
84 /* clear wqe header until sglist */
85 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
86
87 wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
88 wqe_p->nr_of_data_seg = recv_wr->num_sge;
89
90 for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
91 wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
92 recv_wr->sg_list[cnt_ds].addr;
93 wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
94 recv_wr->sg_list[cnt_ds].lkey;
95 wqe_p->u.all_rcv.sg_list[cnt_ds].length =
96 recv_wr->sg_list[cnt_ds].length;
97 }
98
99 if (ehca_debug_level >= 3) {
100 ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
101 ipz_rqueue);
102 ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
103 }
104
105 return 0;
106}
107
108#if defined(DEBUG_GSI_SEND_WR)
109
110/* need ib_mad struct */
111#include <rdma/ib_mad.h>
112
113static void trace_ud_wr(const struct ib_ud_wr *ud_wr)
114{
 115 int idx = 0;
 116 int j;
 117 while (ud_wr) {
 118 struct ib_mad_hdr *mad_hdr = ud_wr->mad_hdr;
119 struct ib_sge *sge = ud_wr->wr.sg_list;
120 ehca_gen_dbg("ud_wr#%x wr_id=%lx num_sge=%x "
121 "send_flags=%x opcode=%x", idx, ud_wr->wr.wr_id,
122 ud_wr->wr.num_sge, ud_wr->wr.send_flags,
 123 ud_wr->wr.opcode);
124 if (mad_hdr) {
125 ehca_gen_dbg("ud_wr#%x mad_hdr base_version=%x "
126 "mgmt_class=%x class_version=%x method=%x "
127 "status=%x class_specific=%x tid=%lx "
128 "attr_id=%x resv=%x attr_mod=%x",
129 idx, mad_hdr->base_version,
130 mad_hdr->mgmt_class,
131 mad_hdr->class_version, mad_hdr->method,
132 mad_hdr->status, mad_hdr->class_specific,
133 mad_hdr->tid, mad_hdr->attr_id,
134 mad_hdr->resv,
135 mad_hdr->attr_mod);
136 }
137 for (j = 0; j < ud_wr->wr.num_sge; j++) {
138 u8 *data = __va(sge->addr);
139 ehca_gen_dbg("ud_wr#%x sge#%x addr=%p length=%x "
140 "lkey=%x",
141 idx, j, data, sge->length, sge->lkey);
142 /* assume length is n*16 */
143 ehca_dmp(data, sge->length, "ud_wr#%x sge#%x",
144 idx, j);
145 sge++;
146 } /* eof for j */
147 idx++;
148 ud_wr = ud_wr(ud_wr->wr.next);
149 } /* eof while ud_wr */
150}
151
152#endif /* DEBUG_GSI_SEND_WR */
153
154static inline int ehca_write_swqe(struct ehca_qp *qp,
155 struct ehca_wqe *wqe_p,
156 struct ib_send_wr *send_wr,
157 u32 sq_map_idx,
158 int hidden)
159{
160 u32 idx;
161 u64 dma_length;
162 struct ehca_av *my_av;
163 u32 remote_qkey;
164 struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
165
166 if (unlikely((send_wr->num_sge < 0) ||
167 (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
168 ehca_gen_err("Invalid number of WQE SGE. "
169 "num_sqe=%x max_nr_of_sg=%x",
170 send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
171 return -EINVAL; /* invalid SG list length */
172 }
173
174 /* clear wqe header until sglist */
175 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
176
177 wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);
178
179 qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
180 qmap_entry->reported = 0;
181 qmap_entry->cqe_req = 0;
182
183 switch (send_wr->opcode) {
184 case IB_WR_SEND:
185 case IB_WR_SEND_WITH_IMM:
186 wqe_p->optype = WQE_OPTYPE_SEND;
187 break;
188 case IB_WR_RDMA_WRITE:
189 case IB_WR_RDMA_WRITE_WITH_IMM:
190 wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
191 break;
192 case IB_WR_RDMA_READ:
193 wqe_p->optype = WQE_OPTYPE_RDMAREAD;
194 break;
195 default:
196 ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
197 return -EINVAL; /* invalid opcode */
198 }
199
200 wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;
201
202 wqe_p->wr_flag = 0;
203
204 if ((send_wr->send_flags & IB_SEND_SIGNALED ||
205 qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
206 && !hidden) {
207 wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
208 qmap_entry->cqe_req = 1;
209 }
210
211 if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
212 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
213 /* this might not work as long as HW does not support it */
214 wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
215 wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
216 }
217
218 wqe_p->nr_of_data_seg = send_wr->num_sge;
219
220 switch (qp->qp_type) {
221 case IB_QPT_SMI:
222 case IB_QPT_GSI:
 223 /* no break is intentional here */
224 case IB_QPT_UD:
225 /* IB 1.2 spec C10-15 compliance */
226 remote_qkey = ud_wr(send_wr)->remote_qkey;
227 if (remote_qkey & 0x80000000)
228 remote_qkey = qp->qkey;
229
230 wqe_p->destination_qp_number = ud_wr(send_wr)->remote_qpn << 8;
231 wqe_p->local_ee_context_qkey = remote_qkey;
232 if (unlikely(!ud_wr(send_wr)->ah)) {
233 ehca_gen_err("ud_wr(send_wr) is NULL. qp=%p", qp);
234 return -EINVAL;
235 }
236 if (unlikely(ud_wr(send_wr)->remote_qpn == 0)) {
237 ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
238 return -EINVAL;
239 }
240 my_av = container_of(ud_wr(send_wr)->ah, struct ehca_av, ib_ah);
241 wqe_p->u.ud_av.ud_av = my_av->av;
242
243 /*
244 * omitted check of IB_SEND_INLINE
245 * since HW does not support it
246 */
247 for (idx = 0; idx < send_wr->num_sge; idx++) {
248 wqe_p->u.ud_av.sg_list[idx].vaddr =
249 send_wr->sg_list[idx].addr;
250 wqe_p->u.ud_av.sg_list[idx].lkey =
251 send_wr->sg_list[idx].lkey;
252 wqe_p->u.ud_av.sg_list[idx].length =
253 send_wr->sg_list[idx].length;
254 } /* eof for idx */
255 if (qp->qp_type == IB_QPT_SMI ||
256 qp->qp_type == IB_QPT_GSI)
257 wqe_p->u.ud_av.ud_av.pmtu = 1;
258 if (qp->qp_type == IB_QPT_GSI) {
259 wqe_p->pkeyi = ud_wr(send_wr)->pkey_index;
260#ifdef DEBUG_GSI_SEND_WR
261 trace_ud_wr(ud_wr(send_wr));
262#endif /* DEBUG_GSI_SEND_WR */
263 }
264 break;
265
266 case IB_QPT_UC:
267 if (send_wr->send_flags & IB_SEND_FENCE)
268 wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
269 /* no break is intentional here */
270 case IB_QPT_RC:
271 /* TODO: atomic not implemented */
272 wqe_p->u.nud.remote_virtual_address =
273 rdma_wr(send_wr)->remote_addr;
274 wqe_p->u.nud.rkey = rdma_wr(send_wr)->rkey;
275
276 /*
277 * omitted checking of IB_SEND_INLINE
278 * since HW does not support it
279 */
280 dma_length = 0;
281 for (idx = 0; idx < send_wr->num_sge; idx++) {
282 wqe_p->u.nud.sg_list[idx].vaddr =
283 send_wr->sg_list[idx].addr;
284 wqe_p->u.nud.sg_list[idx].lkey =
285 send_wr->sg_list[idx].lkey;
286 wqe_p->u.nud.sg_list[idx].length =
287 send_wr->sg_list[idx].length;
288 dma_length += send_wr->sg_list[idx].length;
289 } /* eof idx */
290 wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
291
292 /* unsolicited ack circumvention */
293 if (send_wr->opcode == IB_WR_RDMA_READ) {
294 /* on RDMA read, switch on and reset counters */
295 qp->message_count = qp->packet_count = 0;
296 qp->unsol_ack_circ = 1;
297 } else
298 /* else estimate #packets */
299 qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
300
301 break;
302
303 default:
304 ehca_gen_err("Invalid qptype=%x", qp->qp_type);
305 return -EINVAL;
306 }
307
308 if (ehca_debug_level >= 3) {
309 ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
310 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
311 }
312 return 0;
313}
314
315/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
316static inline void map_ib_wc_status(u32 cqe_status,
317 enum ib_wc_status *wc_status)
318{
319 if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
320 switch (cqe_status & 0x3F) {
321 case 0x01:
322 case 0x21:
323 *wc_status = IB_WC_LOC_LEN_ERR;
324 break;
325 case 0x02:
326 case 0x22:
327 *wc_status = IB_WC_LOC_QP_OP_ERR;
328 break;
329 case 0x03:
330 case 0x23:
331 *wc_status = IB_WC_LOC_EEC_OP_ERR;
332 break;
333 case 0x04:
334 case 0x24:
335 *wc_status = IB_WC_LOC_PROT_ERR;
336 break;
337 case 0x05:
338 case 0x25:
339 *wc_status = IB_WC_WR_FLUSH_ERR;
340 break;
341 case 0x06:
342 *wc_status = IB_WC_MW_BIND_ERR;
343 break;
344 case 0x07: /* remote error - look into bits 20:24 */
345 switch ((cqe_status
346 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
347 case 0x0:
348 /*
349 * PSN Sequence Error!
350 * couldn't find a matching status!
351 */
352 *wc_status = IB_WC_GENERAL_ERR;
353 break;
354 case 0x1:
355 *wc_status = IB_WC_REM_INV_REQ_ERR;
356 break;
357 case 0x2:
358 *wc_status = IB_WC_REM_ACCESS_ERR;
359 break;
360 case 0x3:
361 *wc_status = IB_WC_REM_OP_ERR;
362 break;
363 case 0x4:
364 *wc_status = IB_WC_REM_INV_RD_REQ_ERR;
365 break;
366 }
367 break;
368 case 0x08:
369 *wc_status = IB_WC_RETRY_EXC_ERR;
370 break;
371 case 0x09:
372 *wc_status = IB_WC_RNR_RETRY_EXC_ERR;
373 break;
374 case 0x0A:
375 case 0x2D:
376 *wc_status = IB_WC_REM_ABORT_ERR;
377 break;
378 case 0x0B:
379 case 0x2E:
380 *wc_status = IB_WC_INV_EECN_ERR;
381 break;
382 case 0x0C:
383 case 0x2F:
384 *wc_status = IB_WC_INV_EEC_STATE_ERR;
385 break;
386 case 0x0D:
387 *wc_status = IB_WC_BAD_RESP_ERR;
388 break;
389 case 0x10:
390 /* WQE purged */
391 *wc_status = IB_WC_WR_FLUSH_ERR;
392 break;
393 default:
394 *wc_status = IB_WC_FATAL_ERR;
395
396 }
397 } else
398 *wc_status = IB_WC_SUCCESS;
399}
400
401static inline int post_one_send(struct ehca_qp *my_qp,
402 struct ib_send_wr *cur_send_wr,
403 int hidden)
404{
405 struct ehca_wqe *wqe_p;
406 int ret;
407 u32 sq_map_idx;
408 u64 start_offset = my_qp->ipz_squeue.current_q_offset;
409
410 /* get pointer next to free WQE */
411 wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
412 if (unlikely(!wqe_p)) {
413 /* too many posted work requests: queue overflow */
414 ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
415 "qp_num=%x", my_qp->ib_qp.qp_num);
416 return -ENOMEM;
417 }
418
419 /*
420 * Get the index of the WQE in the send queue. The same index is used
421 * for writing into the sq_map.
422 */
423 sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
424
425 /* write a SEND WQE into the QUEUE */
426 ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
427 /*
428 * if something failed,
429 * reset the free entry pointer to the start value
430 */
431 if (unlikely(ret)) {
432 my_qp->ipz_squeue.current_q_offset = start_offset;
433 ehca_err(my_qp->ib_qp.device, "Could not write WQE "
434 "qp_num=%x", my_qp->ib_qp.qp_num);
435 return -EINVAL;
436 }
437
438 return 0;
439}
440
441int ehca_post_send(struct ib_qp *qp,
442 struct ib_send_wr *send_wr,
443 struct ib_send_wr **bad_send_wr)
444{
445 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
446 int wqe_cnt = 0;
447 int ret = 0;
448 unsigned long flags;
449
450 /* Reject WR if QP is in RESET, INIT or RTR state */
451 if (unlikely(my_qp->state < IB_QPS_RTS)) {
452 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
453 my_qp->state, qp->qp_num);
454 ret = -EINVAL;
455 goto out;
456 }
457
458 /* LOCK the QUEUE */
459 spin_lock_irqsave(&my_qp->spinlock_s, flags);
460
461 /* Send an empty extra RDMA read if:
462 * 1) there has been an RDMA read on this connection before
463 * 2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
464 * 3) we can be sure that any previous extra RDMA read has been
465 * processed so we don't overflow the SQ
466 */
467 if (unlikely(my_qp->unsol_ack_circ &&
468 my_qp->packet_count > ACK_CIRC_THRESHOLD &&
469 my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
470 /* insert an empty RDMA READ to fix up the remote QP state */
471 struct ib_send_wr circ_wr;
472 memset(&circ_wr, 0, sizeof(circ_wr));
473 circ_wr.opcode = IB_WR_RDMA_READ;
474 post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
475 wqe_cnt++;
476 ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
477 my_qp->message_count = my_qp->packet_count = 0;
478 }
479
480 /* loop processes list of send reqs */
481 while (send_wr) {
482 ret = post_one_send(my_qp, send_wr, 0);
483 if (unlikely(ret)) {
484 goto post_send_exit0;
485 }
486 wqe_cnt++;
487 send_wr = send_wr->next;
488 }
489
490post_send_exit0:
491 iosync(); /* serialize GAL register access */
492 hipz_update_sqa(my_qp, wqe_cnt);
493 if (unlikely(ret || ehca_debug_level >= 2))
494 ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
495 my_qp, qp->qp_num, wqe_cnt, ret);
496 my_qp->message_count += wqe_cnt;
497 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
498
499out:
500 if (ret)
501 *bad_send_wr = send_wr;
502 return ret;
503}
504
505static int internal_post_recv(struct ehca_qp *my_qp,
506 struct ib_device *dev,
507 struct ib_recv_wr *recv_wr,
508 struct ib_recv_wr **bad_recv_wr)
509{
510 struct ehca_wqe *wqe_p;
511 int wqe_cnt = 0;
512 int ret = 0;
513 u32 rq_map_idx;
514 unsigned long flags;
515 struct ehca_qmap_entry *qmap_entry;
516
517 if (unlikely(!HAS_RQ(my_qp))) {
518 ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
519 my_qp, my_qp->real_qp_num, my_qp->ext_type);
520 ret = -ENODEV;
521 goto out;
522 }
523
524 /* LOCK the QUEUE */
525 spin_lock_irqsave(&my_qp->spinlock_r, flags);
526
527 /* loop processes list of recv reqs */
528 while (recv_wr) {
529 u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
530 /* get pointer next to free WQE */
531 wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
532 if (unlikely(!wqe_p)) {
533 /* too many posted work requests: queue overflow */
534 ret = -ENOMEM;
535 ehca_err(dev, "Too many posted WQEs "
536 "qp_num=%x", my_qp->real_qp_num);
537 goto post_recv_exit0;
538 }
539 /*
540 * Get the index of the WQE in the recv queue. The same index
541 * is used for writing into the rq_map.
542 */
543 rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
544
545 /* write a RECV WQE into the QUEUE */
546 ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
547 rq_map_idx);
548 /*
549 * if something failed,
550 * reset the free entry pointer to the start value
551 */
552 if (unlikely(ret)) {
553 my_qp->ipz_rqueue.current_q_offset = start_offset;
554 ret = -EINVAL;
555 ehca_err(dev, "Could not write WQE "
556 "qp_num=%x", my_qp->real_qp_num);
557 goto post_recv_exit0;
558 }
559
560 qmap_entry = &my_qp->rq_map.map[rq_map_idx];
561 qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
562 qmap_entry->reported = 0;
563 qmap_entry->cqe_req = 1;
564
565 wqe_cnt++;
566 recv_wr = recv_wr->next;
567 } /* eof for recv_wr */
568
569post_recv_exit0:
570 iosync(); /* serialize GAL register access */
571 hipz_update_rqa(my_qp, wqe_cnt);
572 if (unlikely(ret || ehca_debug_level >= 2))
573 ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
574 my_qp, my_qp->real_qp_num, wqe_cnt, ret);
575 spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
576
577out:
578 if (ret)
579 *bad_recv_wr = recv_wr;
580
581 return ret;
582}
583
584int ehca_post_recv(struct ib_qp *qp,
585 struct ib_recv_wr *recv_wr,
586 struct ib_recv_wr **bad_recv_wr)
587{
588 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
589
590 /* Reject WR if QP is in RESET state */
591 if (unlikely(my_qp->state == IB_QPS_RESET)) {
592 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
593 my_qp->state, qp->qp_num);
594 *bad_recv_wr = recv_wr;
595 return -EINVAL;
596 }
597
598 return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
599}
600
601int ehca_post_srq_recv(struct ib_srq *srq,
602 struct ib_recv_wr *recv_wr,
603 struct ib_recv_wr **bad_recv_wr)
604{
605 return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
606 srq->device, recv_wr, bad_recv_wr);
607}
608
609/*
610 * ib_wc_opcode table converts ehca wc opcode to ib
611 * Since we use zero to indicate invalid opcode, the actual ib opcode must
612 * be decremented!!!
613 */
614static const u8 ib_wc_opcode[255] = {
615 [0x01] = IB_WC_RECV+1,
616 [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
617 [0x08] = IB_WC_FETCH_ADD+1,
618 [0x10] = IB_WC_COMP_SWAP+1,
619 [0x20] = IB_WC_RDMA_WRITE+1,
620 [0x40] = IB_WC_RDMA_READ+1,
621 [0x80] = IB_WC_SEND+1
622};
623
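/*
 * Illustrative note, not an original comment: the table above is offset by
 * one so that an entry of 0 marks an optype the driver does not know.  For
 * example, optype 0x80 maps to IB_WC_SEND+1 here and to IB_WC_SEND after
 * ehca_poll_cq_one() subtracts 1 again; any unlisted optype yields 0, which
 * becomes -1 and is rejected as an invalid CQE.
 */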
624/* internal function to poll one entry of cq */
625static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
626{
627 int ret = 0, qmap_tail_idx;
628 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
629 struct ehca_cqe *cqe;
630 struct ehca_qp *my_qp;
631 struct ehca_qmap_entry *qmap_entry;
632 struct ehca_queue_map *qmap;
633 int cqe_count = 0, is_error;
634
635repoll:
636 cqe = (struct ehca_cqe *)
637 ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
638 if (!cqe) {
639 ret = -EAGAIN;
640 if (ehca_debug_level >= 3)
641 ehca_dbg(cq->device, "Completion queue is empty "
642 "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
643 goto poll_cq_one_exit0;
644 }
645
646 /* prevents loads being reordered across this point */
647 rmb();
648
649 cqe_count++;
650 if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
651 struct ehca_qp *qp;
652 int purgeflag;
653 unsigned long flags;
654
655 qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
656 if (!qp) {
657 ehca_err(cq->device, "cq_num=%x qp_num=%x "
658 "could not find qp -> ignore cqe",
659 my_cq->cq_number, cqe->local_qp_number);
660 ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
661 my_cq->cq_number, cqe->local_qp_number);
662 /* ignore this purged cqe */
663 goto repoll;
664 }
665 spin_lock_irqsave(&qp->spinlock_s, flags);
666 purgeflag = qp->sqerr_purgeflag;
667 spin_unlock_irqrestore(&qp->spinlock_s, flags);
668
669 if (purgeflag) {
670 ehca_dbg(cq->device,
671 "Got CQE with purged bit qp_num=%x src_qp=%x",
672 cqe->local_qp_number, cqe->remote_qp_number);
673 if (ehca_debug_level >= 2)
674 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
675 cqe->local_qp_number,
676 cqe->remote_qp_number);
677 /*
 678 * ignore this CQE to avoid a duplicate completion for the
 679 * bad WQE that caused the SQ error, and turn off the purge flag
680 */
681 qp->sqerr_purgeflag = 0;
682 goto repoll;
683 }
684 }
685
686 is_error = cqe->status & WC_STATUS_ERROR_BIT;
687
688 /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
689 if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
690 ehca_dbg(cq->device,
691 "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
692 is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
693 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
694 my_cq, my_cq->cq_number);
695 ehca_dbg(cq->device,
696 "ehca_cq=%p cq_num=%x -------------------------",
697 my_cq, my_cq->cq_number);
698 }
699
700 read_lock(&ehca_qp_idr_lock);
701 my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
702 read_unlock(&ehca_qp_idr_lock);
703 if (!my_qp)
704 goto repoll;
705 wc->qp = &my_qp->ib_qp;
706
707 qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
708 if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
709 /* We got a send completion. */
710 qmap = &my_qp->sq_map;
711 else
712 /* We got a receive completion. */
713 qmap = &my_qp->rq_map;
714
715 /* advance the tail pointer */
716 qmap->tail = qmap_tail_idx;
717
718 if (is_error) {
719 /*
720 * set left_to_poll to 0 because in error state, we will not
721 * get any additional CQEs
722 */
723 my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
724 my_qp->sq_map.entries);
725 my_qp->sq_map.left_to_poll = 0;
726 ehca_add_to_err_list(my_qp, 1);
727
728 my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
729 my_qp->rq_map.entries);
730 my_qp->rq_map.left_to_poll = 0;
731 if (HAS_RQ(my_qp))
732 ehca_add_to_err_list(my_qp, 0);
733 }
734
735 qmap_entry = &qmap->map[qmap_tail_idx];
736 if (qmap_entry->reported) {
737 ehca_warn(cq->device, "Double cqe on qp_num=%#x",
738 my_qp->real_qp_num);
739 /* found a double cqe, discard it and read next one */
740 goto repoll;
741 }
742
743 wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
744 qmap_entry->reported = 1;
745
746 /* if left_to_poll is decremented to 0, add the QP to the error list */
747 if (qmap->left_to_poll > 0) {
748 qmap->left_to_poll--;
749 if ((my_qp->sq_map.left_to_poll == 0) &&
750 (my_qp->rq_map.left_to_poll == 0)) {
751 ehca_add_to_err_list(my_qp, 1);
752 if (HAS_RQ(my_qp))
753 ehca_add_to_err_list(my_qp, 0);
754 }
755 }
756
757 /* eval ib_wc_opcode */
758 wc->opcode = ib_wc_opcode[cqe->optype]-1;
759 if (unlikely(wc->opcode == -1)) {
760 ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
761 "ehca_cq=%p cq_num=%x",
762 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
763 /* dump cqe for other infos */
764 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
765 my_cq, my_cq->cq_number);
766 /* update also queue adder to throw away this entry!!! */
767 goto repoll;
768 }
769
770 /* eval ib_wc_status */
771 if (unlikely(is_error)) {
772 /* complete with errors */
773 map_ib_wc_status(cqe->status, &wc->status);
774 wc->vendor_err = wc->status;
775 } else
776 wc->status = IB_WC_SUCCESS;
777
778 wc->byte_len = cqe->nr_bytes_transferred;
779 wc->pkey_index = cqe->pkey_index;
780 wc->slid = cqe->rlid;
781 wc->dlid_path_bits = cqe->dlid;
782 wc->src_qp = cqe->remote_qp_number;
783 /*
784 * HW has "Immed data present" and "GRH present" in bits 6 and 5.
785 * SW defines those in bits 1 and 0, so we can just shift and mask.
786 */
787 wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
788 wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
789 wc->sl = cqe->service_level;
790
791poll_cq_one_exit0:
792 if (cqe_count > 0)
793 hipz_update_feca(my_cq, cqe_count);
794
795 return ret;
796}
797
798static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
799 struct ib_wc *wc, int num_entries,
800 struct ipz_queue *ipz_queue, int on_sq)
801{
802 int nr = 0;
803 struct ehca_wqe *wqe;
804 u64 offset;
805 struct ehca_queue_map *qmap;
806 struct ehca_qmap_entry *qmap_entry;
807
808 if (on_sq)
809 qmap = &my_qp->sq_map;
810 else
811 qmap = &my_qp->rq_map;
812
813 qmap_entry = &qmap->map[qmap->next_wqe_idx];
814
815 while ((nr < num_entries) && (qmap_entry->reported == 0)) {
816 /* generate flush CQE */
817
818 memset(wc, 0, sizeof(*wc));
819
820 offset = qmap->next_wqe_idx * ipz_queue->qe_size;
821 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
822 if (!wqe) {
823 ehca_err(cq->device, "Invalid wqe offset=%#llx on "
824 "qp_num=%#x", offset, my_qp->real_qp_num);
825 return nr;
826 }
827
828 wc->wr_id = replace_wr_id(wqe->work_request_id,
829 qmap_entry->app_wr_id);
830
831 if (on_sq) {
832 switch (wqe->optype) {
833 case WQE_OPTYPE_SEND:
834 wc->opcode = IB_WC_SEND;
835 break;
836 case WQE_OPTYPE_RDMAWRITE:
837 wc->opcode = IB_WC_RDMA_WRITE;
838 break;
839 case WQE_OPTYPE_RDMAREAD:
840 wc->opcode = IB_WC_RDMA_READ;
841 break;
842 default:
843 ehca_err(cq->device, "Invalid optype=%x",
844 wqe->optype);
845 return nr;
846 }
847 } else
848 wc->opcode = IB_WC_RECV;
849
850 if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
851 wc->ex.imm_data = wqe->immediate_data;
852 wc->wc_flags |= IB_WC_WITH_IMM;
853 }
854
855 wc->status = IB_WC_WR_FLUSH_ERR;
856
857 wc->qp = &my_qp->ib_qp;
858
859 /* mark as reported and advance next_wqe pointer */
860 qmap_entry->reported = 1;
861 qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
862 qmap->entries);
863 qmap_entry = &qmap->map[qmap->next_wqe_idx];
864
865 wc++; nr++;
866 }
867
868 return nr;
869
870}
871
872int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
873{
874 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
875 int nr;
876 struct ehca_qp *err_qp;
877 struct ib_wc *current_wc = wc;
878 int ret = 0;
879 unsigned long flags;
880 int entries_left = num_entries;
881
882 if (num_entries < 1) {
883 ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
884 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
885 ret = -EINVAL;
886 goto poll_cq_exit0;
887 }
888
889 spin_lock_irqsave(&my_cq->spinlock, flags);
890
891 /* generate flush cqes for send queues */
892 list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
893 nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
894 &err_qp->ipz_squeue, 1);
895 entries_left -= nr;
896 current_wc += nr;
897
898 if (entries_left == 0)
899 break;
900 }
901
902 /* generate flush cqes for receive queues */
903 list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
904 nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
905 &err_qp->ipz_rqueue, 0);
906 entries_left -= nr;
907 current_wc += nr;
908
909 if (entries_left == 0)
910 break;
911 }
912
913 for (nr = 0; nr < entries_left; nr++) {
914 ret = ehca_poll_cq_one(cq, current_wc);
915 if (ret)
916 break;
917 current_wc++;
918 } /* eof for nr */
919 entries_left -= nr;
920
921 spin_unlock_irqrestore(&my_cq->spinlock, flags);
922 if (ret == -EAGAIN || !ret)
923 ret = num_entries - entries_left;
924
925poll_cq_exit0:
926 return ret;
927}
928
929int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
930{
931 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
932 int ret = 0;
933
934 switch (notify_flags & IB_CQ_SOLICITED_MASK) {
935 case IB_CQ_SOLICITED:
936 hipz_set_cqx_n0(my_cq, 1);
937 break;
938 case IB_CQ_NEXT_COMP:
939 hipz_set_cqx_n1(my_cq, 1);
940 break;
941 default:
942 return -EINVAL;
943 }
944
945 if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
946 unsigned long spl_flags;
947 spin_lock_irqsave(&my_cq->spinlock, spl_flags);
948 ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
949 spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
950 }
951
952 return ret;
953}
diff --git a/drivers/staging/rdma/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c
deleted file mode 100644
index 376b031c2c7f..000000000000
--- a/drivers/staging/rdma/ehca/ehca_sqp.c
+++ /dev/null
@@ -1,245 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * SQP functions
5 *
6 * Authors: Khadija Souissi <souissi@de.ibm.com>
7 * Heiko J Schick <schickhj@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include <rdma/ib_mad.h>
43
44#include "ehca_classes.h"
45#include "ehca_tools.h"
46#include "ehca_iverbs.h"
47#include "hcp_if.h"
48
49#define IB_MAD_STATUS_REDIRECT cpu_to_be16(0x0002)
50#define IB_MAD_STATUS_UNSUP_VERSION cpu_to_be16(0x0004)
51#define IB_MAD_STATUS_UNSUP_METHOD cpu_to_be16(0x0008)
52
53#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
54
55/**
 56 * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When the special
 57 * queue pair is created successfully, the corresponding port becomes active.
 58 *
 59 * Defining special queue pair 0 (SMI QP) is not supported yet.
60 *
61 * @qp_init_attr: Queue pair init attributes with port and queue pair type
62 */
63
64u64 ehca_define_sqp(struct ehca_shca *shca,
65 struct ehca_qp *ehca_qp,
66 struct ib_qp_init_attr *qp_init_attr)
67{
68 u32 pma_qp_nr, bma_qp_nr;
69 u64 ret;
70 u8 port = qp_init_attr->port_num;
71 int counter;
72
73 shca->sport[port - 1].port_state = IB_PORT_DOWN;
74
75 switch (qp_init_attr->qp_type) {
76 case IB_QPT_SMI:
77 /* function not supported yet */
78 break;
79 case IB_QPT_GSI:
80 ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
81 ehca_qp->ipz_qp_handle,
82 ehca_qp->galpas.kernel,
83 (u32) qp_init_attr->port_num,
84 &pma_qp_nr, &bma_qp_nr);
85
86 if (ret != H_SUCCESS) {
87 ehca_err(&shca->ib_device,
88 "Can't define AQP1 for port %x. h_ret=%lli",
89 port, ret);
90 return ret;
91 }
92 shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
93 ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
94 port, pma_qp_nr);
95 break;
96 default:
97 ehca_err(&shca->ib_device, "invalid qp_type=%x",
98 qp_init_attr->qp_type);
99 return H_PARAMETER;
100 }
101
102 if (ehca_nr_ports < 0) /* autodetect mode */
103 return H_SUCCESS;
104
105 for (counter = 0;
106 shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
107 counter < ehca_port_act_time;
108 counter++) {
109 ehca_dbg(&shca->ib_device, "... wait until port %x is active",
110 port);
111 msleep_interruptible(1000);
112 }
113
114 if (counter == ehca_port_act_time) {
115 ehca_err(&shca->ib_device, "Port %x is not active.", port);
116 return H_HARDWARE;
117 }
118
119 return H_SUCCESS;
120}
121
122struct ib_perf {
123 struct ib_mad_hdr mad_hdr;
124 u8 reserved[40];
125 u8 data[192];
126} __attribute__ ((packed));
127
128/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
129struct tcslfl {
130 u32 tc:8;
131 u32 sl:4;
132 u32 fl:20;
133} __attribute__ ((packed));
134
135/* IP Version/TC/FL packed into 32 bits, as in GRH */
136struct vertcfl {
137 u32 ver:4;
138 u32 tc:8;
139 u32 fl:20;
140} __attribute__ ((packed));
141
142static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
143 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
144 const struct ib_mad *in_mad, struct ib_mad *out_mad)
145{
146 const struct ib_perf *in_perf = (const struct ib_perf *)in_mad;
147 struct ib_perf *out_perf = (struct ib_perf *)out_mad;
148 struct ib_class_port_info *poi =
149 (struct ib_class_port_info *)out_perf->data;
150 struct tcslfl *tcslfl =
151 (struct tcslfl *)&poi->redirect_tcslfl;
152 struct ehca_shca *shca =
153 container_of(ibdev, struct ehca_shca, ib_device);
154 struct ehca_sport *sport = &shca->sport[port_num - 1];
155
156 ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
157
158 *out_mad = *in_mad;
159
160 if (in_perf->mad_hdr.class_version != 1) {
161 ehca_warn(ibdev, "Unsupported class_version=%x",
162 in_perf->mad_hdr.class_version);
163 out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
164 goto perf_reply;
165 }
166
167 switch (in_perf->mad_hdr.method) {
168 case IB_MGMT_METHOD_GET:
169 case IB_MGMT_METHOD_SET:
170 /* set class port info for redirection */
171 out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
172 out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
173 memset(poi, 0, sizeof(*poi));
174 poi->base_version = 1;
175 poi->class_version = 1;
176 poi->resp_time_value = 18;
177
178 /* copy local routing information from WC where applicable */
179 tcslfl->sl = in_wc->sl;
180 poi->redirect_lid =
181 sport->saved_attr.lid | in_wc->dlid_path_bits;
182 poi->redirect_qp = sport->pma_qp_nr;
183 poi->redirect_qkey = IB_QP1_QKEY;
184
185 ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
186 &poi->redirect_pkey);
187
188 /* if request was globally routed, copy route info */
189 if (in_grh) {
190 const struct vertcfl *vertcfl =
191 (const struct vertcfl *)&in_grh->version_tclass_flow;
192 memcpy(poi->redirect_gid, in_grh->dgid.raw,
193 sizeof(poi->redirect_gid));
194 tcslfl->tc = vertcfl->tc;
195 tcslfl->fl = vertcfl->fl;
196 } else
197 /* else only fill in default GID */
198 ehca_query_gid(ibdev, port_num, 0,
199 (union ib_gid *)&poi->redirect_gid);
200
201 ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
202 sport->saved_attr.lid, sport->pma_qp_nr);
203 break;
204
205 case IB_MGMT_METHOD_GET_RESP:
206 return IB_MAD_RESULT_FAILURE;
207
208 default:
209 out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
210 break;
211 }
212
213perf_reply:
214 out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
215
216 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
217}
218
219int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
220 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
221 const struct ib_mad_hdr *in, size_t in_mad_size,
222 struct ib_mad_hdr *out, size_t *out_mad_size,
223 u16 *out_mad_pkey_index)
224{
225 int ret;
226 const struct ib_mad *in_mad = (const struct ib_mad *)in;
227 struct ib_mad *out_mad = (struct ib_mad *)out;
228
229 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
230 *out_mad_size != sizeof(*out_mad)))
231 return IB_MAD_RESULT_FAILURE;
232
233 if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
234 return IB_MAD_RESULT_FAILURE;
235
236 /* accept only pma request */
237 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
238 return IB_MAD_RESULT_SUCCESS;
239
240 ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
241 ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
242 in_mad, out_mad);
243
244 return ret;
245}
diff --git a/drivers/staging/rdma/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h
deleted file mode 100644
index d280b12aae64..000000000000
--- a/drivers/staging/rdma/ehca/ehca_tools.h
+++ /dev/null
@@ -1,155 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * auxiliary functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Khadija Souissi <souissik@de.ibm.com>
9 * Waleri Fomin <fomin@de.ibm.com>
10 * Heiko J Schick <schickhj@de.ibm.com>
11 *
12 * Copyright (c) 2005 IBM Corporation
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43
44#ifndef EHCA_TOOLS_H
45#define EHCA_TOOLS_H
46
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/delay.h>
50#include <linux/idr.h>
51#include <linux/kthread.h>
52#include <linux/mm.h>
53#include <linux/mman.h>
54#include <linux/module.h>
55#include <linux/moduleparam.h>
56#include <linux/vmalloc.h>
57#include <linux/notifier.h>
58#include <linux/cpu.h>
59#include <linux/device.h>
60
61#include <linux/atomic.h>
62#include <asm/ibmebus.h>
63#include <asm/io.h>
64#include <asm/pgtable.h>
65#include <asm/hvcall.h>
66
67extern int ehca_debug_level;
68
69#define ehca_dbg(ib_dev, format, arg...) \
70 do { \
71 if (unlikely(ehca_debug_level)) \
72 dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
73 "PU%04x EHCA_DBG:%s " format "\n", \
74 raw_smp_processor_id(), __func__, \
75 ## arg); \
76 } while (0)
77
78#define ehca_info(ib_dev, format, arg...) \
79 dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
80 raw_smp_processor_id(), __func__, ## arg)
81
82#define ehca_warn(ib_dev, format, arg...) \
83 dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
84 raw_smp_processor_id(), __func__, ## arg)
85
86#define ehca_err(ib_dev, format, arg...) \
87 dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
88 raw_smp_processor_id(), __func__, ## arg)
89
90/* use this one only if no ib_dev available */
91#define ehca_gen_dbg(format, arg...) \
92 do { \
93 if (unlikely(ehca_debug_level)) \
94 printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
95 raw_smp_processor_id(), __func__, ## arg); \
96 } while (0)
97
98#define ehca_gen_warn(format, arg...) \
99 printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
100 raw_smp_processor_id(), __func__, ## arg)
101
102#define ehca_gen_err(format, arg...) \
103 printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
104 raw_smp_processor_id(), __func__, ## arg)
105
106/**
107 * ehca_dmp - printk a memory block, whose length is n*8 bytes.
108 * Each line has the following layout:
109 * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
110 */
111#define ehca_dmp(adr, len, format, args...) \
112 do { \
113 unsigned int x; \
114 unsigned int l = (unsigned int)(len); \
115 unsigned char *deb = (unsigned char *)(adr); \
116 for (x = 0; x < l; x += 16) { \
117 printk(KERN_INFO "EHCA_DMP:%s " format \
118 " adr=%p ofs=%04x %016llx %016llx\n", \
119 __func__, ##args, deb, x, \
120 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
121 deb += 16; \
122 } \
123 } while (0)
124
125/* define a bitmask, little endian version */
126#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
127
128/* define a bitmask, the ibm way... */
 129#define EHCA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))
130
131/* internal function, don't use */
132#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
133
134/* internal function, don't use */
135#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
136
137/**
138 * EHCA_BMASK_SET - return value shifted and masked by mask
139 * variable|=EHCA_BMASK_SET(MY_MASK,0x4711) ORs the bits in variable
140 * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
141 * in variable
142 */
143#define EHCA_BMASK_SET(mask, value) \
144 ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
145
146/**
147 * EHCA_BMASK_GET - extract a parameter from value by mask
148 */
149#define EHCA_BMASK_GET(mask, value) \
150 (EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
151
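/*
 * Illustrative sketch, not part of the original header: a hypothetical field
 * defined as
 *
 *	#define EXAMPLE_FIELD	EHCA_BMASK(8, 4)
 *
 * has EHCA_BMASK_SHIFTPOS(EXAMPLE_FIELD) == 8 and
 * EHCA_BMASK_MASK(EXAMPLE_FIELD) == 0xf, so
 *
 *	EHCA_BMASK_SET(EXAMPLE_FIELD, 0x5)   == 0x500
 *	EHCA_BMASK_GET(EXAMPLE_FIELD, 0x500) == 0x5
 *
 * i.e. SET places a value into the 4-bit field at bit position 8 and GET
 * extracts it again.
 */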
152/* Converts ehca to ib return code */
153int ehca2ib_return_code(u64 ehca_rc);
154
155#endif /* EHCA_TOOLS_H */
diff --git a/drivers/staging/rdma/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c
deleted file mode 100644
index 1a1d5d99fcf9..000000000000
--- a/drivers/staging/rdma/ehca/ehca_uverbs.c
+++ /dev/null
@@ -1,309 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * userspace support verbs
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Heiko J Schick <schickhj@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#include <linux/slab.h>
44
45#include "ehca_classes.h"
46#include "ehca_iverbs.h"
47#include "ehca_mrmw.h"
48#include "ehca_tools.h"
49#include "hcp_if.h"
50
51struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
52 struct ib_udata *udata)
53{
54 struct ehca_ucontext *my_context;
55
56 my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
57 if (!my_context) {
58 ehca_err(device, "Out of memory device=%p", device);
59 return ERR_PTR(-ENOMEM);
60 }
61
62 return &my_context->ib_ucontext;
63}
64
65int ehca_dealloc_ucontext(struct ib_ucontext *context)
66{
67 kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
68 return 0;
69}
70
71static void ehca_mm_open(struct vm_area_struct *vma)
72{
73 u32 *count = (u32 *)vma->vm_private_data;
74 if (!count) {
75 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
76 vma->vm_start, vma->vm_end);
77 return;
78 }
79 (*count)++;
80 if (!(*count))
81 ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
82 vma->vm_start, vma->vm_end);
83 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
84 vma->vm_start, vma->vm_end, *count);
85}
86
87static void ehca_mm_close(struct vm_area_struct *vma)
88{
89 u32 *count = (u32 *)vma->vm_private_data;
90 if (!count) {
91 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
92 vma->vm_start, vma->vm_end);
93 return;
94 }
95 (*count)--;
96 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
97 vma->vm_start, vma->vm_end, *count);
98}
99
100static const struct vm_operations_struct vm_ops = {
101 .open = ehca_mm_open,
102 .close = ehca_mm_close,
103};
104
105static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
106 u32 *mm_count)
107{
108 int ret;
109 u64 vsize, physical;
110
111 vsize = vma->vm_end - vma->vm_start;
112 if (vsize < EHCA_PAGESIZE) {
113 ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
114 return -EINVAL;
115 }
116
117 physical = galpas->user.fw_handle;
118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
119 ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
120 /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
121 ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
122 vma->vm_page_prot);
123 if (unlikely(ret)) {
124 ehca_gen_err("remap_pfn_range() failed ret=%i", ret);
125 return -ENOMEM;
126 }
127
128 vma->vm_private_data = mm_count;
129 (*mm_count)++;
130 vma->vm_ops = &vm_ops;
131
132 return 0;
133}
134
135static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
136 u32 *mm_count)
137{
138 int ret;
139 u64 start, ofs;
140 struct page *page;
141
142 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
143 start = vma->vm_start;
144 for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
145 u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
146 page = virt_to_page(virt_addr);
147 ret = vm_insert_page(vma, start, page);
148 if (unlikely(ret)) {
149 ehca_gen_err("vm_insert_page() failed rc=%i", ret);
150 return ret;
151 }
152 start += PAGE_SIZE;
153 }
154 vma->vm_private_data = mm_count;
155 (*mm_count)++;
156 vma->vm_ops = &vm_ops;
157
158 return 0;
159}
160
161static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
162 u32 rsrc_type)
163{
164 int ret;
165
166 switch (rsrc_type) {
167 case 0: /* galpa fw handle */
168 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
170 if (unlikely(ret)) {
171 ehca_err(cq->ib_cq.device,
172 "ehca_mmap_fw() failed rc=%i cq_num=%x",
173 ret, cq->cq_number);
174 return ret;
175 }
176 break;
177
178 case 1: /* cq queue_addr */
179 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
181 if (unlikely(ret)) {
182 ehca_err(cq->ib_cq.device,
183 "ehca_mmap_queue() failed rc=%i cq_num=%x",
184 ret, cq->cq_number);
185 return ret;
186 }
187 break;
188
189 default:
190 ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
191 rsrc_type, cq->cq_number);
192 return -EINVAL;
193 }
194
195 return 0;
196}
197
198static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
199 u32 rsrc_type)
200{
201 int ret;
202
203 switch (rsrc_type) {
204 case 0: /* galpa fw handle */
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
207 if (unlikely(ret)) {
208 ehca_err(qp->ib_qp.device,
209 "remap_pfn_range() failed ret=%i qp_num=%x",
210 ret, qp->ib_qp.qp_num);
211 return -ENOMEM;
212 }
213 break;
214
215 case 1: /* qp rqueue_addr */
216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
217 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
218 &qp->mm_count_rqueue);
219 if (unlikely(ret)) {
220 ehca_err(qp->ib_qp.device,
221 "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
222 ret, qp->ib_qp.qp_num);
223 return ret;
224 }
225 break;
226
227 case 2: /* qp squeue_addr */
228 ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
229 ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
230 &qp->mm_count_squeue);
231 if (unlikely(ret)) {
232 ehca_err(qp->ib_qp.device,
233 "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
234 ret, qp->ib_qp.qp_num);
235 return ret;
236 }
237 break;
238
239 default:
 240 ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
241 rsrc_type, qp->ib_qp.qp_num);
242 return -EINVAL;
243 }
244
245 return 0;
246}
247
248int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
249{
250 u64 fileoffset = vma->vm_pgoff;
251 u32 idr_handle = fileoffset & 0x1FFFFFF;
252 u32 q_type = (fileoffset >> 27) & 0x1; /* CQ, QP,... */
253 u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
254 u32 ret;
255 struct ehca_cq *cq;
256 struct ehca_qp *qp;
257 struct ib_uobject *uobject;
258
259 switch (q_type) {
260 case 0: /* CQ */
261 read_lock(&ehca_cq_idr_lock);
262 cq = idr_find(&ehca_cq_idr, idr_handle);
263 read_unlock(&ehca_cq_idr_lock);
264
265 /* make sure this mmap really belongs to the authorized user */
266 if (!cq)
267 return -EINVAL;
268
269 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
270 return -EINVAL;
271
272 ret = ehca_mmap_cq(vma, cq, rsrc_type);
273 if (unlikely(ret)) {
274 ehca_err(cq->ib_cq.device,
275 "ehca_mmap_cq() failed rc=%i cq_num=%x",
276 ret, cq->cq_number);
277 return ret;
278 }
279 break;
280
281 case 1: /* QP */
282 read_lock(&ehca_qp_idr_lock);
283 qp = idr_find(&ehca_qp_idr, idr_handle);
284 read_unlock(&ehca_qp_idr_lock);
285
286 /* make sure this mmap really belongs to the authorized user */
287 if (!qp)
288 return -EINVAL;
289
290 uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
291 if (!uobject || uobject->context != context)
292 return -EINVAL;
293
294 ret = ehca_mmap_qp(vma, qp, rsrc_type);
295 if (unlikely(ret)) {
296 ehca_err(qp->ib_qp.device,
297 "ehca_mmap_qp() failed rc=%i qp_num=%x",
298 ret, qp->ib_qp.qp_num);
299 return ret;
300 }
301 break;
302
303 default:
304 ehca_gen_err("bad queue type %x", q_type);
305 return -EINVAL;
306 }
307
308 return 0;
309}
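For readers skimming the removed ehca_mmap() above: the vm_pgoff it decodes packs the idr handle, the resource type, and the queue type into a single page offset. A minimal sketch of how such an offset would be composed, derived from the masks and shifts in the function above; the helper name is hypothetical and not part of the driver:

/* Hypothetical helper, for illustration only: compose the mmap page
 * offset that ehca_mmap() above decodes.  Bits 0-24 carry the idr
 * handle, bits 25-26 the resource type (0 = firmware galpa, 1 = recv
 * queue, 2 = send queue), and bit 27 the queue type (0 = CQ, 1 = QP).
 * The value is a page offset, i.e. the byte offset passed to mmap()
 * shifted right by PAGE_SHIFT. */
static inline unsigned long ehca_make_mmap_pgoff(u32 idr_handle,
						  u32 rsrc_type, u32 q_type)
{
	return (idr_handle & 0x1FFFFFF) |
	       ((rsrc_type & 0x3) << 25) |
	       ((q_type & 0x1) << 27);
}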
diff --git a/drivers/staging/rdma/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c
deleted file mode 100644
index 89517ffb4389..000000000000
--- a/drivers/staging/rdma/ehca/hcp_if.c
+++ /dev/null
@@ -1,949 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Firmware Infiniband Interface code for POWER
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 * Gerd Bayer <gerd.bayer@de.ibm.com>
10 * Waleri Fomin <fomin@de.ibm.com>
11 *
12 * Copyright (c) 2005 IBM Corporation
13 *
14 * All rights reserved.
15 *
16 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
17 * BSD.
18 *
19 * OpenIB BSD License
20 *
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions are met:
23 *
24 * Redistributions of source code must retain the above copyright notice, this
25 * list of conditions and the following disclaimer.
26 *
27 * Redistributions in binary form must reproduce the above copyright notice,
28 * this list of conditions and the following disclaimer in the documentation
29 * and/or other materials
30 * provided with the distribution.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
33 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
36 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGE.
43 */
44
45#include <asm/hvcall.h>
46#include "ehca_tools.h"
47#include "hcp_if.h"
48#include "hcp_phyp.h"
49#include "hipz_fns.h"
50#include "ipz_pt_fn.h"
51
52#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
53#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
54#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
55#define H_ALL_RES_QP_STORAGE EHCA_BMASK_IBM(16, 17)
56#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
57#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
58#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
59#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
60#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
61#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
62#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
63
64#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
65#define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
66#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
67#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
68
69#define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
70#define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
71#define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
72#define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
73#define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)
74
75#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
76#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
77#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
78#define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
79
80#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
81#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
82
83#define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
84#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
85#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
86
87#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
88#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
89#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
90
91static DEFINE_SPINLOCK(hcall_lock);
92
93static long ehca_plpar_hcall_norets(unsigned long opcode,
94 unsigned long arg1,
95 unsigned long arg2,
96 unsigned long arg3,
97 unsigned long arg4,
98 unsigned long arg5,
99 unsigned long arg6,
100 unsigned long arg7)
101{
102 long ret;
103 int i, sleep_msecs;
104 unsigned long flags = 0;
105
106 if (unlikely(ehca_debug_level >= 2))
107 ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
108 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
109
110 for (i = 0; i < 5; i++) {
111 /* serialize hCalls to work around firmware issue */
112 if (ehca_lock_hcalls)
113 spin_lock_irqsave(&hcall_lock, flags);
114
115 ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
116 arg5, arg6, arg7);
117
118 if (ehca_lock_hcalls)
119 spin_unlock_irqrestore(&hcall_lock, flags);
120
121 if (H_IS_LONG_BUSY(ret)) {
122 sleep_msecs = get_longbusy_msecs(ret);
123 msleep_interruptible(sleep_msecs);
124 continue;
125 }
126
127 if (ret < H_SUCCESS)
128 ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
129 opcode, ret, arg1, arg2, arg3,
130 arg4, arg5, arg6, arg7);
131 else
132 if (unlikely(ehca_debug_level >= 2))
133 ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
134
135 return ret;
136 }
137
138 return H_BUSY;
139}
140
141static long ehca_plpar_hcall9(unsigned long opcode,
142 unsigned long *outs, /* array of 9 outputs */
143 unsigned long arg1,
144 unsigned long arg2,
145 unsigned long arg3,
146 unsigned long arg4,
147 unsigned long arg5,
148 unsigned long arg6,
149 unsigned long arg7,
150 unsigned long arg8,
151 unsigned long arg9)
152{
153 long ret;
154 int i, sleep_msecs;
155 unsigned long flags = 0;
156
157 if (unlikely(ehca_debug_level >= 2))
158 ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
159 arg1, arg2, arg3, arg4, arg5,
160 arg6, arg7, arg8, arg9);
161
162 for (i = 0; i < 5; i++) {
163 /* serialize hCalls to work around firmware issue */
164 if (ehca_lock_hcalls)
165 spin_lock_irqsave(&hcall_lock, flags);
166
167 ret = plpar_hcall9(opcode, outs,
168 arg1, arg2, arg3, arg4, arg5,
169 arg6, arg7, arg8, arg9);
170
171 if (ehca_lock_hcalls)
172 spin_unlock_irqrestore(&hcall_lock, flags);
173
174 if (H_IS_LONG_BUSY(ret)) {
175 sleep_msecs = get_longbusy_msecs(ret);
176 msleep_interruptible(sleep_msecs);
177 continue;
178 }
179
180 if (ret < H_SUCCESS) {
181 ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
182 opcode, arg1, arg2, arg3, arg4, arg5,
183 arg6, arg7, arg8, arg9);
184 ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
185 ret, outs[0], outs[1], outs[2], outs[3],
186 outs[4], outs[5], outs[6], outs[7],
187 outs[8]);
188 } else if (unlikely(ehca_debug_level >= 2))
189 ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
190 ret, outs[0], outs[1], outs[2], outs[3],
191 outs[4], outs[5], outs[6], outs[7],
192 outs[8]);
193 return ret;
194 }
195
196 return H_BUSY;
197}
198
199u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
200 struct ehca_pfeq *pfeq,
201 const u32 neq_control,
202 const u32 number_of_entries,
203 struct ipz_eq_handle *eq_handle,
204 u32 *act_nr_of_entries,
205 u32 *act_pages,
206 u32 *eq_ist)
207{
208 u64 ret;
209 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
210 u64 allocate_controls;
211
212 /* resource type */
213 allocate_controls = 3ULL;
214
215 /* ISN is associated */
216 if (neq_control != 1)
217 allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
218 else /* notification event queue */
219 allocate_controls = (1ULL << 63) | allocate_controls;
220
221 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
222 adapter_handle.handle, /* r4 */
223 allocate_controls, /* r5 */
224 number_of_entries, /* r6 */
225 0, 0, 0, 0, 0, 0);
226 eq_handle->handle = outs[0];
227 *act_nr_of_entries = (u32)outs[3];
228 *act_pages = (u32)outs[4];
229 *eq_ist = (u32)outs[5];
230
231 if (ret == H_NOT_ENOUGH_RESOURCES)
232 ehca_gen_err("Not enough resource - ret=%lli ", ret);
233
234 return ret;
235}
236
237u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
238 struct ipz_eq_handle eq_handle,
239 const u64 event_mask)
240{
241 return ehca_plpar_hcall_norets(H_RESET_EVENTS,
242 adapter_handle.handle, /* r4 */
243 eq_handle.handle, /* r5 */
244 event_mask, /* r6 */
245 0, 0, 0, 0);
246}
247
248u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
249 struct ehca_cq *cq,
250 struct ehca_alloc_cq_parms *param)
251{
252 int rc;
253 u64 ret;
254 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
255
256 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
257 adapter_handle.handle, /* r4 */
258 2, /* r5 */
259 param->eq_handle.handle, /* r6 */
260 cq->token, /* r7 */
261 param->nr_cqe, /* r8 */
262 0, 0, 0, 0);
263 cq->ipz_cq_handle.handle = outs[0];
264 param->act_nr_of_entries = (u32)outs[3];
265 param->act_pages = (u32)outs[4];
266
267 if (ret == H_SUCCESS) {
268 rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
269 if (rc) {
270 ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
271 rc, outs[5]);
272
273 ehca_plpar_hcall_norets(H_FREE_RESOURCE,
274 adapter_handle.handle, /* r4 */
275 cq->ipz_cq_handle.handle, /* r5 */
276 0, 0, 0, 0, 0);
277 ret = H_NO_MEM;
278 }
279 }
280
281 if (ret == H_NOT_ENOUGH_RESOURCES)
282 ehca_gen_err("Not enough resources. ret=%lli", ret);
283
284 return ret;
285}
286
287u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
288 struct ehca_alloc_qp_parms *parms, int is_user)
289{
290 int rc;
291 u64 ret;
292 u64 allocate_controls, max_r10_reg, r11, r12;
293 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
294
295 allocate_controls =
296 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
297 | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
298 | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
299 | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
300 | EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
301 | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
302 parms->squeue.page_size)
303 | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
304 parms->rqueue.page_size)
305 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
306 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
307 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
308 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
309 | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
310 parms->ud_av_l_key_ctl)
311 | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
312
313 max_r10_reg =
314 EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
315 parms->squeue.max_wr + 1)
316 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
317 parms->rqueue.max_wr + 1)
318 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
319 parms->squeue.max_sge)
320 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
321 parms->rqueue.max_sge);
322
323 r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
324
325 if (parms->ext_type == EQPT_SRQ)
326 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
327 else
328 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
329
330 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
331 adapter_handle.handle, /* r4 */
332 allocate_controls, /* r5 */
333 parms->send_cq_handle.handle,
334 parms->recv_cq_handle.handle,
335 parms->eq_handle.handle,
336 ((u64)parms->token << 32) | parms->pd.value,
337 max_r10_reg, r11, r12);
338
339 parms->qp_handle.handle = outs[0];
340 parms->real_qp_num = (u32)outs[1];
341 parms->squeue.act_nr_wqes =
342 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
343 parms->rqueue.act_nr_wqes =
344 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
345 parms->squeue.act_nr_sges =
346 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
347 parms->rqueue.act_nr_sges =
348 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
349 parms->squeue.queue_size =
350 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
351 parms->rqueue.queue_size =
352 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
353
354 if (ret == H_SUCCESS) {
355 rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
356 if (rc) {
357 ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
358 rc, outs[6]);
359
360 ehca_plpar_hcall_norets(H_FREE_RESOURCE,
361 adapter_handle.handle, /* r4 */
362 parms->qp_handle.handle, /* r5 */
363 0, 0, 0, 0, 0);
364 ret = H_NO_MEM;
365 }
366 }
367
368 if (ret == H_NOT_ENOUGH_RESOURCES)
369 ehca_gen_err("Not enough resources. ret=%lli", ret);
370
371 return ret;
372}
373
374u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
375 const u8 port_id,
376 struct hipz_query_port *query_port_response_block)
377{
378 u64 ret;
379 u64 r_cb = __pa(query_port_response_block);
380
381 if (r_cb & (EHCA_PAGESIZE-1)) {
382 ehca_gen_err("response block not page aligned");
383 return H_PARAMETER;
384 }
385
386 ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
387 adapter_handle.handle, /* r4 */
388 port_id, /* r5 */
389 r_cb, /* r6 */
390 0, 0, 0, 0);
391
392 if (ehca_debug_level >= 2)
393 ehca_dmp(query_port_response_block, 64, "response_block");
394
395 return ret;
396}
397
398u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
399 const u8 port_id, const u32 port_cap,
400 const u8 init_type, const int modify_mask)
401{
402 u64 port_attributes = port_cap;
403
404 if (modify_mask & IB_PORT_SHUTDOWN)
405 port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
406 if (modify_mask & IB_PORT_INIT_TYPE)
407 port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
408 if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
409 port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
410
411 return ehca_plpar_hcall_norets(H_MODIFY_PORT,
412 adapter_handle.handle, /* r4 */
413 port_id, /* r5 */
414 port_attributes, /* r6 */
415 0, 0, 0, 0);
416}
417
418u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
419 struct hipz_query_hca *query_hca_rblock)
420{
421 u64 r_cb = __pa(query_hca_rblock);
422
423 if (r_cb & (EHCA_PAGESIZE-1)) {
424 ehca_gen_err("response_block=%p not page aligned",
425 query_hca_rblock);
426 return H_PARAMETER;
427 }
428
429 return ehca_plpar_hcall_norets(H_QUERY_HCA,
430 adapter_handle.handle, /* r4 */
431 r_cb, /* r5 */
432 0, 0, 0, 0, 0);
433}
434
435u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
436 const u8 pagesize,
437 const u8 queue_type,
438 const u64 resource_handle,
439 const u64 logical_address_of_page,
440 u64 count)
441{
442 return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
443 adapter_handle.handle, /* r4 */
444 (u64)queue_type | ((u64)pagesize) << 8,
445 /* r5 */
446 resource_handle, /* r6 */
447 logical_address_of_page, /* r7 */
448 count, /* r8 */
449 0, 0);
450}
451
452u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
453 const struct ipz_eq_handle eq_handle,
454 struct ehca_pfeq *pfeq,
455 const u8 pagesize,
456 const u8 queue_type,
457 const u64 logical_address_of_page,
458 const u64 count)
459{
460 if (count != 1) {
461 ehca_gen_err("Ppage counter=%llx", count);
462 return H_PARAMETER;
463 }
464 return hipz_h_register_rpage(adapter_handle,
465 pagesize,
466 queue_type,
467 eq_handle.handle,
468 logical_address_of_page, count);
469}
470
471u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
472 u32 ist)
473{
474 u64 ret;
475 ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
476 adapter_handle.handle, /* r4 */
477 ist, /* r5 */
478 0, 0, 0, 0, 0);
479
480 if (ret != H_SUCCESS && ret != H_BUSY)
481 ehca_gen_err("Could not query interrupt state.");
482
483 return ret;
484}
485
486u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
487 const struct ipz_cq_handle cq_handle,
488 struct ehca_pfcq *pfcq,
489 const u8 pagesize,
490 const u8 queue_type,
491 const u64 logical_address_of_page,
492 const u64 count,
493 const struct h_galpa gal)
494{
495 if (count != 1) {
496 ehca_gen_err("Page counter=%llx", count);
497 return H_PARAMETER;
498 }
499
500 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
501 cq_handle.handle, logical_address_of_page,
502 count);
503}
504
505u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
506 const struct ipz_qp_handle qp_handle,
507 struct ehca_pfqp *pfqp,
508 const u8 pagesize,
509 const u8 queue_type,
510 const u64 logical_address_of_page,
511 const u64 count,
512 const struct h_galpa galpa)
513{
514 if (count > 1) {
515 ehca_gen_err("Page counter=%llx", count);
516 return H_PARAMETER;
517 }
518
519 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
520 qp_handle.handle, logical_address_of_page,
521 count);
522}
523
524u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
525 const struct ipz_qp_handle qp_handle,
526 struct ehca_pfqp *pfqp,
527 void **log_addr_next_sq_wqe2processed,
528 void **log_addr_next_rq_wqe2processed,
529 int dis_and_get_function_code)
530{
531 u64 ret;
532 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
533
534 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
535 adapter_handle.handle, /* r4 */
536 dis_and_get_function_code, /* r5 */
537 qp_handle.handle, /* r6 */
538 0, 0, 0, 0, 0, 0);
539 if (log_addr_next_sq_wqe2processed)
540 *log_addr_next_sq_wqe2processed = (void *)outs[0];
541 if (log_addr_next_rq_wqe2processed)
542 *log_addr_next_rq_wqe2processed = (void *)outs[1];
543
544 return ret;
545}
546
547u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
548 const struct ipz_qp_handle qp_handle,
549 struct ehca_pfqp *pfqp,
550 const u64 update_mask,
551 struct hcp_modify_qp_control_block *mqpcb,
552 struct h_galpa gal)
553{
554 u64 ret;
555 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
556 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
557 adapter_handle.handle, /* r4 */
558 qp_handle.handle, /* r5 */
559 update_mask, /* r6 */
560 __pa(mqpcb), /* r7 */
561 0, 0, 0, 0, 0);
562
563 if (ret == H_NOT_ENOUGH_RESOURCES)
564 ehca_gen_err("Insufficient resources ret=%lli", ret);
565
566 return ret;
567}
568
569u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
570 const struct ipz_qp_handle qp_handle,
571 struct ehca_pfqp *pfqp,
572 struct hcp_modify_qp_control_block *qqpcb,
573 struct h_galpa gal)
574{
575 return ehca_plpar_hcall_norets(H_QUERY_QP,
576 adapter_handle.handle, /* r4 */
577 qp_handle.handle, /* r5 */
578 __pa(qqpcb), /* r6 */
579 0, 0, 0, 0);
580}
581
582u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
583 struct ehca_qp *qp)
584{
585 u64 ret;
586 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
587
588 ret = hcp_galpas_dtor(&qp->galpas);
589 if (ret) {
590 ehca_gen_err("Could not destruct qp->galpas");
591 return H_RESOURCE;
592 }
593 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
594 adapter_handle.handle, /* r4 */
595 /* function code */
596 1, /* r5 */
597 qp->ipz_qp_handle.handle, /* r6 */
598 0, 0, 0, 0, 0, 0);
599 if (ret == H_HARDWARE)
600 ehca_gen_err("HCA not operational. ret=%lli", ret);
601
602 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
603 adapter_handle.handle, /* r4 */
604 qp->ipz_qp_handle.handle, /* r5 */
605 0, 0, 0, 0, 0);
606
607 if (ret == H_RESOURCE)
608 ehca_gen_err("Resource still in use. ret=%lli", ret);
609
610 return ret;
611}
612
613u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
614 const struct ipz_qp_handle qp_handle,
615 struct h_galpa gal,
616 u32 port)
617{
618 return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
619 adapter_handle.handle, /* r4 */
620 qp_handle.handle, /* r5 */
621 port, /* r6 */
622 0, 0, 0, 0);
623}
624
625u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
626 const struct ipz_qp_handle qp_handle,
627 struct h_galpa gal,
628 u32 port, u32 * pma_qp_nr,
629 u32 * bma_qp_nr)
630{
631 u64 ret;
632 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
633
634 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
635 adapter_handle.handle, /* r4 */
636 qp_handle.handle, /* r5 */
637 port, /* r6 */
638 0, 0, 0, 0, 0, 0);
639 *pma_qp_nr = (u32)outs[0];
640 *bma_qp_nr = (u32)outs[1];
641
642 if (ret == H_ALIAS_EXIST)
643 ehca_gen_err("AQP1 already exists. ret=%lli", ret);
644
645 return ret;
646}
647
648u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
649 const struct ipz_qp_handle qp_handle,
650 struct h_galpa gal,
651 u16 mcg_dlid,
652 u64 subnet_prefix, u64 interface_id)
653{
654 u64 ret;
655
656 ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
657 adapter_handle.handle, /* r4 */
658 qp_handle.handle, /* r5 */
659 mcg_dlid, /* r6 */
660 interface_id, /* r7 */
661 subnet_prefix, /* r8 */
662 0, 0);
663
664 if (ret == H_NOT_ENOUGH_RESOURCES)
665 ehca_gen_err("Not enough resources. ret=%lli", ret);
666
667 return ret;
668}
669
670u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
671 const struct ipz_qp_handle qp_handle,
672 struct h_galpa gal,
673 u16 mcg_dlid,
674 u64 subnet_prefix, u64 interface_id)
675{
676 return ehca_plpar_hcall_norets(H_DETACH_MCQP,
677 adapter_handle.handle, /* r4 */
678 qp_handle.handle, /* r5 */
679 mcg_dlid, /* r6 */
680 interface_id, /* r7 */
681 subnet_prefix, /* r8 */
682 0, 0);
683}
684
685u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
686 struct ehca_cq *cq,
687 u8 force_flag)
688{
689 u64 ret;
690
691 ret = hcp_galpas_dtor(&cq->galpas);
692 if (ret) {
693 ehca_gen_err("Could not destruct cp->galpas");
694 return H_RESOURCE;
695 }
696
697 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
698 adapter_handle.handle, /* r4 */
699 cq->ipz_cq_handle.handle, /* r5 */
700 force_flag != 0 ? 1L : 0L, /* r6 */
701 0, 0, 0, 0);
702
703 if (ret == H_RESOURCE)
704 ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
705
706 return ret;
707}
708
709u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
710 struct ehca_eq *eq)
711{
712 u64 ret;
713
714 ret = hcp_galpas_dtor(&eq->galpas);
715 if (ret) {
716 ehca_gen_err("Could not destruct eq->galpas");
717 return H_RESOURCE;
718 }
719
720 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
721 adapter_handle.handle, /* r4 */
722 eq->ipz_eq_handle.handle, /* r5 */
723 0, 0, 0, 0, 0);
724
725 if (ret == H_RESOURCE)
726 ehca_gen_err("Resource in use. ret=%lli ", ret);
727
728 return ret;
729}
730
731u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
732 const struct ehca_mr *mr,
733 const u64 vaddr,
734 const u64 length,
735 const u32 access_ctrl,
736 const struct ipz_pd pd,
737 struct ehca_mr_hipzout_parms *outparms)
738{
739 u64 ret;
740 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
741
742 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
743 adapter_handle.handle, /* r4 */
744 5, /* r5 */
745 vaddr, /* r6 */
746 length, /* r7 */
747 (((u64)access_ctrl) << 32ULL), /* r8 */
748 pd.value, /* r9 */
749 0, 0, 0);
750 outparms->handle.handle = outs[0];
751 outparms->lkey = (u32)outs[2];
752 outparms->rkey = (u32)outs[3];
753
754 return ret;
755}
756
757u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
758 const struct ehca_mr *mr,
759 const u8 pagesize,
760 const u8 queue_type,
761 const u64 logical_address_of_page,
762 const u64 count)
763{
764 u64 ret;
765
766 if (unlikely(ehca_debug_level >= 3)) {
767 if (count > 1) {
768 u64 *kpage;
769 int i;
770 kpage = __va(logical_address_of_page);
771 for (i = 0; i < count; i++)
772 ehca_gen_dbg("kpage[%d]=%p",
773 i, (void *)kpage[i]);
774 } else
775 ehca_gen_dbg("kpage=%p",
776 (void *)logical_address_of_page);
777 }
778
779 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
780 ehca_gen_err("logical_address_of_page not on a 4k boundary "
781 "adapter_handle=%llx mr=%p mr_handle=%llx "
782 "pagesize=%x queue_type=%x "
783 "logical_address_of_page=%llx count=%llx",
784 adapter_handle.handle, mr,
785 mr->ipz_mr_handle.handle, pagesize, queue_type,
786 logical_address_of_page, count);
787 ret = H_PARAMETER;
788 } else
789 ret = hipz_h_register_rpage(adapter_handle, pagesize,
790 queue_type,
791 mr->ipz_mr_handle.handle,
792 logical_address_of_page, count);
793 return ret;
794}
795
796u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
797 const struct ehca_mr *mr,
798 struct ehca_mr_hipzout_parms *outparms)
799{
800 u64 ret;
801 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
802
803 ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
804 adapter_handle.handle, /* r4 */
805 mr->ipz_mr_handle.handle, /* r5 */
806 0, 0, 0, 0, 0, 0, 0);
807 outparms->len = outs[0];
808 outparms->vaddr = outs[1];
809 outparms->acl = outs[4] >> 32;
810 outparms->lkey = (u32)(outs[5] >> 32);
811 outparms->rkey = (u32)(outs[5] & (0xffffffff));
812
813 return ret;
814}
815
816u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
817 const struct ehca_mr *mr)
818{
819 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
820 adapter_handle.handle, /* r4 */
821 mr->ipz_mr_handle.handle, /* r5 */
822 0, 0, 0, 0, 0);
823}
824
825u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
826 const struct ehca_mr *mr,
827 const u64 vaddr_in,
828 const u64 length,
829 const u32 access_ctrl,
830 const struct ipz_pd pd,
831 const u64 mr_addr_cb,
832 struct ehca_mr_hipzout_parms *outparms)
833{
834 u64 ret;
835 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
836
837 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
838 adapter_handle.handle, /* r4 */
839 mr->ipz_mr_handle.handle, /* r5 */
840 vaddr_in, /* r6 */
841 length, /* r7 */
842 /* r8 */
843 ((((u64)access_ctrl) << 32ULL) | pd.value),
844 mr_addr_cb, /* r9 */
845 0, 0, 0);
846 outparms->vaddr = outs[1];
847 outparms->lkey = (u32)outs[2];
848 outparms->rkey = (u32)outs[3];
849
850 return ret;
851}
852
853u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
854 const struct ehca_mr *mr,
855 const struct ehca_mr *orig_mr,
856 const u64 vaddr_in,
857 const u32 access_ctrl,
858 const struct ipz_pd pd,
859 struct ehca_mr_hipzout_parms *outparms)
860{
861 u64 ret;
862 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
863
864 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
865 adapter_handle.handle, /* r4 */
866 orig_mr->ipz_mr_handle.handle, /* r5 */
867 vaddr_in, /* r6 */
868 (((u64)access_ctrl) << 32ULL), /* r7 */
869 pd.value, /* r8 */
870 0, 0, 0, 0);
871 outparms->handle.handle = outs[0];
872 outparms->lkey = (u32)outs[2];
873 outparms->rkey = (u32)outs[3];
874
875 return ret;
876}
877
878u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
879 const struct ehca_mw *mw,
880 const struct ipz_pd pd,
881 struct ehca_mw_hipzout_parms *outparms)
882{
883 u64 ret;
884 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
885
886 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
887 adapter_handle.handle, /* r4 */
888 6, /* r5 */
889 pd.value, /* r6 */
890 0, 0, 0, 0, 0, 0);
891 outparms->handle.handle = outs[0];
892 outparms->rkey = (u32)outs[3];
893
894 return ret;
895}
896
897u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
898 const struct ehca_mw *mw,
899 struct ehca_mw_hipzout_parms *outparms)
900{
901 u64 ret;
902 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
903
904 ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
905 adapter_handle.handle, /* r4 */
906 mw->ipz_mw_handle.handle, /* r5 */
907 0, 0, 0, 0, 0, 0, 0);
908 outparms->rkey = (u32)outs[3];
909
910 return ret;
911}
912
913u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
914 const struct ehca_mw *mw)
915{
916 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
917 adapter_handle.handle, /* r4 */
918 mw->ipz_mw_handle.handle, /* r5 */
919 0, 0, 0, 0, 0);
920}
921
922u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
923 const u64 ressource_handle,
924 void *rblock,
925 unsigned long *byte_count)
926{
927 u64 r_cb = __pa(rblock);
928
929 if (r_cb & (EHCA_PAGESIZE-1)) {
930 ehca_gen_err("rblock not page aligned.");
931 return H_PARAMETER;
932 }
933
934 return ehca_plpar_hcall_norets(H_ERROR_DATA,
935 adapter_handle.handle,
936 ressource_handle,
937 r_cb,
938 0, 0, 0, 0);
939}
940
941u64 hipz_h_eoi(int irq)
942{
943 unsigned long xirr;
944
945 iosync();
946 xirr = (0xffULL << 24) | irq;
947
948 return plpar_hcall_norets(H_EOI, xirr);
949}
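A recurring pattern in the removed hcp_if.c above is that the response blocks handed to the query hcalls (hipz_h_query_port(), hipz_h_query_hca(), hipz_h_error_data()) must be page aligned, otherwise the wrapper bails out with H_PARAMETER. A minimal caller sketch under that assumption, using get_zeroed_page() to satisfy the alignment check; example_query_hca() is an illustrative name, not a function from the driver:

/* Illustration only: allocate a page-aligned response block so the
 * r_cb alignment check in hipz_h_query_hca() passes, then release it. */
static u64 example_query_hca(struct ipz_adapter_handle adapter_handle)
{
	struct hipz_query_hca *rblock;
	u64 ret;

	rblock = (struct hipz_query_hca *)get_zeroed_page(GFP_KERNEL);
	if (!rblock)
		return H_NO_MEM;	/* hcall-style error, as used above */

	ret = hipz_h_query_hca(adapter_handle, rblock);
	if (ret == H_SUCCESS)
		ehca_gen_dbg("max_qp=%x num_ports=%x",
			     rblock->max_qp, rblock->num_ports);

	free_page((unsigned long)rblock);
	return ret;
}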
diff --git a/drivers/staging/rdma/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h
deleted file mode 100644
index a46e514c367b..000000000000
--- a/drivers/staging/rdma/ehca/hcp_if.h
+++ /dev/null
@@ -1,265 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Firmware Infiniband Interface code for POWER
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Gerd Bayer <gerd.bayer@de.ibm.com>
9 * Waleri Fomin <fomin@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef __HCP_IF_H__
45#define __HCP_IF_H__
46
47#include "ehca_classes.h"
48#include "ehca_tools.h"
49#include "hipz_hw.h"
50
51/*
52 * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initializes
53 * the resources, and creates the empty EQPT (ring).
54 */
55u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
56 struct ehca_pfeq *pfeq,
57 const u32 neq_control,
58 const u32 number_of_entries,
59 struct ipz_eq_handle *eq_handle,
60 u32 * act_nr_of_entries,
61 u32 * act_pages,
62 u32 * eq_ist);
63
64u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
65 struct ipz_eq_handle eq_handle,
66 const u64 event_mask);
67/*
68 * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW, initializes
69 * the resources, and creates the empty CQPT (ring).
70 */
71u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
72 struct ehca_cq *cq,
73 struct ehca_alloc_cq_parms *param);
74
75
76/*
77 * hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
78 * initializes the resources, and creates the empty QPPTs (2 rings).
79 */
80u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
81 struct ehca_alloc_qp_parms *parms, int is_user);
82
83u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
84 const u8 port_id,
85 struct hipz_query_port *query_port_response_block);
86
87u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
88 const u8 port_id, const u32 port_cap,
89 const u8 init_type, const int modify_mask);
90
91u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
92 struct hipz_query_hca *query_hca_rblock);
93
94/*
95 * hipz_h_register_rpage is the internal function used by all
96 * H_REGISTER_RPAGES wrappers.
97 */
98u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
99 const u8 pagesize,
100 const u8 queue_type,
101 const u64 resource_handle,
102 const u64 logical_address_of_page,
103 u64 count);
104
105u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
106 const struct ipz_eq_handle eq_handle,
107 struct ehca_pfeq *pfeq,
108 const u8 pagesize,
109 const u8 queue_type,
110 const u64 logical_address_of_page,
111 const u64 count);
112
113u64 hipz_h_query_int_state(const struct ipz_adapter_handle
114 hcp_adapter_handle,
115 u32 ist);
116
117u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
118 const struct ipz_cq_handle cq_handle,
119 struct ehca_pfcq *pfcq,
120 const u8 pagesize,
121 const u8 queue_type,
122 const u64 logical_address_of_page,
123 const u64 count,
124 const struct h_galpa gal);
125
126u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
127 const struct ipz_qp_handle qp_handle,
128 struct ehca_pfqp *pfqp,
129 const u8 pagesize,
130 const u8 queue_type,
131 const u64 logical_address_of_page,
132 const u64 count,
133 const struct h_galpa galpa);
134
135u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
136 const struct ipz_qp_handle qp_handle,
137 struct ehca_pfqp *pfqp,
138 void **log_addr_next_sq_wqe_tb_processed,
139 void **log_addr_next_rq_wqe_tb_processed,
140 int dis_and_get_function_code);
141enum hcall_sigt {
142 HCALL_SIGT_NO_CQE = 0,
143 HCALL_SIGT_BY_WQE = 1,
144 HCALL_SIGT_EVERY = 2
145};
146
147u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
148 const struct ipz_qp_handle qp_handle,
149 struct ehca_pfqp *pfqp,
150 const u64 update_mask,
151 struct hcp_modify_qp_control_block *mqpcb,
152 struct h_galpa gal);
153
154u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
155 const struct ipz_qp_handle qp_handle,
156 struct ehca_pfqp *pfqp,
157 struct hcp_modify_qp_control_block *qqpcb,
158 struct h_galpa gal);
159
160u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
161 struct ehca_qp *qp);
162
163u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
164 const struct ipz_qp_handle qp_handle,
165 struct h_galpa gal,
166 u32 port);
167
168u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
169 const struct ipz_qp_handle qp_handle,
170 struct h_galpa gal,
171 u32 port, u32 * pma_qp_nr,
172 u32 * bma_qp_nr);
173
174u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
175 const struct ipz_qp_handle qp_handle,
176 struct h_galpa gal,
177 u16 mcg_dlid,
178 u64 subnet_prefix, u64 interface_id);
179
180u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
181 const struct ipz_qp_handle qp_handle,
182 struct h_galpa gal,
183 u16 mcg_dlid,
184 u64 subnet_prefix, u64 interface_id);
185
186u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
187 struct ehca_cq *cq,
188 u8 force_flag);
189
190u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
191 struct ehca_eq *eq);
192
193/*
194 * hipz_h_alloc_resource_mr allocates MR resources in HW and FW and
195 * initializes the resources.
196 */
197u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
198 const struct ehca_mr *mr,
199 const u64 vaddr,
200 const u64 length,
201 const u32 access_ctrl,
202 const struct ipz_pd pd,
203 struct ehca_mr_hipzout_parms *outparms);
204
205/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
206u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
207 const struct ehca_mr *mr,
208 const u8 pagesize,
209 const u8 queue_type,
210 const u64 logical_address_of_page,
211 const u64 count);
212
213/* hipz_h_query_mr queries MR in HW and FW */
214u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
215 const struct ehca_mr *mr,
216 struct ehca_mr_hipzout_parms *outparms);
217
218/* hipz_h_free_resource_mr frees MR resources in HW and FW */
219u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
220 const struct ehca_mr *mr);
221
222/* hipz_h_reregister_pmr reregisters MR in HW and FW */
223u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
224 const struct ehca_mr *mr,
225 const u64 vaddr_in,
226 const u64 length,
227 const u32 access_ctrl,
228 const struct ipz_pd pd,
229 const u64 mr_addr_cb,
230 struct ehca_mr_hipzout_parms *outparms);
231
232/* hipz_h_register_smr registers a shared MR in HW and FW */
233u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
234 const struct ehca_mr *mr,
235 const struct ehca_mr *orig_mr,
236 const u64 vaddr_in,
237 const u32 access_ctrl,
238 const struct ipz_pd pd,
239 struct ehca_mr_hipzout_parms *outparms);
240
241/*
242 * hipz_h_alloc_resource_mw allocates MW resources in HW and FW and
243 * initializes the resources.
244 */
245u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
246 const struct ehca_mw *mw,
247 const struct ipz_pd pd,
248 struct ehca_mw_hipzout_parms *outparms);
249
250/* hipz_h_query_mw queries MW in HW and FW */
251u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
252 const struct ehca_mw *mw,
253 struct ehca_mw_hipzout_parms *outparms);
254
255/* hipz_h_free_resource_mw frees MW resources in HW and FW */
256u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
257 const struct ehca_mw *mw);
258
259u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
260 const u64 ressource_handle,
261 void *rblock,
262 unsigned long *byte_count);
263u64 hipz_h_eoi(int irq);
264
265#endif /* __HCP_IF_H__ */
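The EHCA_BMASK_IBM() field definitions that run through hcp_if.c above and hipz_hw.h below use IBM bit numbering, where bit 0 is the most significant bit of the 64-bit register; that is why, for example, H_ALL_RES_QP_RESOURCE_TYPE spanning bits 56..63 lands in the low byte. A hedged sketch of the equivalent packing in conventional shift terms; ibm_field() is a hypothetical stand-in, not the driver's macro:

/* Illustration only: place 'value' into the field covering IBM bits
 * from..to of a 64-bit word (IBM bit 0 = MSB), roughly the job that
 * EHCA_BMASK_SET(EHCA_BMASK_IBM(from, to), value) performs above. */
static inline u64 ibm_field(u64 value, unsigned int from, unsigned int to)
{
	unsigned int width = to - from + 1;
	u64 mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);

	return (value & mask) << (63 - to);
}

/* e.g. ibm_field(1, 56, 63) == 1 matches the resource-type byte, and
 * ibm_field(1, 0, 0) == 1ULL << 63 matches the NEQ control bit set in
 * hipz_h_alloc_resource_eq() above. */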
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c
deleted file mode 100644
index 077376ff3d28..000000000000
--- a/drivers/staging/rdma/ehca/hcp_phyp.c
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * load store abstraction for ehca register access with tracing
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include "ehca_classes.h"
43#include "hipz_hw.h"
44
45u64 hcall_map_page(u64 physaddr)
46{
47 return (u64)ioremap(physaddr, EHCA_PAGESIZE);
48}
49
50int hcall_unmap_page(u64 mapaddr)
51{
52 iounmap((volatile void __iomem *) mapaddr);
53 return 0;
54}
55
56int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
57 u64 paddr_kernel, u64 paddr_user)
58{
59 if (!is_user) {
60 galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
61 if (!galpas->kernel.fw_handle)
62 return -ENOMEM;
63 } else
64 galpas->kernel.fw_handle = 0;
65
66 galpas->user.fw_handle = paddr_user;
67
68 return 0;
69}
70
71int hcp_galpas_dtor(struct h_galpas *galpas)
72{
73 if (galpas->kernel.fw_handle) {
74 int ret = hcall_unmap_page(galpas->kernel.fw_handle);
75 if (ret)
76 return ret;
77 }
78
79 galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
80
81 return 0;
82}
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h
deleted file mode 100644
index d1b029910249..000000000000
--- a/drivers/staging/rdma/ehca/hcp_phyp.h
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Firmware calls
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Waleri Fomin <fomin@de.ibm.com>
9 * Gerd Bayer <gerd.bayer@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef __HCP_PHYP_H__
45#define __HCP_PHYP_H__
46
47
48/*
49 * eHCA page (mapped into memory)
50 * resource to access eHCA register pages in CPU address space
51*/
52struct h_galpa {
53 u64 fw_handle;
54 /* for pSeries this is a 64bit memory address where
55 I/O memory is mapped into CPU address space (kv) */
56};
57
58/*
59 * resource to access eHCA address space registers, all types
60 */
61struct h_galpas {
62 u32 pid; /*PID of userspace galpa checking */
63 struct h_galpa user; /* user space accessible resource,
64 set to 0 if unused */
65 struct h_galpa kernel; /* kernel space accessible resource,
66 set to 0 if unused */
67};
68
69static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
70{
71 u64 addr = galpa.fw_handle + offset;
72 return *(volatile u64 __force *)addr;
73}
74
75static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
76{
77 u64 addr = galpa.fw_handle + offset;
78 *(volatile u64 __force *)addr = value;
79}
80
81int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
82 u64 paddr_kernel, u64 paddr_user);
83
84int hcp_galpas_dtor(struct h_galpas *galpas);
85
86u64 hcall_map_page(u64 physaddr);
87
88int hcall_unmap_page(u64 mapaddr);
89
90#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h
deleted file mode 100644
index 9dac93d02140..000000000000
--- a/drivers/staging/rdma/ehca/hipz_fns.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * HW abstraction register functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __HIPZ_FNS_H__
43#define __HIPZ_FNS_H__
44
45#include "ehca_classes.h"
46#include "hipz_hw.h"
47
48#include "hipz_fns_core.h"
49
50#define hipz_galpa_store_eq(gal, offset, value) \
51 hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)
52
53#define hipz_galpa_load_eq(gal, offset) \
54 hipz_galpa_load(gal, EQTEMM_OFFSET(offset))
55
56#define hipz_galpa_store_qped(gal, offset, value) \
57 hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)
58
59#define hipz_galpa_load_qped(gal, offset) \
60 hipz_galpa_load(gal, QPEDMM_OFFSET(offset))
61
62#define hipz_galpa_store_mrmw(gal, offset, value) \
63 hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)
64
65#define hipz_galpa_load_mrmw(gal, offset) \
66 hipz_galpa_load(gal, MRMWMM_OFFSET(offset))
67
68#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h
deleted file mode 100644
index 868735fd3187..000000000000
--- a/drivers/staging/rdma/ehca/hipz_fns_core.h
+++ /dev/null
@@ -1,100 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * HW abstraction register functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Heiko J Schick <schickhj@de.ibm.com>
8 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
9 * Reinhard Ernst <rernst@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef __HIPZ_FNS_CORE_H__
45#define __HIPZ_FNS_CORE_H__
46
47#include "hcp_phyp.h"
48#include "hipz_hw.h"
49
50#define hipz_galpa_store_cq(gal, offset, value) \
51 hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)
52
53#define hipz_galpa_load_cq(gal, offset) \
54 hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
55
56#define hipz_galpa_store_qp(gal, offset, value) \
57 hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
58#define hipz_galpa_load_qp(gal, offset) \
59 hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
60
61static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
62{
63 /* ringing doorbell :-) */
64 hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
65 EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
66}
67
68static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
69{
70 /* ringing doorbell :-) */
71 hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
72 EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
73}
74
75static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
76{
77 hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
78 EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
79}
80
81static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
82{
83 u64 cqx_n0_reg;
84
85 hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
86 EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
87 value));
88 cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
89}
90
91static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
92{
93 u64 cqx_n1_reg;
94
95 hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
96 EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
97 cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
98}
99
100#endif /* __HIPZ_FNS_CORE_H__ */
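The "ringing doorbell" helpers above are the point where driver state meets the mapped galpa register page. A minimal usage sketch, under the assumption that the caller has just written new WQEs into the kernel send queue; the function name is illustrative and the exact barrier the driver used may differ:

/* Illustration only: notify the adapter of 'nr_wqes' freshly written
 * send WQEs by storing to the doorbell register through the galpa. */
static void example_ring_sq_doorbell(struct ehca_qp *qp, u16 nr_wqes)
{
	wmb();	/* make the WQE contents visible before the MMIO store */
	hipz_update_sqa(qp, nr_wqes);
}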
diff --git a/drivers/staging/rdma/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h
deleted file mode 100644
index bf996c7acc42..000000000000
--- a/drivers/staging/rdma/ehca/hipz_hw.h
+++ /dev/null
@@ -1,414 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * eHCA register definitions
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#ifndef __HIPZ_HW_H__
44#define __HIPZ_HW_H__
45
46#include "ehca_tools.h"
47
48#define EHCA_MAX_MTU 4
49
50/* QP Table Entry Memory Map */
51struct hipz_qptemm {
52 u64 qpx_hcr;
53 u64 qpx_c;
54 u64 qpx_herr;
55 u64 qpx_aer;
56/* 0x20*/
57 u64 qpx_sqa;
58 u64 qpx_sqc;
59 u64 qpx_rqa;
60 u64 qpx_rqc;
61/* 0x40*/
62 u64 qpx_st;
63 u64 qpx_pmstate;
64 u64 qpx_pmfa;
65 u64 qpx_pkey;
66/* 0x60*/
67 u64 qpx_pkeya;
68 u64 qpx_pkeyb;
69 u64 qpx_pkeyc;
70 u64 qpx_pkeyd;
71/* 0x80*/
72 u64 qpx_qkey;
73 u64 qpx_dqp;
74 u64 qpx_dlidp;
75 u64 qpx_portp;
76/* 0xa0*/
77 u64 qpx_slidp;
78 u64 qpx_slidpp;
79 u64 qpx_dlida;
80 u64 qpx_porta;
81/* 0xc0*/
82 u64 qpx_slida;
83 u64 qpx_slidpa;
84 u64 qpx_slvl;
85 u64 qpx_ipd;
86/* 0xe0*/
87 u64 qpx_mtu;
88 u64 qpx_lato;
89 u64 qpx_rlimit;
90 u64 qpx_rnrlimit;
91/* 0x100*/
92 u64 qpx_t;
93 u64 qpx_sqhp;
94 u64 qpx_sqptp;
95 u64 qpx_nspsn;
96/* 0x120*/
97 u64 qpx_nspsnhwm;
98 u64 reserved1;
99 u64 qpx_sdsi;
100 u64 qpx_sdsbc;
101/* 0x140*/
102 u64 qpx_sqwsize;
103 u64 qpx_sqwts;
104 u64 qpx_lsn;
105 u64 qpx_nssn;
106/* 0x160 */
107 u64 qpx_mor;
108 u64 qpx_cor;
109 u64 qpx_sqsize;
110 u64 qpx_erc;
111/* 0x180*/
112 u64 qpx_rnrrc;
113 u64 qpx_ernrwt;
114 u64 qpx_rnrresp;
115 u64 qpx_lmsna;
116/* 0x1a0 */
117 u64 qpx_sqhpc;
118 u64 qpx_sqcptp;
119 u64 qpx_sigt;
120 u64 qpx_wqecnt;
121/* 0x1c0*/
122 u64 qpx_rqhp;
123 u64 qpx_rqptp;
124 u64 qpx_rqsize;
125 u64 qpx_nrr;
126/* 0x1e0*/
127 u64 qpx_rdmac;
128 u64 qpx_nrpsn;
129 u64 qpx_lapsn;
130 u64 qpx_lcr;
131/* 0x200*/
132 u64 qpx_rwc;
133 u64 qpx_rwva;
134 u64 qpx_rdsi;
135 u64 qpx_rdsbc;
136/* 0x220*/
137 u64 qpx_rqwsize;
138 u64 qpx_crmsn;
139 u64 qpx_rdd;
140 u64 qpx_larpsn;
141/* 0x240*/
142 u64 qpx_pd;
143 u64 qpx_scqn;
144 u64 qpx_rcqn;
145 u64 qpx_aeqn;
146/* 0x260*/
147 u64 qpx_aaelog;
148 u64 qpx_ram;
149 u64 qpx_rdmaqe0;
150 u64 qpx_rdmaqe1;
151/* 0x280*/
152 u64 qpx_rdmaqe2;
153 u64 qpx_rdmaqe3;
154 u64 qpx_nrpsnhwm;
155/* 0x298*/
156 u64 reserved[(0x400 - 0x298) / 8];
157/* 0x400 extended data */
158 u64 reserved_ext[(0x500 - 0x400) / 8];
159/* 0x500 */
160 u64 reserved2[(0x1000 - 0x500) / 8];
161/* 0x1000 */
162};
163
164#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
165#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
166#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3)
167
168#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
169
170/* MRMWPT Entry Memory Map */
171struct hipz_mrmwmm {
172 /* 0x00 */
173 u64 mrx_hcr;
174
175 u64 mrx_c;
176 u64 mrx_herr;
177 u64 mrx_aer;
178 /* 0x20 */
179 u64 mrx_pp;
180 u64 reserved1;
181 u64 reserved2;
182 u64 reserved3;
183 /* 0x40 */
184 u64 reserved4[(0x200 - 0x40) / 8];
185 /* 0x200 */
186 u64 mrx_ctl[64];
187
188};
189
190#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
191
192struct hipz_qpedmm {
193 /* 0x00 */
194 u64 reserved0[(0x400) / 8];
195 /* 0x400 */
196 u64 qpedx_phh;
197 u64 qpedx_ppsgp;
198 /* 0x410 */
199 u64 qpedx_ppsgu;
200 u64 qpedx_ppdgp;
201 /* 0x420 */
202 u64 qpedx_ppdgu;
203 u64 qpedx_aph;
204 /* 0x430 */
205 u64 qpedx_apsgp;
206 u64 qpedx_apsgu;
207 /* 0x440 */
208 u64 qpedx_apdgp;
209 u64 qpedx_apdgu;
210 /* 0x450 */
211 u64 qpedx_apav;
212 u64 qpedx_apsav;
213 /* 0x460 */
214 u64 qpedx_hcr;
215 u64 reserved1[4];
216 /* 0x488 */
217 u64 qpedx_rrl0;
218 /* 0x490 */
219 u64 qpedx_rrrkey0;
220 u64 qpedx_rrva0;
221 /* 0x4a0 */
222 u64 reserved2;
223 u64 qpedx_rrl1;
224 /* 0x4b0 */
225 u64 qpedx_rrrkey1;
226 u64 qpedx_rrva1;
227 /* 0x4c0 */
228 u64 reserved3;
229 u64 qpedx_rrl2;
230 /* 0x4d0 */
231 u64 qpedx_rrrkey2;
232 u64 qpedx_rrva2;
233 /* 0x4e0 */
234 u64 reserved4;
235 u64 qpedx_rrl3;
236 /* 0x4f0 */
237 u64 qpedx_rrrkey3;
238 u64 qpedx_rrva3;
239};
240
241#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
242
243/* CQ Table Entry Memory Map */
244struct hipz_cqtemm {
245 u64 cqx_hcr;
246 u64 cqx_c;
247 u64 cqx_herr;
248 u64 cqx_aer;
249/* 0x20 */
250 u64 cqx_ptp;
251 u64 cqx_tp;
252 u64 cqx_fec;
253 u64 cqx_feca;
254/* 0x40 */
255 u64 cqx_ep;
256 u64 cqx_eq;
257/* 0x50 */
258 u64 reserved1;
259 u64 cqx_n0;
260/* 0x60 */
261 u64 cqx_n1;
262 u64 reserved2[(0x1000 - 0x60) / 8];
263/* 0x1000 */
264};
265
266#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32, 63)
267#define CQX_FECADDER EHCA_BMASK_IBM(32, 63)
268#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
269#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
270
271#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
272
273/* EQ Table Entry Memory Map */
274struct hipz_eqtemm {
275 u64 eqx_hcr;
276 u64 eqx_c;
277
278 u64 eqx_herr;
279 u64 eqx_aer;
280/* 0x20 */
281 u64 eqx_ptp;
282 u64 eqx_tp;
283 u64 eqx_ssba;
284 u64 eqx_psba;
285
286/* 0x40 */
287 u64 eqx_cec;
288 u64 eqx_meql;
289 u64 eqx_xisbi;
290 u64 eqx_xisc;
291/* 0x60 */
292 u64 eqx_it;
293
294};
295
296#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
297
298/* access control defines for MR/MW */
299#define HIPZ_ACCESSCTRL_L_WRITE 0x00800000
300#define HIPZ_ACCESSCTRL_R_WRITE 0x00400000
301#define HIPZ_ACCESSCTRL_R_READ 0x00200000
302#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
303#define HIPZ_ACCESSCTRL_MW_BIND 0x00080000
304
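These access-control bits appear to mirror the verbs-layer access flags one-to-one, so a memory-region setup path can translate an ib_access_flags mask bit by bit. A minimal sketch, assuming the standard IB_ACCESS_* flags from <rdma/ib_verbs.h> (the helper name is hypothetical, not the driver's original routine):

    /* hypothetical translation helper; not taken from the removed driver */
    static u32 example_ib_to_hipz_acl(int ib_acl)
    {
            u32 acl = 0;

            if (ib_acl & IB_ACCESS_LOCAL_WRITE)
                    acl |= HIPZ_ACCESSCTRL_L_WRITE;
            if (ib_acl & IB_ACCESS_REMOTE_WRITE)
                    acl |= HIPZ_ACCESSCTRL_R_WRITE;
            if (ib_acl & IB_ACCESS_REMOTE_READ)
                    acl |= HIPZ_ACCESSCTRL_R_READ;
            if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
                    acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
            if (ib_acl & IB_ACCESS_MW_BIND)
                    acl |= HIPZ_ACCESSCTRL_MW_BIND;
            return acl;
    }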
305/* query hca response block */
306struct hipz_query_hca {
307 u32 cur_reliable_dg;
308 u32 cur_qp;
309 u32 cur_cq;
310 u32 cur_eq;
311 u32 cur_mr;
312 u32 cur_mw;
313 u32 cur_ee_context;
314 u32 cur_mcast_grp;
315 u32 cur_qp_attached_mcast_grp;
316 u32 reserved1;
317 u32 cur_ipv6_qp;
318 u32 cur_eth_qp;
319 u32 cur_hp_mr;
320 u32 reserved2[3];
321 u32 max_rd_domain;
322 u32 max_qp;
323 u32 max_cq;
324 u32 max_eq;
325 u32 max_mr;
326 u32 max_hp_mr;
327 u32 max_mw;
328 u32 max_mrwpte;
329 u32 max_special_mrwpte;
330 u32 max_rd_ee_context;
331 u32 max_mcast_grp;
332 u32 max_total_mcast_qp_attach;
333 u32 max_mcast_qp_attach;
334 u32 max_raw_ipv6_qp;
335 u32 max_raw_ethy_qp;
336 u32 internal_clock_frequency;
337 u32 max_pd;
338 u32 max_ah;
339 u32 max_cqe;
340 u32 max_wqes_wq;
341 u32 max_partitions;
342 u32 max_rr_ee_context;
343 u32 max_rr_qp;
344 u32 max_rr_hca;
345 u32 max_act_wqs_ee_context;
346 u32 max_act_wqs_qp;
347 u32 max_sge;
348 u32 max_sge_rd;
349 u32 memory_page_size_supported;
350 u64 max_mr_size;
351 u32 local_ca_ack_delay;
352 u32 num_ports;
353 u32 vendor_id;
354 u32 vendor_part_id;
355 u32 hw_ver;
356 u64 node_guid;
357 u64 hca_cap_indicators;
358 u32 data_counter_register_size;
359 u32 max_shared_rq;
360 u32 max_isns_eq;
361 u32 max_neq;
362} __attribute__ ((packed));
363
364#define HCA_CAP_AH_PORT_NR_CHECK EHCA_BMASK_IBM( 0, 0)
365#define HCA_CAP_ATOMIC EHCA_BMASK_IBM( 1, 1)
366#define HCA_CAP_AUTO_PATH_MIG EHCA_BMASK_IBM( 2, 2)
367#define HCA_CAP_BAD_P_KEY_CTR EHCA_BMASK_IBM( 3, 3)
368#define HCA_CAP_SQD_RTS_PORT_CHANGE EHCA_BMASK_IBM( 4, 4)
369#define HCA_CAP_CUR_QP_STATE_MOD EHCA_BMASK_IBM( 5, 5)
370#define HCA_CAP_INIT_TYPE EHCA_BMASK_IBM( 6, 6)
371#define HCA_CAP_PORT_ACTIVE_EVENT EHCA_BMASK_IBM( 7, 7)
372#define HCA_CAP_Q_KEY_VIOL_CTR EHCA_BMASK_IBM( 8, 8)
373#define HCA_CAP_WQE_RESIZE EHCA_BMASK_IBM( 9, 9)
374#define HCA_CAP_RAW_PACKET_MCAST EHCA_BMASK_IBM(10, 10)
375#define HCA_CAP_SHUTDOWN_PORT EHCA_BMASK_IBM(11, 11)
376#define HCA_CAP_RC_LL_QP EHCA_BMASK_IBM(12, 12)
377#define HCA_CAP_SRQ EHCA_BMASK_IBM(13, 13)
378#define HCA_CAP_UD_LL_QP EHCA_BMASK_IBM(16, 16)
379#define HCA_CAP_RESIZE_MR EHCA_BMASK_IBM(17, 17)
380#define HCA_CAP_MINI_QP EHCA_BMASK_IBM(18, 18)
381#define HCA_CAP_H_ALLOC_RES_SYNC EHCA_BMASK_IBM(19, 19)
382
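The HCA_CAP_* masks above are meant to be applied to the hca_cap_indicators word returned in struct hipz_query_hca. A minimal sketch, assuming the EHCA_BMASK_GET() helper from ehca_tools.h extracts the bit field named by the mask (the function name is a placeholder):

    /* sketch only: does the adapter advertise SRQ support?
     * EHCA_BMASK_GET() is assumed from ehca_tools.h */
    static int example_hca_has_srq(const struct hipz_query_hca *rblock)
    {
            return EHCA_BMASK_GET(HCA_CAP_SRQ, rblock->hca_cap_indicators) != 0;
    }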
383/* query port response block */
384struct hipz_query_port {
385 u32 state;
386 u32 bad_pkey_cntr;
387 u32 lmc;
388 u32 lid;
389 u32 subnet_timeout;
390 u32 qkey_viol_cntr;
391 u32 sm_sl;
392 u32 sm_lid;
393 u32 capability_mask;
394 u32 init_type_reply;
395 u32 pkey_tbl_len;
396 u32 gid_tbl_len;
397 u64 gid_prefix;
398 u32 port_nr;
399 u16 pkey_entries[16];
400 u8 reserved1[32];
401 u32 trent_size;
402 u32 trbuf_size;
403 u64 max_msg_sz;
404 u32 max_mtu;
405 u32 vl_cap;
406 u32 phys_pstate;
407 u32 phys_state;
408 u32 phys_speed;
409 u32 phys_width;
410 u8 reserved2[1884];
411 u64 guid_entries[255];
412} __attribute__ ((packed));
413
414#endif
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c
deleted file mode 100644
index 7ffc748cb973..000000000000
--- a/drivers/staging/rdma/ehca/ipz_pt_fn.c
+++ /dev/null
@@ -1,289 +0,0 @@
1/*
2 * IBM eServer eHCA InfiniBand device driver for Linux on POWER
3 *
4 * internal queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 * Christoph Raisch <raisch@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
13 * BSD.
14 *
15 * OpenIB BSD License
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are met:
19 *
20 * Redistributions of source code must retain the above copyright notice, this
21 * list of conditions and the following disclaimer.
22 *
23 * Redistributions in binary form must reproduce the above copyright notice,
24 * this list of conditions and the following disclaimer in the documentation
25 * and/or other materials
26 * provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
35 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
36 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#include <linux/slab.h>
42
43#include "ehca_tools.h"
44#include "ipz_pt_fn.h"
45#include "ehca_classes.h"
46
47#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)
48
49struct kmem_cache *small_qp_cache;
50
51void *ipz_qpageit_get_inc(struct ipz_queue *queue)
52{
53 void *ret = ipz_qeit_get(queue);
54 queue->current_q_offset += queue->pagesize;
55 if (queue->current_q_offset > queue->queue_length) {
56 queue->current_q_offset -= queue->pagesize;
57 ret = NULL;
58 }
59 if (((u64)ret) % queue->pagesize) {
60 ehca_gen_err("ERROR!! not at PAGE-Boundary");
61 return NULL;
62 }
63 return ret;
64}
65
66void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
67{
68 void *ret = ipz_qeit_get(queue);
69 u64 last_entry_in_q = queue->queue_length - queue->qe_size;
70
71 queue->current_q_offset += queue->qe_size;
72 if (queue->current_q_offset > last_entry_in_q) {
73 queue->current_q_offset = 0;
74 queue->toggle_state = (~queue->toggle_state) & 1;
75 }
76
77 return ret;
78}
79
80int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
81{
82 int i;
83 for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
84 u64 page = __pa(queue->queue_pages[i]);
85 if (addr >= page && addr < page + queue->pagesize) {
86 *q_offset = addr - page + i * queue->pagesize;
87 return 0;
88 }
89 }
90 return -EINVAL;
91}
92
93#if PAGE_SHIFT < EHCA_PAGESHIFT
94#error Kernel pages must be at least as large as eHCA pages (4K)!
95#endif
96
97/*
98 * allocate pages for queue:
99 * outer loop allocates whole kernel pages (page aligned) and
100 * inner loop divides a kernel page into smaller hca queue pages
101 */
102static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
103{
104 int k, f = 0;
105 u8 *kpage;
106
107 while (f < nr_of_pages) {
108 kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
109 if (!kpage)
110 goto out;
111
112 for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
113 queue->queue_pages[f] = (struct ipz_page *)kpage;
114 kpage += EHCA_PAGESIZE;
115 f++;
116 }
117 }
118 return 1;
119
120out:
121 for (f = 0; f < nr_of_pages && queue->queue_pages[f];
122 f += PAGES_PER_KPAGE)
123 free_page((unsigned long)(queue->queue_pages)[f]);
124 return 0;
125}
126
127static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
128{
129 int order = ilog2(queue->pagesize) - 9;
130 struct ipz_small_queue_page *page;
131 unsigned long bit;
132
133 mutex_lock(&pd->lock);
134
135 if (!list_empty(&pd->free[order]))
136 page = list_entry(pd->free[order].next,
137 struct ipz_small_queue_page, list);
138 else {
139 page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
140 if (!page)
141 goto out;
142
143 page->page = get_zeroed_page(GFP_KERNEL);
144 if (!page->page) {
145 kmem_cache_free(small_qp_cache, page);
146 goto out;
147 }
148
149 list_add(&page->list, &pd->free[order]);
150 }
151
152 bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
153 __set_bit(bit, page->bitmap);
154 page->fill++;
155
156 if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
157 list_move(&page->list, &pd->full[order]);
158
159 mutex_unlock(&pd->lock);
160
161 queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
162 queue->small_page = page;
163 queue->offset = bit << (order + 9);
164 return 1;
165
166out:
167 ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
168 mutex_unlock(&pd->lock);
169 return 0;
170}
171
172static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
173{
174 int order = ilog2(queue->pagesize) - 9;
175 struct ipz_small_queue_page *page = queue->small_page;
176 unsigned long bit;
177 int free_page = 0;
178
179 bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
180 >> (order + 9);
181
182 mutex_lock(&pd->lock);
183
184 __clear_bit(bit, page->bitmap);
185 page->fill--;
186
187 if (page->fill == 0) {
188 list_del(&page->list);
189 free_page = 1;
190 }
191
192 if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
193 /* the page was full until we freed the chunk */
194 list_move_tail(&page->list, &pd->free[order]);
195
196 mutex_unlock(&pd->lock);
197
198 if (free_page) {
199 free_page(page->page);
200 kmem_cache_free(small_qp_cache, page);
201 }
202}
203
204int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
205 const u32 nr_of_pages, const u32 pagesize,
206 const u32 qe_size, const u32 nr_of_sg,
207 int is_small)
208{
209 if (pagesize > PAGE_SIZE) {
210 ehca_gen_err("FATAL ERROR: pagesize=%x "
211 "is greater than kernel page size", pagesize);
212 return 0;
213 }
214
215 /* init queue fields */
216 queue->queue_length = nr_of_pages * pagesize;
217 queue->pagesize = pagesize;
218 queue->qe_size = qe_size;
219 queue->act_nr_of_sg = nr_of_sg;
220 queue->current_q_offset = 0;
221 queue->toggle_state = 1;
222 queue->small_page = NULL;
223
224 /* allocate queue page pointers */
225 queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
226 GFP_KERNEL | __GFP_NOWARN);
227 if (!queue->queue_pages) {
228 queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
229 if (!queue->queue_pages) {
230 ehca_gen_err("Couldn't allocate queue page list");
231 return 0;
232 }
233 }
234
235 /* allocate actual queue pages */
236 if (is_small) {
237 if (!alloc_small_queue_page(queue, pd))
238 goto ipz_queue_ctor_exit0;
239 } else
240 if (!alloc_queue_pages(queue, nr_of_pages))
241 goto ipz_queue_ctor_exit0;
242
243 return 1;
244
245ipz_queue_ctor_exit0:
246 ehca_gen_err("Couldn't alloc pages queue=%p "
247 "nr_of_pages=%x", queue, nr_of_pages);
248 kvfree(queue->queue_pages);
249
250 return 0;
251}
252
253int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
254{
255 int i, nr_pages;
256
257 if (!queue || !queue->queue_pages) {
258 ehca_gen_dbg("queue or queue_pages is NULL");
259 return 0;
260 }
261
262 if (queue->small_page)
263 free_small_queue_page(queue, pd);
264 else {
265 nr_pages = queue->queue_length / queue->pagesize;
266 for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
267 free_page((unsigned long)queue->queue_pages[i]);
268 }
269
270 kvfree(queue->queue_pages);
271
272 return 1;
273}
274
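Note that ipz_queue_ctor() and ipz_queue_dtor() report success as 1 and failure as 0 rather than returning errno values, so callers translate the result themselves. A hedged usage sketch (the sizes and the example_ function name are placeholders, not taken from a real call site):

    /* sketch: build a 16-page queue of 64-byte entries on full eHCA pages */
    static int example_build_queue(struct ehca_pd *pd, struct ipz_queue *queue)
    {
            if (!ipz_queue_ctor(pd, queue, 16, EHCA_PAGESIZE, 64, 0, 0))
                    return -ENOMEM;         /* ctor signals failure by returning 0 */
            /* ... hand the queue pages to the hypervisor, post entries ... */
            ipz_queue_dtor(pd, queue);
            return 0;
    }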
275int ehca_init_small_qp_cache(void)
276{
277 small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
278 sizeof(struct ipz_small_queue_page),
279 0, SLAB_HWCACHE_ALIGN, NULL);
280 if (!small_qp_cache)
281 return -ENOMEM;
282
283 return 0;
284}
285
286void ehca_cleanup_small_qp_cache(void)
287{
288 kmem_cache_destroy(small_qp_cache);
289}
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h
deleted file mode 100644
index a801274ea337..000000000000
--- a/drivers/staging/rdma/ehca/ipz_pt_fn.h
+++ /dev/null
@@ -1,289 +0,0 @@
1/*
2 * IBM eServer eHCA InfiniBand device driver for Linux on POWER
3 *
4 * internal queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 * Christoph Raisch <raisch@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#ifndef __IPZ_PT_FN_H__
44#define __IPZ_PT_FN_H__
45
46#define EHCA_PAGESHIFT 12
47#define EHCA_PAGESIZE 4096UL
48#define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
49#define EHCA_PT_ENTRIES 512UL
50
51#include "ehca_tools.h"
52#include "ehca_qes.h"
53
54struct ehca_pd;
55struct ipz_small_queue_page;
56
57extern struct kmem_cache *small_qp_cache;
58
59/* struct generic ehca page */
60struct ipz_page {
61 u8 entries[EHCA_PAGESIZE];
62};
63
64#define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)
65
66struct ipz_small_queue_page {
67 unsigned long page;
68 unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
69 int fill;
70 void *mapped_addr;
71 u32 mmap_count;
72 struct list_head list;
73};
74
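A small queue borrows one power-of-two chunk (at least 512 bytes) out of a kernel page; IPZ_SPAGE_PER_KPAGE is the number of 512-byte chunks per kernel page, and alloc_small_queue_page() in ipz_pt_fn.c above derives everything else from the queue pagesize. A worked example, assuming 4 KiB kernel pages:

    /* pagesize = 1024  =>  order = ilog2(1024) - 9 = 1                    */
    /* chunks per kernel page = IPZ_SPAGE_PER_KPAGE >> order = 8 >> 1 = 4  */
    /* byte offset of bitmap bit b = b << (order + 9) = b * 1024           */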
75/* struct generic queue in linux kernel virtual memory (kv) */
76struct ipz_queue {
77 u64 current_q_offset; /* current queue entry */
78
79 struct ipz_page **queue_pages; /* array of pages belonging to queue */
80 u32 qe_size; /* queue entry size */
81 u32 act_nr_of_sg;
82 u32 queue_length; /* queue length allocated in bytes */
83 u32 pagesize;
84 u32 toggle_state; /* toggle flag - per page */
85 u32 offset; /* save offset within page for small_qp */
86 struct ipz_small_queue_page *small_page;
87};
88
89/*
90 * return current Queue Entry for a certain q_offset
91 * returns address (kv) of Queue Entry
92 */
93static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
94{
95 struct ipz_page *current_page;
96 if (q_offset >= queue->queue_length)
97 return NULL;
98 current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
99 return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
100}
101
102/*
103 * return current Queue Entry
104 * returns address (kv) of Queue Entry
105 */
106static inline void *ipz_qeit_get(struct ipz_queue *queue)
107{
108 return ipz_qeit_calc(queue, queue->current_q_offset);
109}
110
111/*
112 * return current Queue Page, increment Queue Page iterator from
113 * page to page in struct ipz_queue; the last increment returns NULL and does
114 * NOT wrap
115 * returns address (kv) of Queue Page
116 * warning don't use in parallel with ipz_qeit_get_inc()
117 */
118void *ipz_qpageit_get_inc(struct ipz_queue *queue);
119
120/*
121 * return current Queue Entry, increment Queue Entry iterator by one
122 * step in struct ipz_queue, will wrap in ringbuffer
123 * returns address (kv) of Queue Entry BEFORE increment
124 * warning don't use in parallel with ipz_qpageit_get_inc()
125 */
126static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
127{
128 void *ret = ipz_qeit_get(queue);
129 queue->current_q_offset += queue->qe_size;
130 if (queue->current_q_offset >= queue->queue_length) {
131 queue->current_q_offset = 0;
132 /* toggle the valid flag */
133 queue->toggle_state = (~queue->toggle_state) & 1;
134 }
135
136 return ret;
137}
138
139/*
140 * return a bool indicating whether current Queue Entry is valid
141 */
142static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
143{
144 struct ehca_cqe *cqe = ipz_qeit_get(queue);
145 return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
146}
147
148/*
149 * return current Queue Entry, increment Queue Entry iterator by one
150 * step in struct ipz_queue, will wrap in ringbuffer
151 * returns address (kv) of Queue Entry BEFORE increment
152 * returns 0 and does not increment, if wrong valid state
153 * warning don't use in parallel with ipz_qpageit_get_inc()
154 */
155static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
156{
157 return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
158}
159
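ipz_qeit_get_inc_valid() only advances while the CQE toggle bit matches the queue's toggle_state, so a completion-queue consumer can poll simply by looping until it returns NULL. A minimal sketch (the processing step is a placeholder; the example_ name is not from the driver):

    static void example_drain_cq(struct ipz_queue *cq_queue)
    {
            struct ehca_cqe *cqe;

            while ((cqe = ipz_qeit_get_inc_valid(cq_queue)) != NULL) {
                    /* consume one completion entry here */
            }
    }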
160/*
161 * returns and resets Queue Entry iterator
162 * returns address (kv) of first Queue Entry
163 */
164static inline void *ipz_qeit_reset(struct ipz_queue *queue)
165{
166 queue->current_q_offset = 0;
167 return ipz_qeit_get(queue);
168}
169
170/*
171 * return the q_offset corresponding to an absolute address
172 */
173int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
174
175/*
176 * return the next queue offset. don't modify the queue.
177 */
178static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
179{
180 offset += queue->qe_size;
181 if (offset >= queue->queue_length) offset = 0;
182 return offset;
183}
184
185/* struct generic page table */
186struct ipz_pt {
187 u64 entries[EHCA_PT_ENTRIES];
188};
189
190/* struct page table for a queue, only to be used in pf */
191struct ipz_qpt {
192 /* queue page tables (kv), use u64 because we know the element length */
193 u64 *qpts;
194 u32 n_qpts;
195 u32 n_ptes; /* number of page table entries */
196 u64 *current_pte_addr;
197};
198
199/*
200 * constructor for an ipz_queue_t, placement new for ipz_queue_t,
201 * new for all dependent data structures
202 * all QP Tables are the same
203 * flow:
204 * allocate+pin queue
205 * see ipz_qpt_ctor()
206 * returns true if ok, false if out of memory
207 */
208int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
209 const u32 nr_of_pages, const u32 pagesize,
210 const u32 qe_size, const u32 nr_of_sg,
211 int is_small);
212
213/*
214 * destructor for an ipz_queue_t
215 * -# free queue
216 * see ipz_queue_ctor()
217 * returns true if ok, false if queue was a NULL pointer or the free failed
218 */
219int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
220
221/*
222 * constructor for an ipz_qpt_t,
223 * placement new for struct ipz_queue, new for all dependent data structures
224 * all QP Tables are the same,
225 * flow:
226 * -# allocate+pin queue
227 * -# initialise ptcb
228 * -# allocate+pin PTs
229 * -# link PTs to a ring, according to HCA Arch, set bit 62 if needed
230 * -# the ring must have room for exactly nr_of_PTEs
231 * see ipz_qpt_ctor()
232 */
233void ipz_qpt_ctor(struct ipz_qpt *qpt,
234 const u32 nr_of_qes,
235 const u32 pagesize,
236 const u32 qe_size,
237 const u8 lowbyte, const u8 toggle,
238 u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
239
240/*
241 * return current Queue Entry, increment Queue Entry iterator by one
242 * step in struct ipz_queue, will wrap in ringbuffer
243 * returns address (kv) of Queue Entry BEFORE increment
244 * warning don't use in parallel with ipz_qpageit_get_inc()
245 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
246 * fix EQ page problems
247 */
248void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
249
250/*
251 * return current Event Queue Entry, increment Queue Entry iterator
252 * by one step in struct ipz_queue if valid, will wrap in ringbuffer
253 * returns address (kv) of Queue Entry BEFORE increment
254 * returns 0 and does not increment, if wrong valid state
255 * warning don't use in parallel with ipz_qpageit_get_inc()
256 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
257 */
258static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
259{
260 void *ret = ipz_qeit_get(queue);
261 u32 qe = *(u8 *)ret;
262 if ((qe >> 7) != (queue->toggle_state & 1))
263 return NULL;
264 ipz_qeit_eq_get_inc(queue); /* this is a good one */
265 return ret;
266}
267
268static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
269{
270 void *ret = ipz_qeit_get(queue);
271 u32 qe = *(u8 *)ret;
272 if ((qe >> 7) != (queue->toggle_state & 1))
273 return NULL;
274 return ret;
275}
276
277/* returns address (GX) of first queue entry */
278static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
279{
280 return be64_to_cpu(qpt->qpts[0]);
281}
282
283/* returns address (kv) of first page of queue page table */
284static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
285{
286 return qpt->qpts;
287}
288
289#endif /* __IPZ_PT_FN_H__ */