path: root/drivers/infiniband
author    Steve Wise <swise@opengridcomputing.com>   2010-04-21 18:30:06 -0400
committer Roland Dreier <rolandd@cisco.com>          2010-04-21 18:30:06 -0400
commit    cfdda9d764362ab77b11a410bb928400e6520d57 (patch)
tree      3634e5aca12414d40f4e50a3d73543cc479b525f /drivers/infiniband
parent    0eddb519b9127c73d53db4bf3ec1d45b13f844d1 (diff)
RDMA/cxgb4: Add driver for Chelsio T4 RNIC
Add an RDMA/iWARP driver for Chelsio T4 Ethernet adapters.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/Kconfig                     1
-rw-r--r--  drivers/infiniband/Makefile                    1
-rw-r--r--  drivers/infiniband/hw/cxgb4/Kconfig           18
-rw-r--r--  drivers/infiniband/hw/cxgb4/Makefile           5
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c            2330
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c             882
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c         520
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c             193
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h       743
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c            811
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c       518
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c            1577
-rw-r--r--  drivers/infiniband/hw/cxgb4/resource.c       417
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h             536
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h    829
-rw-r--r--  drivers/infiniband/hw/cxgb4/user.h            66
16 files changed, 9447 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 975adce5f40c..330d2a423362 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -46,6 +46,7 @@ source "drivers/infiniband/hw/ipath/Kconfig"
46source "drivers/infiniband/hw/ehca/Kconfig" 46source "drivers/infiniband/hw/ehca/Kconfig"
47source "drivers/infiniband/hw/amso1100/Kconfig" 47source "drivers/infiniband/hw/amso1100/Kconfig"
48source "drivers/infiniband/hw/cxgb3/Kconfig" 48source "drivers/infiniband/hw/cxgb3/Kconfig"
49source "drivers/infiniband/hw/cxgb4/Kconfig"
49source "drivers/infiniband/hw/mlx4/Kconfig" 50source "drivers/infiniband/hw/mlx4/Kconfig"
50source "drivers/infiniband/hw/nes/Kconfig" 51source "drivers/infiniband/hw/nes/Kconfig"
51 52
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index ed35e4496241..0c4e589d746e 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
 obj-$(CONFIG_INFINIBAND_EHCA)		+= hw/ehca/
 obj-$(CONFIG_INFINIBAND_AMSO1100)	+= hw/amso1100/
 obj-$(CONFIG_INFINIBAND_CXGB3)		+= hw/cxgb3/
+obj-$(CONFIG_INFINIBAND_CXGB4)		+= hw/cxgb4/
 obj-$(CONFIG_MLX4_INFINIBAND)		+= hw/mlx4/
 obj-$(CONFIG_INFINIBAND_NES)		+= hw/nes/
 obj-$(CONFIG_INFINIBAND_IPOIB)		+= ulp/ipoib/
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
new file mode 100644
index 000000000000..ccb85eaaad75
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -0,0 +1,18 @@
1config INFINIBAND_CXGB4
2 tristate "Chelsio T4 RDMA Driver"
3 depends on CHELSIO_T4 && INET
4 select GENERIC_ALLOCATOR
5 ---help---
6 This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
7 10GbE adapters.
8
9 For general information about Chelsio and our products, visit
10 our website at <http://www.chelsio.com>.
11
12 For customer support, please visit our customer support page at
13 <http://www.chelsio.com/support.htm>.
14
15 Please send feedback to <linux-bugs@chelsio.com>.
16
17 To compile this driver as a module, choose M here: the module
18 will be called iw_cxgb4.
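For reference (not part of the patch itself), a minimal kernel configuration fragment that builds this driver as a module, per the Kconfig above, might look like the following; it assumes the Chelsio T4 network driver (CHELSIO_T4) is also enabled, and GENERIC_ALLOCATOR is selected automatically:

    CONFIG_INET=y
    CONFIG_CHELSIO_T4=m
    CONFIG_INFINIBAND=m
    CONFIG_INFINIBAND_CXGB4=m

The resulting module is named iw_cxgb4, matching the help text above.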
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
new file mode 100644
index 000000000000..e31a499f0172
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -0,0 +1,5 @@
1EXTRA_CFLAGS += -Idrivers/net/cxgb4
2
3obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
4
5iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
new file mode 100644
index 000000000000..07b068be0cfa
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -0,0 +1,2330 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/list.h>
34#include <linux/workqueue.h>
35#include <linux/skbuff.h>
36#include <linux/timer.h>
37#include <linux/notifier.h>
38#include <linux/inetdevice.h>
39#include <linux/ip.h>
40#include <linux/tcp.h>
41
42#include <net/neighbour.h>
43#include <net/netevent.h>
44#include <net/route.h>
45
46#include "iw_cxgb4.h"
47
48static char *states[] = {
49 "idle",
50 "listen",
51 "connecting",
52 "mpa_wait_req",
53 "mpa_req_sent",
54 "mpa_req_rcvd",
55 "mpa_rep_sent",
56 "fpdu_mode",
57 "aborting",
58 "closing",
59 "moribund",
60 "dead",
61 NULL,
62};
63
64static int enable_tcp_timestamps;
65module_param(enable_tcp_timestamps, int, 0644);
66MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
67
68static int enable_tcp_sack;
69module_param(enable_tcp_sack, int, 0644);
70MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
71
72static int enable_tcp_window_scaling = 1;
73module_param(enable_tcp_window_scaling, int, 0644);
74MODULE_PARM_DESC(enable_tcp_window_scaling,
75 "Enable tcp window scaling (default=1)");
76
77int c4iw_debug;
78module_param(c4iw_debug, int, 0644);
79MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
80
81static int peer2peer;
82module_param(peer2peer, int, 0644);
83MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
84
85static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
86module_param(p2p_type, int, 0644);
87MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
88 "1=RDMA_READ 0=RDMA_WRITE (default 1)");
89
90static int ep_timeout_secs = 60;
91module_param(ep_timeout_secs, int, 0644);
92MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
93 "in seconds (default=60)");
94
95static int mpa_rev = 1;
96module_param(mpa_rev, int, 0644);
97MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
98 "1 is spec compliant. (default=1)");
99
100static int markers_enabled;
101module_param(markers_enabled, int, 0644);
102MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
103
104static int crc_enabled = 1;
105module_param(crc_enabled, int, 0644);
106MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
107
108static int rcv_win = 256 * 1024;
109module_param(rcv_win, int, 0644);
110MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
111
112static int snd_win = 32 * 1024;
113module_param(snd_win, int, 0644);
114MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
115
116static void process_work(struct work_struct *work);
117static struct workqueue_struct *workq;
118static DECLARE_WORK(skb_work, process_work);
119
120static struct sk_buff_head rxq;
121static c4iw_handler_func work_handlers[NUM_CPL_CMDS];
122c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
123
124static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
125static void ep_timeout(unsigned long arg);
126static void connect_reply_upcall(struct c4iw_ep *ep, int status);
127
128static void start_ep_timer(struct c4iw_ep *ep)
129{
130 PDBG("%s ep %p\n", __func__, ep);
131 if (timer_pending(&ep->timer)) {
132 PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
133 del_timer_sync(&ep->timer);
134 } else
135 c4iw_get_ep(&ep->com);
136 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
137 ep->timer.data = (unsigned long)ep;
138 ep->timer.function = ep_timeout;
139 add_timer(&ep->timer);
140}
141
142static void stop_ep_timer(struct c4iw_ep *ep)
143{
144 PDBG("%s ep %p\n", __func__, ep);
145 if (!timer_pending(&ep->timer)) {
146 printk(KERN_ERR "%s timer stopped when it's not running! "
147 "ep %p state %u\n", __func__, ep, ep->com.state);
148 WARN_ON(1);
149 return;
150 }
151 del_timer_sync(&ep->timer);
152 c4iw_put_ep(&ep->com);
153}
154
155static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
156 struct l2t_entry *l2e)
157{
158 int error = 0;
159
160 if (c4iw_fatal_error(rdev)) {
161 kfree_skb(skb);
162 PDBG("%s - device in error state - dropping\n", __func__);
163 return -EIO;
164 }
165 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
166 if (error < 0)
167 kfree_skb(skb);
168 return error;
169}
170
171int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
172{
173 int error = 0;
174
175 if (c4iw_fatal_error(rdev)) {
176 kfree_skb(skb);
177 PDBG("%s - device in error state - dropping\n", __func__);
178 return -EIO;
179 }
180 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
181 if (error < 0)
182 kfree_skb(skb);
183 return error;
184}
185
186static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
187{
188 struct cpl_tid_release *req;
189
190 skb = get_skb(skb, sizeof *req, GFP_KERNEL);
191 if (!skb)
192 return;
193 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
194 INIT_TP_WR(req, hwtid);
195 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
196 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
197 c4iw_ofld_send(rdev, skb);
198 return;
199}
200
201static void set_emss(struct c4iw_ep *ep, u16 opt)
202{
203 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
204 ep->mss = ep->emss;
205 if (GET_TCPOPT_TSTAMP(opt))
206 ep->emss -= 12;
207 if (ep->emss < 128)
208 ep->emss = 128;
209 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
210 ep->mss, ep->emss);
211}
212
213static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
214{
215 unsigned long flags;
216 enum c4iw_ep_state state;
217
218 spin_lock_irqsave(&epc->lock, flags);
219 state = epc->state;
220 spin_unlock_irqrestore(&epc->lock, flags);
221 return state;
222}
223
224static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
225{
226 epc->state = new;
227}
228
229static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
230{
231 unsigned long flags;
232
233 spin_lock_irqsave(&epc->lock, flags);
234 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
235 __state_set(epc, new);
236 spin_unlock_irqrestore(&epc->lock, flags);
237 return;
238}
239
240static void *alloc_ep(int size, gfp_t gfp)
241{
242 struct c4iw_ep_common *epc;
243
244 epc = kzalloc(size, gfp);
245 if (epc) {
246 kref_init(&epc->kref);
247 spin_lock_init(&epc->lock);
248 init_waitqueue_head(&epc->waitq);
249 }
250 PDBG("%s alloc ep %p\n", __func__, epc);
251 return epc;
252}
253
254void _c4iw_free_ep(struct kref *kref)
255{
256 struct c4iw_ep *ep;
257
258 ep = container_of(kref, struct c4iw_ep, com.kref);
259 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
260 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
261 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
262 dst_release(ep->dst);
263 cxgb4_l2t_release(ep->l2t);
264 }
265 kfree(ep);
266}
267
268static void release_ep_resources(struct c4iw_ep *ep)
269{
270 set_bit(RELEASE_RESOURCES, &ep->com.flags);
271 c4iw_put_ep(&ep->com);
272}
273
274static void process_work(struct work_struct *work)
275{
276 struct sk_buff *skb = NULL;
277 struct c4iw_dev *dev;
278 struct cpl_act_establish *rpl;
279 unsigned int opcode;
280 int ret;
281
282 while ((skb = skb_dequeue(&rxq))) {
283 rpl = cplhdr(skb);
284 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
285 opcode = rpl->ot.opcode;
286
287 BUG_ON(!work_handlers[opcode]);
288 ret = work_handlers[opcode](dev, skb);
289 if (!ret)
290 kfree_skb(skb);
291 }
292}
293
294static int status2errno(int status)
295{
296 switch (status) {
297 case CPL_ERR_NONE:
298 return 0;
299 case CPL_ERR_CONN_RESET:
300 return -ECONNRESET;
301 case CPL_ERR_ARP_MISS:
302 return -EHOSTUNREACH;
303 case CPL_ERR_CONN_TIMEDOUT:
304 return -ETIMEDOUT;
305 case CPL_ERR_TCAM_FULL:
306 return -ENOMEM;
307 case CPL_ERR_CONN_EXIST:
308 return -EADDRINUSE;
309 default:
310 return -EIO;
311 }
312}
313
314/*
315 * Try and reuse skbs already allocated...
316 */
317static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
318{
319 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
320 skb_trim(skb, 0);
321 skb_get(skb);
322 skb_reset_transport_header(skb);
323 } else {
324 skb = alloc_skb(len, gfp);
325 }
326 return skb;
327}
328
329static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
330 __be32 peer_ip, __be16 local_port,
331 __be16 peer_port, u8 tos)
332{
333 struct rtable *rt;
334 struct flowi fl = {
335 .oif = 0,
336 .nl_u = {
337 .ip4_u = {
338 .daddr = peer_ip,
339 .saddr = local_ip,
340 .tos = tos}
341 },
342 .proto = IPPROTO_TCP,
343 .uli_u = {
344 .ports = {
345 .sport = local_port,
346 .dport = peer_port}
347 }
348 };
349
350 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
351 return NULL;
352 return rt;
353}
354
355static void arp_failure_discard(void *handle, struct sk_buff *skb)
356{
357 PDBG("%s c4iw_dev %p\n", __func__, handle);
358 kfree_skb(skb);
359}
360
361/*
362 * Handle an ARP failure for an active open.
363 */
364static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
365{
366 printk(KERN_ERR MOD "ARP failure during connect\n");
367 kfree_skb(skb);
368}
369
370/*
371 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
372 * and send it along.
373 */
374static void abort_arp_failure(void *handle, struct sk_buff *skb)
375{
376 struct c4iw_rdev *rdev = handle;
377 struct cpl_abort_req *req = cplhdr(skb);
378
379 PDBG("%s rdev %p\n", __func__, rdev);
380 req->cmd = CPL_ABORT_NO_RST;
381 c4iw_ofld_send(rdev, skb);
382}
383
384static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
385{
386 unsigned int flowclen = 80;
387 struct fw_flowc_wr *flowc;
388 int i;
389
390 skb = get_skb(skb, flowclen, GFP_KERNEL);
391 flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
392
393 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
394 FW_FLOWC_WR_NPARAMS(8));
395 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
396 16)) | FW_WR_FLOWID(ep->hwtid));
397
398 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
399 flowc->mnemval[0].val = cpu_to_be32(0);
400 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
401 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
402 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
403 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
404 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
405 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
406 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
407 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
408 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
409 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
410 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
411 flowc->mnemval[6].val = cpu_to_be32(snd_win);
412 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
413 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
414 /* Pad WR to 16 byte boundary */
415 flowc->mnemval[8].mnemonic = 0;
416 flowc->mnemval[8].val = 0;
417 for (i = 0; i < 9; i++) {
418 flowc->mnemval[i].r4[0] = 0;
419 flowc->mnemval[i].r4[1] = 0;
420 flowc->mnemval[i].r4[2] = 0;
421 }
422
423 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
424 c4iw_ofld_send(&ep->com.dev->rdev, skb);
425}
426
427static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
428{
429 struct cpl_close_con_req *req;
430 struct sk_buff *skb;
431 int wrlen = roundup(sizeof *req, 16);
432
433 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
434 skb = get_skb(NULL, wrlen, gfp);
435 if (!skb) {
436 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
437 return -ENOMEM;
438 }
439 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
440 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
441 req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
442 memset(req, 0, wrlen);
443 INIT_TP_WR(req, ep->hwtid);
444 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
445 ep->hwtid));
446 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
447}
448
449static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
450{
451 struct cpl_abort_req *req;
452 int wrlen = roundup(sizeof *req, 16);
453
454 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
455 skb = get_skb(skb, wrlen, gfp);
456 if (!skb) {
457 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
458 __func__);
459 return -ENOMEM;
460 }
461 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
462 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
463 req = (struct cpl_abort_req *) skb_put(skb, wrlen);
464 memset(req, 0, wrlen);
465 INIT_TP_WR(req, ep->hwtid);
466 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
467 req->cmd = CPL_ABORT_SEND_RST;
468 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
469}
470
471static int send_connect(struct c4iw_ep *ep)
472{
473 struct cpl_act_open_req *req;
474 struct sk_buff *skb;
475 u64 opt0;
476 u32 opt2;
477 unsigned int mtu_idx;
478 int wscale;
479 int wrlen = roundup(sizeof *req, 16);
480
481 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
482
483 skb = get_skb(NULL, wrlen, GFP_KERNEL);
484 if (!skb) {
485 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
486 __func__);
487 return -ENOMEM;
488 }
489 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
490
491 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
492 wscale = compute_wscale(rcv_win);
493 opt0 = KEEP_ALIVE(1) |
494 WND_SCALE(wscale) |
495 MSS_IDX(mtu_idx) |
496 L2T_IDX(ep->l2t->idx) |
497 TX_CHAN(ep->tx_chan) |
498 SMAC_SEL(ep->smac_idx) |
499 DSCP(ep->tos) |
500 RCV_BUFSIZ(rcv_win>>10);
501 opt2 = RX_CHANNEL(0) |
502 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
503 if (enable_tcp_timestamps)
504 opt2 |= TSTAMPS_EN(1);
505 if (enable_tcp_sack)
506 opt2 |= SACK_EN(1);
507 if (wscale && enable_tcp_window_scaling)
508 opt2 |= WND_SCALE_EN(1);
509 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
510
511 req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
512 INIT_TP_WR(req, 0);
513 OPCODE_TID(req) = cpu_to_be32(
514 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
515 req->local_port = ep->com.local_addr.sin_port;
516 req->peer_port = ep->com.remote_addr.sin_port;
517 req->local_ip = ep->com.local_addr.sin_addr.s_addr;
518 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
519 req->opt0 = cpu_to_be64(opt0);
520 req->params = 0;
521 req->opt2 = cpu_to_be32(opt2);
522 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
523}
524
525static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
526{
527 int mpalen, wrlen;
528 struct fw_ofld_tx_data_wr *req;
529 struct mpa_message *mpa;
530
531 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
532
533 BUG_ON(skb_cloned(skb));
534
535 mpalen = sizeof(*mpa) + ep->plen;
536 wrlen = roundup(mpalen + sizeof *req, 16);
537 skb = get_skb(skb, wrlen, GFP_KERNEL);
538 if (!skb) {
539 connect_reply_upcall(ep, -ENOMEM);
540 return;
541 }
542 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
543
544 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
545 memset(req, 0, wrlen);
546 req->op_to_immdlen = cpu_to_be32(
547 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
548 FW_WR_COMPL(1) |
549 FW_WR_IMMDLEN(mpalen));
550 req->flowid_len16 = cpu_to_be32(
551 FW_WR_FLOWID(ep->hwtid) |
552 FW_WR_LEN16(wrlen >> 4));
553 req->plen = cpu_to_be32(mpalen);
554 req->tunnel_to_proxy = cpu_to_be32(
555 FW_OFLD_TX_DATA_WR_FLUSH(1) |
556 FW_OFLD_TX_DATA_WR_SHOVE(1));
557
558 mpa = (struct mpa_message *)(req + 1);
559 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
560 mpa->flags = (crc_enabled ? MPA_CRC : 0) |
561 (markers_enabled ? MPA_MARKERS : 0);
562 mpa->private_data_size = htons(ep->plen);
563 mpa->revision = mpa_rev;
564
565 if (ep->plen)
566 memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
567
568 /*
569 * Reference the mpa skb. This ensures the data area
570 * will remain in memory until the hw acks the tx.
571 * Function fw4_ack() will deref it.
572 */
573 skb_get(skb);
574 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
575 BUG_ON(ep->mpa_skb);
576 ep->mpa_skb = skb;
577 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
578 start_ep_timer(ep);
579 state_set(&ep->com, MPA_REQ_SENT);
580 ep->mpa_attr.initiator = 1;
581 return;
582}
583
584static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
585{
586 int mpalen, wrlen;
587 struct fw_ofld_tx_data_wr *req;
588 struct mpa_message *mpa;
589 struct sk_buff *skb;
590
591 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
592
593 mpalen = sizeof(*mpa) + plen;
594 wrlen = roundup(mpalen + sizeof *req, 16);
595
596 skb = get_skb(NULL, wrlen, GFP_KERNEL);
597 if (!skb) {
598 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
599 return -ENOMEM;
600 }
601 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
602
603 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
604 memset(req, 0, wrlen);
605 req->op_to_immdlen = cpu_to_be32(
606 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
607 FW_WR_COMPL(1) |
608 FW_WR_IMMDLEN(mpalen));
609 req->flowid_len16 = cpu_to_be32(
610 FW_WR_FLOWID(ep->hwtid) |
611 FW_WR_LEN16(wrlen >> 4));
612 req->plen = cpu_to_be32(mpalen);
613 req->tunnel_to_proxy = cpu_to_be32(
614 FW_OFLD_TX_DATA_WR_FLUSH(1) |
615 FW_OFLD_TX_DATA_WR_SHOVE(1));
616
617 mpa = (struct mpa_message *)(req + 1);
618 memset(mpa, 0, sizeof(*mpa));
619 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
620 mpa->flags = MPA_REJECT;
621 mpa->revision = mpa_rev;
622 mpa->private_data_size = htons(plen);
623 if (plen)
624 memcpy(mpa->private_data, pdata, plen);
625
626 /*
627 * Reference the mpa skb again. This ensures the data area
628 * will remain in memory until the hw acks the tx.
629 * Function fw4_ack() will deref it.
630 */
631 skb_get(skb);
632 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
633 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
634 BUG_ON(ep->mpa_skb);
635 ep->mpa_skb = skb;
636 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
637}
638
639static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
640{
641 int mpalen, wrlen;
642 struct fw_ofld_tx_data_wr *req;
643 struct mpa_message *mpa;
644 struct sk_buff *skb;
645
646 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
647
648 mpalen = sizeof(*mpa) + plen;
649 wrlen = roundup(mpalen + sizeof *req, 16);
650
651 skb = get_skb(NULL, wrlen, GFP_KERNEL);
652 if (!skb) {
653 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
654 return -ENOMEM;
655 }
656 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
657
658 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
659 memset(req, 0, wrlen);
660 req->op_to_immdlen = cpu_to_be32(
661 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
662 FW_WR_COMPL(1) |
663 FW_WR_IMMDLEN(mpalen));
664 req->flowid_len16 = cpu_to_be32(
665 FW_WR_FLOWID(ep->hwtid) |
666 FW_WR_LEN16(wrlen >> 4));
667 req->plen = cpu_to_be32(mpalen);
668 req->tunnel_to_proxy = cpu_to_be32(
669 FW_OFLD_TX_DATA_WR_FLUSH(1) |
670 FW_OFLD_TX_DATA_WR_SHOVE(1));
671
672 mpa = (struct mpa_message *)(req + 1);
673 memset(mpa, 0, sizeof(*mpa));
674 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
675 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
676 (markers_enabled ? MPA_MARKERS : 0);
677 mpa->revision = mpa_rev;
678 mpa->private_data_size = htons(plen);
679 if (plen)
680 memcpy(mpa->private_data, pdata, plen);
681
682 /*
683 * Reference the mpa skb. This ensures the data area
684 * will remain in memory until the hw acks the tx.
685 * Function fw4_ack() will deref it.
686 */
687 skb_get(skb);
688 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
689 ep->mpa_skb = skb;
690 state_set(&ep->com, MPA_REP_SENT);
691 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
692}
693
694static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
695{
696 struct c4iw_ep *ep;
697 struct cpl_act_establish *req = cplhdr(skb);
698 unsigned int tid = GET_TID(req);
699 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
700 struct tid_info *t = dev->rdev.lldi.tids;
701
702 ep = lookup_atid(t, atid);
703
704 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
705 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
706
707 dst_confirm(ep->dst);
708
709 /* setup the hwtid for this connection */
710 ep->hwtid = tid;
711 cxgb4_insert_tid(t, ep, tid);
712
713 ep->snd_seq = be32_to_cpu(req->snd_isn);
714 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
715
716 set_emss(ep, ntohs(req->tcp_opt));
717
718 /* dealloc the atid */
719 cxgb4_free_atid(t, atid);
720
721 /* start MPA negotiation */
722 send_flowc(ep, NULL);
723 send_mpa_req(ep, skb);
724
725 return 0;
726}
727
728static void close_complete_upcall(struct c4iw_ep *ep)
729{
730 struct iw_cm_event event;
731
732 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
733 memset(&event, 0, sizeof(event));
734 event.event = IW_CM_EVENT_CLOSE;
735 if (ep->com.cm_id) {
736 PDBG("close complete delivered ep %p cm_id %p tid %u\n",
737 ep, ep->com.cm_id, ep->hwtid);
738 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
739 ep->com.cm_id->rem_ref(ep->com.cm_id);
740 ep->com.cm_id = NULL;
741 ep->com.qp = NULL;
742 }
743}
744
745static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
746{
747 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
748 close_complete_upcall(ep);
749 state_set(&ep->com, ABORTING);
750 return send_abort(ep, skb, gfp);
751}
752
753static void peer_close_upcall(struct c4iw_ep *ep)
754{
755 struct iw_cm_event event;
756
757 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
758 memset(&event, 0, sizeof(event));
759 event.event = IW_CM_EVENT_DISCONNECT;
760 if (ep->com.cm_id) {
761 PDBG("peer close delivered ep %p cm_id %p tid %u\n",
762 ep, ep->com.cm_id, ep->hwtid);
763 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
764 }
765}
766
767static void peer_abort_upcall(struct c4iw_ep *ep)
768{
769 struct iw_cm_event event;
770
771 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
772 memset(&event, 0, sizeof(event));
773 event.event = IW_CM_EVENT_CLOSE;
774 event.status = -ECONNRESET;
775 if (ep->com.cm_id) {
776 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
777 ep->com.cm_id, ep->hwtid);
778 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
779 ep->com.cm_id->rem_ref(ep->com.cm_id);
780 ep->com.cm_id = NULL;
781 ep->com.qp = NULL;
782 }
783}
784
785static void connect_reply_upcall(struct c4iw_ep *ep, int status)
786{
787 struct iw_cm_event event;
788
789 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
790 memset(&event, 0, sizeof(event));
791 event.event = IW_CM_EVENT_CONNECT_REPLY;
792 event.status = status;
793 event.local_addr = ep->com.local_addr;
794 event.remote_addr = ep->com.remote_addr;
795
796 if ((status == 0) || (status == -ECONNREFUSED)) {
797 event.private_data_len = ep->plen;
798 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
799 }
800 if (ep->com.cm_id) {
801 PDBG("%s ep %p tid %u status %d\n", __func__, ep,
802 ep->hwtid, status);
803 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
804 }
805 if (status < 0) {
806 ep->com.cm_id->rem_ref(ep->com.cm_id);
807 ep->com.cm_id = NULL;
808 ep->com.qp = NULL;
809 }
810}
811
812static void connect_request_upcall(struct c4iw_ep *ep)
813{
814 struct iw_cm_event event;
815
816 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
817 memset(&event, 0, sizeof(event));
818 event.event = IW_CM_EVENT_CONNECT_REQUEST;
819 event.local_addr = ep->com.local_addr;
820 event.remote_addr = ep->com.remote_addr;
821 event.private_data_len = ep->plen;
822 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
823 event.provider_data = ep;
824 if (state_read(&ep->parent_ep->com) != DEAD) {
825 c4iw_get_ep(&ep->com);
826 ep->parent_ep->com.cm_id->event_handler(
827 ep->parent_ep->com.cm_id,
828 &event);
829 }
830 c4iw_put_ep(&ep->parent_ep->com);
831 ep->parent_ep = NULL;
832}
833
834static void established_upcall(struct c4iw_ep *ep)
835{
836 struct iw_cm_event event;
837
838 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
839 memset(&event, 0, sizeof(event));
840 event.event = IW_CM_EVENT_ESTABLISHED;
841 if (ep->com.cm_id) {
842 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
843 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
844 }
845}
846
847static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
848{
849 struct cpl_rx_data_ack *req;
850 struct sk_buff *skb;
851 int wrlen = roundup(sizeof *req, 16);
852
853 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
854 skb = get_skb(NULL, wrlen, GFP_KERNEL);
855 if (!skb) {
856 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
857 return 0;
858 }
859
860 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
861 memset(req, 0, wrlen);
862 INIT_TP_WR(req, ep->hwtid);
863 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
864 ep->hwtid));
865 req->credit_dack = cpu_to_be32(credits);
866 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
867 c4iw_ofld_send(&ep->com.dev->rdev, skb);
868 return credits;
869}
870
871static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
872{
873 struct mpa_message *mpa;
874 u16 plen;
875 struct c4iw_qp_attributes attrs;
876 enum c4iw_qp_attr_mask mask;
877 int err;
878
879 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
880
881 /*
882 * Stop mpa timer. If it expired, then the state has
883 * changed and we bail since ep_timeout already aborted
884 * the connection.
885 */
886 stop_ep_timer(ep);
887 if (state_read(&ep->com) != MPA_REQ_SENT)
888 return;
889
890 /*
891 * If we get more than the supported amount of private data
892 * then we must fail this connection.
893 */
894 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
895 err = -EINVAL;
896 goto err;
897 }
898
899 /*
900 * copy the new data into our accumulation buffer.
901 */
902 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
903 skb->len);
904 ep->mpa_pkt_len += skb->len;
905
906 /*
907 * if we don't even have the mpa message, then bail.
908 */
909 if (ep->mpa_pkt_len < sizeof(*mpa))
910 return;
911 mpa = (struct mpa_message *) ep->mpa_pkt;
912
913 /* Validate MPA header. */
914 if (mpa->revision != mpa_rev) {
915 err = -EPROTO;
916 goto err;
917 }
918 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
919 err = -EPROTO;
920 goto err;
921 }
922
923 plen = ntohs(mpa->private_data_size);
924
925 /*
926 * Fail if there's too much private data.
927 */
928 if (plen > MPA_MAX_PRIVATE_DATA) {
929 err = -EPROTO;
930 goto err;
931 }
932
933 /*
934 * If plen does not account for pkt size
935 */
936 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
937 err = -EPROTO;
938 goto err;
939 }
940
941 ep->plen = (u8) plen;
942
943 /*
944 * If we don't have all the pdata yet, then bail.
945 * We'll continue process when more data arrives.
946 */
947 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
948 return;
949
950 if (mpa->flags & MPA_REJECT) {
951 err = -ECONNREFUSED;
952 goto err;
953 }
954
955 /*
956 * If we get here we have accumulated the entire mpa
957 * start reply message including private data. And
958 * the MPA header is valid.
959 */
960 state_set(&ep->com, FPDU_MODE);
961 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
962 ep->mpa_attr.recv_marker_enabled = markers_enabled;
963 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
964 ep->mpa_attr.version = mpa_rev;
965 ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
966 FW_RI_INIT_P2PTYPE_DISABLED;
967 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
968 "xmit_marker_enabled=%d, version=%d\n", __func__,
969 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
970 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
971
972 attrs.mpa_attr = ep->mpa_attr;
973 attrs.max_ird = ep->ird;
974 attrs.max_ord = ep->ord;
975 attrs.llp_stream_handle = ep;
976 attrs.next_state = C4IW_QP_STATE_RTS;
977
978 mask = C4IW_QP_ATTR_NEXT_STATE |
979 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
980 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
981
982 /* bind QP and TID with INIT_WR */
983 err = c4iw_modify_qp(ep->com.qp->rhp,
984 ep->com.qp, mask, &attrs, 1);
985 if (err)
986 goto err;
987 goto out;
988err:
989 abort_connection(ep, skb, GFP_KERNEL);
990out:
991 connect_reply_upcall(ep, err);
992 return;
993}
994
995static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
996{
997 struct mpa_message *mpa;
998 u16 plen;
999
1000 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1001
1002 if (state_read(&ep->com) != MPA_REQ_WAIT)
1003 return;
1004
1005 /*
1006 * If we get more than the supported amount of private data
1007 * then we must fail this connection.
1008 */
1009 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1010 stop_ep_timer(ep);
1011 abort_connection(ep, skb, GFP_KERNEL);
1012 return;
1013 }
1014
1015 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1016
1017 /*
1018 * Copy the new data into our accumulation buffer.
1019 */
1020 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1021 skb->len);
1022 ep->mpa_pkt_len += skb->len;
1023
1024 /*
1025 * If we don't even have the mpa message, then bail.
1026 * We'll continue processing when more data arrives.
1027 */
1028 if (ep->mpa_pkt_len < sizeof(*mpa))
1029 return;
1030
1031 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1032 stop_ep_timer(ep);
1033 mpa = (struct mpa_message *) ep->mpa_pkt;
1034
1035 /*
1036 * Validate MPA Header.
1037 */
1038 if (mpa->revision != mpa_rev) {
1039 abort_connection(ep, skb, GFP_KERNEL);
1040 return;
1041 }
1042
1043 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
1044 abort_connection(ep, skb, GFP_KERNEL);
1045 return;
1046 }
1047
1048 plen = ntohs(mpa->private_data_size);
1049
1050 /*
1051 * Fail if there's too much private data.
1052 */
1053 if (plen > MPA_MAX_PRIVATE_DATA) {
1054 abort_connection(ep, skb, GFP_KERNEL);
1055 return;
1056 }
1057
1058 /*
1059 * If plen does not account for pkt size
1060 */
1061 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1062 abort_connection(ep, skb, GFP_KERNEL);
1063 return;
1064 }
1065 ep->plen = (u8) plen;
1066
1067 /*
1068 * If we don't have all the pdata yet, then bail.
1069 */
1070 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1071 return;
1072
1073 /*
1074 * If we get here we have accumulated the entire mpa
1075 * start request message including private data.
1076 */
1077 ep->mpa_attr.initiator = 0;
1078 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1079 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1080 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1081 ep->mpa_attr.version = mpa_rev;
1082 ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
1083 FW_RI_INIT_P2PTYPE_DISABLED;
1084 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1085 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1086 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1087 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1088 ep->mpa_attr.p2p_type);
1089
1090 state_set(&ep->com, MPA_REQ_RCVD);
1091
1092 /* drive upcall */
1093 connect_request_upcall(ep);
1094 return;
1095}
1096
1097static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1098{
1099 struct c4iw_ep *ep;
1100 struct cpl_rx_data *hdr = cplhdr(skb);
1101 unsigned int dlen = ntohs(hdr->len);
1102 unsigned int tid = GET_TID(hdr);
1103 struct tid_info *t = dev->rdev.lldi.tids;
1104
1105 ep = lookup_tid(t, tid);
1106 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1107 skb_pull(skb, sizeof(*hdr));
1108 skb_trim(skb, dlen);
1109
1110 ep->rcv_seq += dlen;
1111 BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
1112
1113 /* update RX credits */
1114 update_rx_credits(ep, dlen);
1115
1116 switch (state_read(&ep->com)) {
1117 case MPA_REQ_SENT:
1118 process_mpa_reply(ep, skb);
1119 break;
1120 case MPA_REQ_WAIT:
1121 process_mpa_request(ep, skb);
1122 break;
1123 case MPA_REP_SENT:
1124 break;
1125 default:
1126 printk(KERN_ERR MOD "%s Unexpected streaming data."
1127 " ep %p state %d tid %u\n",
1128 __func__, ep, state_read(&ep->com), ep->hwtid);
1129
1130 /*
1131 * The ep will timeout and inform the ULP of the failure.
1132 * See ep_timeout().
1133 */
1134 break;
1135 }
1136 return 0;
1137}
1138
1139static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1140{
1141 struct c4iw_ep *ep;
1142 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1143 unsigned long flags;
1144 int release = 0;
1145 unsigned int tid = GET_TID(rpl);
1146 struct tid_info *t = dev->rdev.lldi.tids;
1147
1148 ep = lookup_tid(t, tid);
1149 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1150 BUG_ON(!ep);
1151 spin_lock_irqsave(&ep->com.lock, flags);
1152 switch (ep->com.state) {
1153 case ABORTING:
1154 __state_set(&ep->com, DEAD);
1155 release = 1;
1156 break;
1157 default:
1158 printk(KERN_ERR "%s ep %p state %d\n",
1159 __func__, ep, ep->com.state);
1160 break;
1161 }
1162 spin_unlock_irqrestore(&ep->com.lock, flags);
1163
1164 if (release)
1165 release_ep_resources(ep);
1166 return 0;
1167}
1168
1169/*
1170 * Return whether a failed active open has allocated a TID
1171 */
1172static inline int act_open_has_tid(int status)
1173{
1174 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1175 status != CPL_ERR_ARP_MISS;
1176}
1177
1178static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1179{
1180 struct c4iw_ep *ep;
1181 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1182 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1183 ntohl(rpl->atid_status)));
1184 struct tid_info *t = dev->rdev.lldi.tids;
1185 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
1186
1187 ep = lookup_atid(t, atid);
1188
1189 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1190 status, status2errno(status));
1191
1192 if (status == CPL_ERR_RTX_NEG_ADVICE) {
1193 printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1194 atid);
1195 return 0;
1196 }
1197
1198 connect_reply_upcall(ep, status2errno(status));
1199 state_set(&ep->com, DEAD);
1200
1201 if (status && act_open_has_tid(status))
1202 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
1203
1204 cxgb4_free_atid(t, atid);
1205 dst_release(ep->dst);
1206 cxgb4_l2t_release(ep->l2t);
1207 c4iw_put_ep(&ep->com);
1208
1209 return 0;
1210}
1211
1212static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1213{
1214 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1215 struct tid_info *t = dev->rdev.lldi.tids;
1216 unsigned int stid = GET_TID(rpl);
1217 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1218
1219 if (!ep) {
1220 printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
1221 return 0;
1222 }
1223 PDBG("%s ep %p status %d error %d\n", __func__, ep,
1224 rpl->status, status2errno(rpl->status));
1225 ep->com.rpl_err = status2errno(rpl->status);
1226 ep->com.rpl_done = 1;
1227 wake_up(&ep->com.waitq);
1228
1229 return 0;
1230}
1231
1232static int listen_stop(struct c4iw_listen_ep *ep)
1233{
1234 struct sk_buff *skb;
1235 struct cpl_close_listsvr_req *req;
1236
1237 PDBG("%s ep %p\n", __func__, ep);
1238 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1239 if (!skb) {
1240 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
1241 return -ENOMEM;
1242 }
1243 req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
1244 INIT_TP_WR(req, 0);
1245 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
1246 ep->stid));
1247 req->reply_ctrl = cpu_to_be16(
1248 QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
1249 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
1250 return c4iw_ofld_send(&ep->com.dev->rdev, skb);
1251}
1252
1253static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1254{
1255 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1256 struct tid_info *t = dev->rdev.lldi.tids;
1257 unsigned int stid = GET_TID(rpl);
1258 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1259
1260 PDBG("%s ep %p\n", __func__, ep);
1261 ep->com.rpl_err = status2errno(rpl->status);
1262 ep->com.rpl_done = 1;
1263 wake_up(&ep->com.waitq);
1264 return 0;
1265}
1266
1267static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
1268 struct cpl_pass_accept_req *req)
1269{
1270 struct cpl_pass_accept_rpl *rpl;
1271 unsigned int mtu_idx;
1272 u64 opt0;
1273 u32 opt2;
1274 int wscale;
1275
1276 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1277 BUG_ON(skb_cloned(skb));
1278 skb_trim(skb, sizeof(*rpl));
1279 skb_get(skb);
1280 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1281 wscale = compute_wscale(rcv_win);
1282 opt0 = KEEP_ALIVE(1) |
1283 WND_SCALE(wscale) |
1284 MSS_IDX(mtu_idx) |
1285 L2T_IDX(ep->l2t->idx) |
1286 TX_CHAN(ep->tx_chan) |
1287 SMAC_SEL(ep->smac_idx) |
1288 DSCP(ep->tos) |
1289 RCV_BUFSIZ(rcv_win>>10);
1290 opt2 = RX_CHANNEL(0) |
1291 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
1292
1293 if (enable_tcp_timestamps && req->tcpopt.tstamp)
1294 opt2 |= TSTAMPS_EN(1);
1295 if (enable_tcp_sack && req->tcpopt.sack)
1296 opt2 |= SACK_EN(1);
1297 if (wscale && enable_tcp_window_scaling)
1298 opt2 |= WND_SCALE_EN(1);
1299
1300 rpl = cplhdr(skb);
1301 INIT_TP_WR(rpl, ep->hwtid);
1302 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1303 ep->hwtid));
1304 rpl->opt0 = cpu_to_be64(opt0);
1305 rpl->opt2 = cpu_to_be32(opt2);
1306 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
1307 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1308
1309 return;
1310}
1311
1312static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
1313 struct sk_buff *skb)
1314{
1315 PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
1316 peer_ip);
1317 BUG_ON(skb_cloned(skb));
1318 skb_trim(skb, sizeof(struct cpl_tid_release));
1319 skb_get(skb);
1320 release_tid(&dev->rdev, hwtid, skb);
1321 return;
1322}
1323
1324static void get_4tuple(struct cpl_pass_accept_req *req,
1325 __be32 *local_ip, __be32 *peer_ip,
1326 __be16 *local_port, __be16 *peer_port)
1327{
1328 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
1329 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
1330 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
1331 struct tcphdr *tcp = (struct tcphdr *)
1332 ((u8 *)(req + 1) + eth_len + ip_len);
1333
1334 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
1335 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
1336 ntohs(tcp->dest));
1337
1338 *peer_ip = ip->saddr;
1339 *local_ip = ip->daddr;
1340 *peer_port = tcp->source;
1341 *local_port = tcp->dest;
1342
1343 return;
1344}
1345
1346static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1347{
1348 struct c4iw_ep *child_ep, *parent_ep;
1349 struct cpl_pass_accept_req *req = cplhdr(skb);
1350 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
1351 struct tid_info *t = dev->rdev.lldi.tids;
1352 unsigned int hwtid = GET_TID(req);
1353 struct dst_entry *dst;
1354 struct l2t_entry *l2t;
1355 struct rtable *rt;
1356 __be32 local_ip, peer_ip;
1357 __be16 local_port, peer_port;
1358 struct net_device *pdev;
1359 u32 tx_chan, smac_idx;
1360 u16 rss_qid;
1361 u32 mtu;
1362 int step;
1363 int txq_idx;
1364
1365 parent_ep = lookup_stid(t, stid);
1366 PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
1367
1368 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
1369
1370 if (state_read(&parent_ep->com) != LISTEN) {
1371 printk(KERN_ERR "%s - listening ep not in LISTEN\n",
1372 __func__);
1373 goto reject;
1374 }
1375
1376 /* Find output route */
1377 rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
1378 GET_POPEN_TOS(ntohl(req->tos_stid)));
1379 if (!rt) {
1380 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
1381 __func__);
1382 goto reject;
1383 }
1384 dst = &rt->u.dst;
1385 if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
1386 pdev = ip_dev_find(&init_net, peer_ip);
1387 BUG_ON(!pdev);
1388 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1389 pdev, 0);
1390 mtu = pdev->mtu;
1391 tx_chan = cxgb4_port_chan(pdev);
1392 smac_idx = tx_chan << 1;
1393 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1394 txq_idx = cxgb4_port_idx(pdev) * step;
1395 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1396 rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
1397 dev_put(pdev);
1398 } else {
1399 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1400 dst->neighbour->dev, 0);
1401 mtu = dst_mtu(dst);
1402 tx_chan = cxgb4_port_chan(dst->neighbour->dev);
1403 smac_idx = tx_chan << 1;
1404 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1405 txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
1406 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1407 rss_qid = dev->rdev.lldi.rxq_ids[
1408 cxgb4_port_idx(dst->neighbour->dev) * step];
1409 }
1410 if (!l2t) {
1411 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1412 __func__);
1413 dst_release(dst);
1414 goto reject;
1415 }
1416
1417 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1418 if (!child_ep) {
1419 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1420 __func__);
1421 cxgb4_l2t_release(l2t);
1422 dst_release(dst);
1423 goto reject;
1424 }
1425 state_set(&child_ep->com, CONNECTING);
1426 child_ep->com.dev = dev;
1427 child_ep->com.cm_id = NULL;
1428 child_ep->com.local_addr.sin_family = PF_INET;
1429 child_ep->com.local_addr.sin_port = local_port;
1430 child_ep->com.local_addr.sin_addr.s_addr = local_ip;
1431 child_ep->com.remote_addr.sin_family = PF_INET;
1432 child_ep->com.remote_addr.sin_port = peer_port;
1433 child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
1434 c4iw_get_ep(&parent_ep->com);
1435 child_ep->parent_ep = parent_ep;
1436 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
1437 child_ep->l2t = l2t;
1438 child_ep->dst = dst;
1439 child_ep->hwtid = hwtid;
1440 child_ep->tx_chan = tx_chan;
1441 child_ep->smac_idx = smac_idx;
1442 child_ep->rss_qid = rss_qid;
1443 child_ep->mtu = mtu;
1444 child_ep->txq_idx = txq_idx;
1445
1446 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
1447 tx_chan, smac_idx, rss_qid);
1448
1449 init_timer(&child_ep->timer);
1450 cxgb4_insert_tid(t, child_ep, hwtid);
1451 accept_cr(child_ep, peer_ip, skb, req);
1452 goto out;
1453reject:
1454 reject_cr(dev, hwtid, peer_ip, skb);
1455out:
1456 return 0;
1457}
1458
1459static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
1460{
1461 struct c4iw_ep *ep;
1462 struct cpl_pass_establish *req = cplhdr(skb);
1463 struct tid_info *t = dev->rdev.lldi.tids;
1464 unsigned int tid = GET_TID(req);
1465
1466 ep = lookup_tid(t, tid);
1467 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1468 ep->snd_seq = be32_to_cpu(req->snd_isn);
1469 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1470
1471 set_emss(ep, ntohs(req->tcp_opt));
1472
1473 dst_confirm(ep->dst);
1474 state_set(&ep->com, MPA_REQ_WAIT);
1475 start_ep_timer(ep);
1476 send_flowc(ep, skb);
1477
1478 return 0;
1479}
1480
1481static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1482{
1483 struct cpl_peer_close *hdr = cplhdr(skb);
1484 struct c4iw_ep *ep;
1485 struct c4iw_qp_attributes attrs;
1486 unsigned long flags;
1487 int disconnect = 1;
1488 int release = 0;
1489 int closing = 0;
1490 struct tid_info *t = dev->rdev.lldi.tids;
1491 unsigned int tid = GET_TID(hdr);
1492 int start_timer = 0;
1493 int stop_timer = 0;
1494
1495 ep = lookup_tid(t, tid);
1496 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1497 dst_confirm(ep->dst);
1498
1499 spin_lock_irqsave(&ep->com.lock, flags);
1500 switch (ep->com.state) {
1501 case MPA_REQ_WAIT:
1502 __state_set(&ep->com, CLOSING);
1503 break;
1504 case MPA_REQ_SENT:
1505 __state_set(&ep->com, CLOSING);
1506 connect_reply_upcall(ep, -ECONNRESET);
1507 break;
1508 case MPA_REQ_RCVD:
1509
1510 /*
1511 * We're gonna mark this puppy DEAD, but keep
1512 * the reference on it until the ULP accepts or
1513 * rejects the CR. Also wake up anyone waiting
1514 * in rdma connection migration (see c4iw_accept_cr()).
1515 */
1516 __state_set(&ep->com, CLOSING);
1517 ep->com.rpl_done = 1;
1518 ep->com.rpl_err = -ECONNRESET;
1519 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1520 wake_up(&ep->com.waitq);
1521 break;
1522 case MPA_REP_SENT:
1523 __state_set(&ep->com, CLOSING);
1524 ep->com.rpl_done = 1;
1525 ep->com.rpl_err = -ECONNRESET;
1526 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1527 wake_up(&ep->com.waitq);
1528 break;
1529 case FPDU_MODE:
1530 start_timer = 1;
1531 __state_set(&ep->com, CLOSING);
1532 closing = 1;
1533 peer_close_upcall(ep);
1534 break;
1535 case ABORTING:
1536 disconnect = 0;
1537 break;
1538 case CLOSING:
1539 __state_set(&ep->com, MORIBUND);
1540 disconnect = 0;
1541 break;
1542 case MORIBUND:
1543 stop_timer = 1;
1544 if (ep->com.cm_id && ep->com.qp) {
1545 attrs.next_state = C4IW_QP_STATE_IDLE;
1546 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1547 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1548 }
1549 close_complete_upcall(ep);
1550 __state_set(&ep->com, DEAD);
1551 release = 1;
1552 disconnect = 0;
1553 break;
1554 case DEAD:
1555 disconnect = 0;
1556 break;
1557 default:
1558 BUG_ON(1);
1559 }
1560 spin_unlock_irqrestore(&ep->com.lock, flags);
1561 if (closing) {
1562 attrs.next_state = C4IW_QP_STATE_CLOSING;
1563 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1564 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1565 }
1566 if (start_timer)
1567 start_ep_timer(ep);
1568 if (stop_timer)
1569 stop_ep_timer(ep);
1570 if (disconnect)
1571 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1572 if (release)
1573 release_ep_resources(ep);
1574 return 0;
1575}
1576
1577/*
1578 * Returns whether an ABORT_REQ_RSS message is a negative advice.
1579 */
1580static int is_neg_adv_abort(unsigned int status)
1581{
1582 return status == CPL_ERR_RTX_NEG_ADVICE ||
1583 status == CPL_ERR_PERSIST_NEG_ADVICE;
1584}
1585
1586static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
1587{
1588 struct cpl_abort_req_rss *req = cplhdr(skb);
1589 struct c4iw_ep *ep;
1590 struct cpl_abort_rpl *rpl;
1591 struct sk_buff *rpl_skb;
1592 struct c4iw_qp_attributes attrs;
1593 int ret;
1594 int release = 0;
1595 unsigned long flags;
1596 struct tid_info *t = dev->rdev.lldi.tids;
1597 unsigned int tid = GET_TID(req);
1598 int stop_timer = 0;
1599
1600 ep = lookup_tid(t, tid);
1601 if (is_neg_adv_abort(req->status)) {
1602 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
1603 ep->hwtid);
1604 return 0;
1605 }
1606 spin_lock_irqsave(&ep->com.lock, flags);
1607 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
1608 ep->com.state);
1609 switch (ep->com.state) {
1610 case CONNECTING:
1611 break;
1612 case MPA_REQ_WAIT:
1613 stop_timer = 1;
1614 break;
1615 case MPA_REQ_SENT:
1616 stop_timer = 1;
1617 connect_reply_upcall(ep, -ECONNRESET);
1618 break;
1619 case MPA_REP_SENT:
1620 ep->com.rpl_done = 1;
1621 ep->com.rpl_err = -ECONNRESET;
1622 PDBG("waking up ep %p\n", ep);
1623 wake_up(&ep->com.waitq);
1624 break;
1625 case MPA_REQ_RCVD:
1626
1627 /*
1628 * We're gonna mark this puppy DEAD, but keep
1629 * the reference on it until the ULP accepts or
1630 * rejects the CR. Also wake up anyone waiting
1631 * in rdma connection migration (see c4iw_accept_cr()).
1632 */
1633 ep->com.rpl_done = 1;
1634 ep->com.rpl_err = -ECONNRESET;
1635 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1636 wake_up(&ep->com.waitq);
1637 break;
1638 case MORIBUND:
1639 case CLOSING:
1640 stop_timer = 1;
1641 /*FALLTHROUGH*/
1642 case FPDU_MODE:
1643 if (ep->com.cm_id && ep->com.qp) {
1644 attrs.next_state = C4IW_QP_STATE_ERROR;
1645 ret = c4iw_modify_qp(ep->com.qp->rhp,
1646 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
1647 &attrs, 1);
1648 if (ret)
1649 printk(KERN_ERR MOD
1650 "%s - qp <- error failed!\n",
1651 __func__);
1652 }
1653 peer_abort_upcall(ep);
1654 break;
1655 case ABORTING:
1656 break;
1657 case DEAD:
1658 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1659 spin_unlock_irqrestore(&ep->com.lock, flags);
1660 return 0;
1661 default:
1662 BUG_ON(1);
1663 break;
1664 }
1665 dst_confirm(ep->dst);
1666 if (ep->com.state != ABORTING) {
1667 __state_set(&ep->com, DEAD);
1668 release = 1;
1669 }
1670 spin_unlock_irqrestore(&ep->com.lock, flags);
1671
1672 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1673 if (!rpl_skb) {
1674 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
1675 __func__);
1676 release = 1;
1677 goto out;
1678 }
1679 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1680 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
1681 INIT_TP_WR(rpl, ep->hwtid);
1682 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1683 rpl->cmd = CPL_ABORT_NO_RST;
1684 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
1685out:
1686 if (stop_timer)
1687 stop_ep_timer(ep);
1688 if (release)
1689 release_ep_resources(ep);
1690 return 0;
1691}
1692
1693static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1694{
1695 struct c4iw_ep *ep;
1696 struct c4iw_qp_attributes attrs;
1697 struct cpl_close_con_rpl *rpl = cplhdr(skb);
1698 unsigned long flags;
1699 int release = 0;
1700 struct tid_info *t = dev->rdev.lldi.tids;
1701 unsigned int tid = GET_TID(rpl);
1702 int stop_timer = 0;
1703
1704 ep = lookup_tid(t, tid);
1705
1706 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1707 BUG_ON(!ep);
1708
1709 /* The cm_id may be null if we failed to connect */
1710 spin_lock_irqsave(&ep->com.lock, flags);
1711 switch (ep->com.state) {
1712 case CLOSING:
1713 __state_set(&ep->com, MORIBUND);
1714 break;
1715 case MORIBUND:
1716 stop_timer = 1;
1717 if ((ep->com.cm_id) && (ep->com.qp)) {
1718 attrs.next_state = C4IW_QP_STATE_IDLE;
1719 c4iw_modify_qp(ep->com.qp->rhp,
1720 ep->com.qp,
1721 C4IW_QP_ATTR_NEXT_STATE,
1722 &attrs, 1);
1723 }
1724 close_complete_upcall(ep);
1725 __state_set(&ep->com, DEAD);
1726 release = 1;
1727 break;
1728 case ABORTING:
1729 case DEAD:
1730 break;
1731 default:
1732 BUG_ON(1);
1733 break;
1734 }
1735 spin_unlock_irqrestore(&ep->com.lock, flags);
1736 if (stop_timer)
1737 stop_ep_timer(ep);
1738 if (release)
1739 release_ep_resources(ep);
1740 return 0;
1741}
1742
1743static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
1744{
1745 struct c4iw_ep *ep;
1746 struct cpl_rdma_terminate *term = cplhdr(skb);
1747 struct tid_info *t = dev->rdev.lldi.tids;
1748 unsigned int tid = GET_TID(term);
1749
1750 ep = lookup_tid(t, tid);
1751
1752 if (state_read(&ep->com) != FPDU_MODE)
1753 return 0;
1754
1755 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1756 skb_pull(skb, sizeof *term);
1757 PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
1758 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
1759 skb->len);
1760 ep->com.qp->attr.terminate_msg_len = skb->len;
1761 ep->com.qp->attr.is_terminate_local = 0;
1762 return 0;
1763}
1764
1765/*
1766 * Upcall from the adapter indicating data has been transmitted.
1767 * For us it's just the single MPA request or reply. We can now free
1768 * the skb holding the mpa message.
1769 */
1770static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
1771{
1772 struct c4iw_ep *ep;
1773 struct cpl_fw4_ack *hdr = cplhdr(skb);
1774 u8 credits = hdr->credits;
1775 unsigned int tid = GET_TID(hdr);
1776 struct tid_info *t = dev->rdev.lldi.tids;
1777
1778
1779 ep = lookup_tid(t, tid);
1780 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
1781 if (credits == 0) {
 1782		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
1783 __func__, ep, ep->hwtid, state_read(&ep->com));
1784 return 0;
1785 }
1786
1787 dst_confirm(ep->dst);
1788 if (ep->mpa_skb) {
1789 PDBG("%s last streaming msg ack ep %p tid %u state %u "
1790 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
1791 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
1792 kfree_skb(ep->mpa_skb);
1793 ep->mpa_skb = NULL;
1794 }
1795 return 0;
1796}
1797
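/*
 * FW6 messages carry firmware-generated work.  Type 1 returns the
 * completion status for a posted work request: the cookie in data[1]
 * points back at the waiter's c4iw_wr_wait, which is completed and
 * woken here.  Type 2 carries an error CQE that is handed to the
 * async event dispatcher.
 */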
1798static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
1799{
1800 struct cpl_fw6_msg *rpl = cplhdr(skb);
1801 struct c4iw_wr_wait *wr_waitp;
1802 int ret;
1803
1804 PDBG("%s type %u\n", __func__, rpl->type);
1805
1806 switch (rpl->type) {
1807 case 1:
1808 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
1809 wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
1810 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
1811 if (wr_waitp) {
1812 wr_waitp->ret = ret;
1813 wr_waitp->done = 1;
1814 wake_up(&wr_waitp->wait);
1815 }
1816 break;
1817 case 2:
1818 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
1819 break;
1820 default:
1821 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
1822 rpl->type);
1823 break;
1824 }
1825 return 0;
1826}
1827
1828static void ep_timeout(unsigned long arg)
1829{
1830 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
1831 struct c4iw_qp_attributes attrs;
1832 unsigned long flags;
1833 int abort = 1;
1834
1835 spin_lock_irqsave(&ep->com.lock, flags);
1836 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
1837 ep->com.state);
1838 switch (ep->com.state) {
1839 case MPA_REQ_SENT:
1840 __state_set(&ep->com, ABORTING);
1841 connect_reply_upcall(ep, -ETIMEDOUT);
1842 break;
1843 case MPA_REQ_WAIT:
1844 __state_set(&ep->com, ABORTING);
1845 break;
1846 case CLOSING:
1847 case MORIBUND:
1848 if (ep->com.cm_id && ep->com.qp) {
1849 attrs.next_state = C4IW_QP_STATE_ERROR;
1850 c4iw_modify_qp(ep->com.qp->rhp,
1851 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
1852 &attrs, 1);
1853 }
1854 __state_set(&ep->com, ABORTING);
1855 break;
1856 default:
1857 printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
1858 __func__, ep, ep->hwtid, ep->com.state);
1859 WARN_ON(1);
1860 abort = 0;
1861 }
1862 spin_unlock_irqrestore(&ep->com.lock, flags);
1863 if (abort)
1864 abort_connection(ep, NULL, GFP_ATOMIC);
1865 c4iw_put_ep(&ep->com);
1866}
1867
1868int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1869{
1870 int err;
1871 struct c4iw_ep *ep = to_ep(cm_id);
1872 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1873
1874 if (state_read(&ep->com) == DEAD) {
1875 c4iw_put_ep(&ep->com);
1876 return -ECONNRESET;
1877 }
1878 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1879 if (mpa_rev == 0)
1880 abort_connection(ep, NULL, GFP_KERNEL);
1881 else {
1882 err = send_mpa_reject(ep, pdata, pdata_len);
1883 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1884 }
1885 c4iw_put_ep(&ep->com);
1886 return 0;
1887}
1888
1889int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1890{
1891 int err;
1892 struct c4iw_qp_attributes attrs;
1893 enum c4iw_qp_attr_mask mask;
1894 struct c4iw_ep *ep = to_ep(cm_id);
1895 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
1896 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
1897
1898 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1899 if (state_read(&ep->com) == DEAD) {
1900 err = -ECONNRESET;
1901 goto err;
1902 }
1903
1904 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1905 BUG_ON(!qp);
1906
1907 if ((conn_param->ord > T4_MAX_READ_DEPTH) ||
1908 (conn_param->ird > T4_MAX_READ_DEPTH)) {
1909 abort_connection(ep, NULL, GFP_KERNEL);
1910 err = -EINVAL;
1911 goto err;
1912 }
1913
1914 cm_id->add_ref(cm_id);
1915 ep->com.cm_id = cm_id;
1916 ep->com.qp = qp;
1917
1918 ep->ird = conn_param->ird;
1919 ep->ord = conn_param->ord;
1920
1921 if (peer2peer && ep->ird == 0)
1922 ep->ird = 1;
1923
1924 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1925
1926 /* bind QP to EP and move to RTS */
1927 attrs.mpa_attr = ep->mpa_attr;
1928 attrs.max_ird = ep->ird;
1929 attrs.max_ord = ep->ord;
1930 attrs.llp_stream_handle = ep;
1931 attrs.next_state = C4IW_QP_STATE_RTS;
1932
1933 /* bind QP and TID with INIT_WR */
1934 mask = C4IW_QP_ATTR_NEXT_STATE |
1935 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
1936 C4IW_QP_ATTR_MPA_ATTR |
1937 C4IW_QP_ATTR_MAX_IRD |
1938 C4IW_QP_ATTR_MAX_ORD;
1939
1940 err = c4iw_modify_qp(ep->com.qp->rhp,
1941 ep->com.qp, mask, &attrs, 1);
1942 if (err)
1943 goto err1;
1944 err = send_mpa_reply(ep, conn_param->private_data,
1945 conn_param->private_data_len);
1946 if (err)
1947 goto err1;
1948
1949 state_set(&ep->com, FPDU_MODE);
1950 established_upcall(ep);
1951 c4iw_put_ep(&ep->com);
1952 return 0;
1953err1:
1954 ep->com.cm_id = NULL;
1955 ep->com.qp = NULL;
1956 cm_id->rem_ref(cm_id);
1957err:
1958 c4iw_put_ep(&ep->com);
1959 return err;
1960}
1961
1962int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1963{
1964 int err = 0;
1965 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
1966 struct c4iw_ep *ep;
1967 struct rtable *rt;
1968 struct net_device *pdev;
1969 int step;
1970
1971 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1972 if (!ep) {
1973 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1974 err = -ENOMEM;
1975 goto out;
1976 }
1977 init_timer(&ep->timer);
1978 ep->plen = conn_param->private_data_len;
1979 if (ep->plen)
1980 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
1981 conn_param->private_data, ep->plen);
1982 ep->ird = conn_param->ird;
1983 ep->ord = conn_param->ord;
1984
1985 if (peer2peer && ep->ord == 0)
1986 ep->ord = 1;
1987
1988 cm_id->add_ref(cm_id);
1989 ep->com.dev = dev;
1990 ep->com.cm_id = cm_id;
1991 ep->com.qp = get_qhp(dev, conn_param->qpn);
1992 BUG_ON(!ep->com.qp);
1993 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
1994 ep->com.qp, cm_id);
1995
1996 /*
1997 * Allocate an active TID to initiate a TCP connection.
1998 */
1999 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
2000 if (ep->atid == -1) {
2001 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
2002 err = -ENOMEM;
2003 goto fail2;
2004 }
2005
2006 PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
2007 ntohl(cm_id->local_addr.sin_addr.s_addr),
2008 ntohs(cm_id->local_addr.sin_port),
2009 ntohl(cm_id->remote_addr.sin_addr.s_addr),
2010 ntohs(cm_id->remote_addr.sin_port));
2011
2012 /* find a route */
2013 rt = find_route(dev,
2014 cm_id->local_addr.sin_addr.s_addr,
2015 cm_id->remote_addr.sin_addr.s_addr,
2016 cm_id->local_addr.sin_port,
2017 cm_id->remote_addr.sin_port, 0);
2018 if (!rt) {
2019 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2020 err = -EHOSTUNREACH;
2021 goto fail3;
2022 }
2023 ep->dst = &rt->u.dst;
2024
2025 /* get a l2t entry */
2026 if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
2027 PDBG("%s LOOPBACK\n", __func__);
2028 pdev = ip_dev_find(&init_net,
2029 cm_id->remote_addr.sin_addr.s_addr);
2030 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
2031 ep->dst->neighbour,
2032 pdev, 0);
2033 ep->mtu = pdev->mtu;
2034 ep->tx_chan = cxgb4_port_chan(pdev);
2035 ep->smac_idx = ep->tx_chan << 1;
2036 step = ep->com.dev->rdev.lldi.ntxq /
2037 ep->com.dev->rdev.lldi.nchan;
2038 ep->txq_idx = cxgb4_port_idx(pdev) * step;
2039 step = ep->com.dev->rdev.lldi.nrxq /
2040 ep->com.dev->rdev.lldi.nchan;
2041 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
2042 cxgb4_port_idx(pdev) * step];
2043 dev_put(pdev);
2044 } else {
2045 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
2046 ep->dst->neighbour,
2047 ep->dst->neighbour->dev, 0);
2048 ep->mtu = dst_mtu(ep->dst);
2049 ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
2050 ep->smac_idx = ep->tx_chan << 1;
2051 step = ep->com.dev->rdev.lldi.ntxq /
2052 ep->com.dev->rdev.lldi.nchan;
2053 ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
2054 step = ep->com.dev->rdev.lldi.nrxq /
2055 ep->com.dev->rdev.lldi.nchan;
2056 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
2057 cxgb4_port_idx(ep->dst->neighbour->dev) * step];
2058 }
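	/*
	 * A worked example of the queue selection above, with illustrative
	 * numbers that are not taken from this patch: if the LLD reports
	 * ntxq == 8, nrxq == 8 and nchan == 4, each channel owns a stride
	 * of two queues, so a connection on port index 1 transmits on
	 * txq_idx 2 and is steered to the RSS queue id in rxq_ids[2].
	 */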
2059 if (!ep->l2t) {
2060 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
2061 err = -ENOMEM;
2062 goto fail4;
2063 }
2064
2065 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
2066 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
2067 ep->l2t->idx);
2068
2069 state_set(&ep->com, CONNECTING);
2070 ep->tos = 0;
2071 ep->com.local_addr = cm_id->local_addr;
2072 ep->com.remote_addr = cm_id->remote_addr;
2073
2074 /* send connect request to rnic */
2075 err = send_connect(ep);
2076 if (!err)
2077 goto out;
2078
2079 cxgb4_l2t_release(ep->l2t);
2080fail4:
2081 dst_release(ep->dst);
2082fail3:
2083 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2084fail2:
2085 cm_id->rem_ref(cm_id);
2086 c4iw_put_ep(&ep->com);
2087out:
2088 return err;
2089}
2090
2091int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2092{
2093 int err = 0;
2094 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2095 struct c4iw_listen_ep *ep;
2096
2097
2098 might_sleep();
2099
2100 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2101 if (!ep) {
2102 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2103 err = -ENOMEM;
2104 goto fail1;
2105 }
2106 PDBG("%s ep %p\n", __func__, ep);
2107 cm_id->add_ref(cm_id);
2108 ep->com.cm_id = cm_id;
2109 ep->com.dev = dev;
2110 ep->backlog = backlog;
2111 ep->com.local_addr = cm_id->local_addr;
2112
2113 /*
2114 * Allocate a server TID.
2115 */
2116 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
2117 if (ep->stid == -1) {
 2118		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
2119 err = -ENOMEM;
2120 goto fail2;
2121 }
2122
2123 state_set(&ep->com, LISTEN);
2124 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
2125 ep->com.local_addr.sin_addr.s_addr,
2126 ep->com.local_addr.sin_port,
2127 ep->com.dev->rdev.lldi.rxq_ids[0]);
2128 if (err)
2129 goto fail3;
2130
2131 /* wait for pass_open_rpl */
2132 wait_event(ep->com.waitq, ep->com.rpl_done);
2133 err = ep->com.rpl_err;
2134 if (!err) {
2135 cm_id->provider_data = ep;
2136 goto out;
2137 }
2138fail3:
2139 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2140fail2:
2141 cm_id->rem_ref(cm_id);
2142 c4iw_put_ep(&ep->com);
2143fail1:
2144out:
2145 return err;
2146}
2147
2148int c4iw_destroy_listen(struct iw_cm_id *cm_id)
2149{
2150 int err;
2151 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2152
2153 PDBG("%s ep %p\n", __func__, ep);
2154
2155 might_sleep();
2156 state_set(&ep->com, DEAD);
2157 ep->com.rpl_done = 0;
2158 ep->com.rpl_err = 0;
2159 err = listen_stop(ep);
2160 if (err)
2161 goto done;
2162 wait_event(ep->com.waitq, ep->com.rpl_done);
2163 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2164done:
2165 err = ep->com.rpl_err;
2166 cm_id->rem_ref(cm_id);
2167 c4iw_put_ep(&ep->com);
2168 return err;
2169}
2170
2171int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2172{
2173 int ret = 0;
2174 unsigned long flags;
2175 int close = 0;
2176 int fatal = 0;
2177 struct c4iw_rdev *rdev;
2178 int start_timer = 0;
2179 int stop_timer = 0;
2180
2181 spin_lock_irqsave(&ep->com.lock, flags);
2182
2183 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2184 states[ep->com.state], abrupt);
2185
2186 rdev = &ep->com.dev->rdev;
2187 if (c4iw_fatal_error(rdev)) {
2188 fatal = 1;
2189 close_complete_upcall(ep);
2190 ep->com.state = DEAD;
2191 }
2192 switch (ep->com.state) {
2193 case MPA_REQ_WAIT:
2194 case MPA_REQ_SENT:
2195 case MPA_REQ_RCVD:
2196 case MPA_REP_SENT:
2197 case FPDU_MODE:
2198 close = 1;
2199 if (abrupt)
2200 ep->com.state = ABORTING;
2201 else {
2202 ep->com.state = CLOSING;
2203 start_timer = 1;
2204 }
2205 set_bit(CLOSE_SENT, &ep->com.flags);
2206 break;
2207 case CLOSING:
2208 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2209 close = 1;
2210 if (abrupt) {
2211 stop_timer = 1;
2212 ep->com.state = ABORTING;
2213 } else
2214 ep->com.state = MORIBUND;
2215 }
2216 break;
2217 case MORIBUND:
2218 case ABORTING:
2219 case DEAD:
2220 PDBG("%s ignoring disconnect ep %p state %u\n",
2221 __func__, ep, ep->com.state);
2222 break;
2223 default:
2224 BUG();
2225 break;
2226 }
2227
2228 spin_unlock_irqrestore(&ep->com.lock, flags);
2229 if (start_timer)
2230 start_ep_timer(ep);
2231 if (stop_timer)
2232 stop_ep_timer(ep);
2233 if (close) {
2234 if (abrupt)
2235 ret = abort_connection(ep, NULL, gfp);
2236 else
2237 ret = send_halfclose(ep, gfp);
2238 if (ret)
2239 fatal = 1;
2240 }
2241 if (fatal)
2242 release_ep_resources(ep);
2243 return ret;
2244}
2245
2246/*
 2247 * All the CM events are handled on a work queue so they run in a safe,
 2248 * sleepable process context.
2248 */
2249static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
2250{
2251
2252 /*
2253 * Save dev in the skb->cb area.
2254 */
2255 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
2256
2257 /*
2258 * Queue the skb and schedule the worker thread.
2259 */
2260 skb_queue_tail(&rxq, skb);
2261 queue_work(workq, &skb_work);
2262 return 0;
2263}
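/*
 * The work handler bound to skb_work is not visible in this hunk.  A
 * minimal sketch of what it is expected to do, assuming the cb layout
 * used by sched() above (the name process_work is illustrative, not
 * quoted from this patch, and skb lifetime handling is omitted):
 *
 *	static void process_work(struct work_struct *work)
 *	{
 *		struct sk_buff *skb;
 *		struct c4iw_dev *dev;
 *		struct cpl_act_establish *rpl;
 *		unsigned int opcode;
 *
 *		while ((skb = skb_dequeue(&rxq)) != NULL) {
 *			rpl = cplhdr(skb);
 *			dev = *((struct c4iw_dev **)
 *				(skb->cb + sizeof(void *)));
 *			opcode = rpl->ot.opcode;
 *			if (work_handlers[opcode])
 *				work_handlers[opcode](dev, skb);
 *		}
 *	}
 */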
2264
2265static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2266{
2267 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
2268
2269 if (rpl->status != CPL_ERR_NONE) {
2270 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
2271 "for tid %u\n", rpl->status, GET_TID(rpl));
2272 }
2273 return 0;
2274}
2275
2276int __init c4iw_cm_init(void)
2277{
2278 skb_queue_head_init(&rxq);
2279
2280 workq = create_singlethread_workqueue("iw_cxgb4");
2281 if (!workq)
2282 return -ENOMEM;
2283
2284 /*
2285 * Most upcalls from the T4 Core go to sched() to
2286 * schedule the processing on a work queue.
2287 */
2288 c4iw_handlers[CPL_ACT_ESTABLISH] = sched;
2289 c4iw_handlers[CPL_ACT_OPEN_RPL] = sched;
2290 c4iw_handlers[CPL_RX_DATA] = sched;
2291 c4iw_handlers[CPL_ABORT_RPL_RSS] = sched;
2292 c4iw_handlers[CPL_ABORT_RPL] = sched;
2293 c4iw_handlers[CPL_PASS_OPEN_RPL] = sched;
2294 c4iw_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
2295 c4iw_handlers[CPL_PASS_ACCEPT_REQ] = sched;
2296 c4iw_handlers[CPL_PASS_ESTABLISH] = sched;
2297 c4iw_handlers[CPL_PEER_CLOSE] = sched;
2298 c4iw_handlers[CPL_CLOSE_CON_RPL] = sched;
2299 c4iw_handlers[CPL_ABORT_REQ_RSS] = sched;
2300 c4iw_handlers[CPL_RDMA_TERMINATE] = sched;
2301 c4iw_handlers[CPL_FW4_ACK] = sched;
2302 c4iw_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
2303 c4iw_handlers[CPL_FW6_MSG] = fw6_msg;
2304
2305 /*
2306 * These are the real handlers that are called from a
2307 * work queue.
2308 */
2309 work_handlers[CPL_ACT_ESTABLISH] = act_establish;
2310 work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
2311 work_handlers[CPL_RX_DATA] = rx_data;
2312 work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
2313 work_handlers[CPL_ABORT_RPL] = abort_rpl;
2314 work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
2315 work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
2316 work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
2317 work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
2318 work_handlers[CPL_PEER_CLOSE] = peer_close;
2319 work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
2320 work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
2321 work_handlers[CPL_RDMA_TERMINATE] = terminate;
2322 work_handlers[CPL_FW4_ACK] = fw4_ack;
2323 return 0;
2324}
2325
2326void __exit c4iw_cm_term(void)
2327{
2328 flush_workqueue(workq);
2329 destroy_workqueue(workq);
2330}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
new file mode 100644
index 000000000000..fb1aafcc294f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -0,0 +1,882 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "iw_cxgb4.h"
34
35static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
36 struct c4iw_dev_ucontext *uctx)
37{
38 struct fw_ri_res_wr *res_wr;
39 struct fw_ri_res *res;
40 int wr_len;
41 struct c4iw_wr_wait wr_wait;
42 struct sk_buff *skb;
43 int ret;
44
45 wr_len = sizeof *res_wr + sizeof *res;
46 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
47 if (!skb)
48 return -ENOMEM;
49 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
50
51 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
52 memset(res_wr, 0, wr_len);
53 res_wr->op_nres = cpu_to_be32(
54 FW_WR_OP(FW_RI_RES_WR) |
55 V_FW_RI_RES_WR_NRES(1) |
56 FW_WR_COMPL(1));
57 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
58 res_wr->cookie = (u64)&wr_wait;
59 res = res_wr->res;
60 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
61 res->u.cq.op = FW_RI_RES_OP_RESET;
62 res->u.cq.iqid = cpu_to_be32(cq->cqid);
63
64 c4iw_init_wr_wait(&wr_wait);
65 ret = c4iw_ofld_send(rdev, skb);
66 if (!ret) {
67 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
68 if (!wr_wait.done) {
69 printk(KERN_ERR MOD "Device %s not responding!\n",
70 pci_name(rdev->lldi.pdev));
71 rdev->flags = T4_FATAL_ERROR;
72 ret = -EIO;
73 } else
74 ret = wr_wait.ret;
75 }
76
77 kfree(cq->sw_queue);
78 dma_free_coherent(&(rdev->lldi.pdev->dev),
79 cq->memsize, cq->queue,
80 pci_unmap_addr(cq, mapping));
81 c4iw_put_cqid(rdev, cq->cqid, uctx);
82 return ret;
83}
84
85static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
86 struct c4iw_dev_ucontext *uctx)
87{
88 struct fw_ri_res_wr *res_wr;
89 struct fw_ri_res *res;
90 int wr_len;
91 int user = (uctx != &rdev->uctx);
92 struct c4iw_wr_wait wr_wait;
93 int ret;
94 struct sk_buff *skb;
95
96 cq->cqid = c4iw_get_cqid(rdev, uctx);
97 if (!cq->cqid) {
98 ret = -ENOMEM;
99 goto err1;
100 }
101
102 if (!user) {
103 cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
104 if (!cq->sw_queue) {
105 ret = -ENOMEM;
106 goto err2;
107 }
108 }
109 cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
110 &cq->dma_addr, GFP_KERNEL);
111 if (!cq->queue) {
112 ret = -ENOMEM;
113 goto err3;
114 }
115 pci_unmap_addr_set(cq, mapping, cq->dma_addr);
116 memset(cq->queue, 0, cq->memsize);
117
118 /* build fw_ri_res_wr */
119 wr_len = sizeof *res_wr + sizeof *res;
120
121 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
122 if (!skb) {
123 ret = -ENOMEM;
124 goto err4;
125 }
126 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
127
128 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
129 memset(res_wr, 0, wr_len);
130 res_wr->op_nres = cpu_to_be32(
131 FW_WR_OP(FW_RI_RES_WR) |
132 V_FW_RI_RES_WR_NRES(1) |
133 FW_WR_COMPL(1));
134 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
135 res_wr->cookie = (u64)&wr_wait;
136 res = res_wr->res;
137 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
138 res->u.cq.op = FW_RI_RES_OP_WRITE;
139 res->u.cq.iqid = cpu_to_be32(cq->cqid);
140 res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
141 V_FW_RI_RES_WR_IQANUS(0) |
142 V_FW_RI_RES_WR_IQANUD(1) |
143 F_FW_RI_RES_WR_IQANDST |
144 V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
145 res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
146 F_FW_RI_RES_WR_IQDROPRSS |
147 V_FW_RI_RES_WR_IQPCIECH(2) |
148 V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
149 F_FW_RI_RES_WR_IQO |
150 V_FW_RI_RES_WR_IQESIZE(1));
151 res->u.cq.iqsize = cpu_to_be16(cq->size);
152 res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
153
154 c4iw_init_wr_wait(&wr_wait);
155
156 ret = c4iw_ofld_send(rdev, skb);
157 if (ret)
158 goto err4;
159 PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
160 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
161 if (!wr_wait.done) {
162 printk(KERN_ERR MOD "Device %s not responding!\n",
163 pci_name(rdev->lldi.pdev));
164 rdev->flags = T4_FATAL_ERROR;
165 ret = -EIO;
166 } else
167 ret = wr_wait.ret;
168 if (ret)
169 goto err4;
170
171 cq->gen = 1;
172 cq->gts = rdev->lldi.gts_reg;
173 cq->rdev = rdev;
174 if (user) {
175 cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
176 (cq->cqid << rdev->cqshift);
177 cq->ugts &= PAGE_MASK;
178 }
179 return 0;
180err4:
181 dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
182 pci_unmap_addr(cq, mapping));
183err3:
184 kfree(cq->sw_queue);
185err2:
186 c4iw_put_cqid(rdev, cq->cqid, uctx);
187err1:
188 return ret;
189}
190
191static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
192{
193 struct t4_cqe cqe;
194
195 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
196 wq, cq, cq->sw_cidx, cq->sw_pidx);
197 memset(&cqe, 0, sizeof(cqe));
198 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
199 V_CQE_OPCODE(FW_RI_SEND) |
200 V_CQE_TYPE(0) |
201 V_CQE_SWCQE(1) |
202 V_CQE_QPID(wq->rq.qid));
203 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
204 cq->sw_queue[cq->sw_pidx] = cqe;
205 t4_swcq_produce(cq);
206}
207
208int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
209{
210 int flushed = 0;
211 int in_use = wq->rq.in_use - count;
212
213 BUG_ON(in_use < 0);
214 PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
215 wq, cq, wq->rq.in_use, count);
216 while (in_use--) {
217 insert_recv_cqe(wq, cq);
218 flushed++;
219 }
220 return flushed;
221}
222
223static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
224 struct t4_swsqe *swcqe)
225{
226 struct t4_cqe cqe;
227
228 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
229 wq, cq, cq->sw_cidx, cq->sw_pidx);
230 memset(&cqe, 0, sizeof(cqe));
231 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
232 V_CQE_OPCODE(swcqe->opcode) |
233 V_CQE_TYPE(1) |
234 V_CQE_SWCQE(1) |
235 V_CQE_QPID(wq->sq.qid));
236 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
237 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
238 cq->sw_queue[cq->sw_pidx] = cqe;
239 t4_swcq_produce(cq);
240}
241
242int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
243{
244 int flushed = 0;
245 struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
246 int in_use = wq->sq.in_use - count;
247
248 BUG_ON(in_use < 0);
249 while (in_use--) {
250 swsqe->signaled = 0;
251 insert_sq_cqe(wq, cq, swsqe);
252 swsqe++;
253 if (swsqe == (wq->sq.sw_sq + wq->sq.size))
254 swsqe = wq->sq.sw_sq;
255 flushed++;
256 }
257 return flushed;
258}
259
260/*
261 * Move all CQEs from the HWCQ into the SWCQ.
262 */
263void c4iw_flush_hw_cq(struct t4_cq *cq)
264{
265 struct t4_cqe *cqe = NULL, *swcqe;
266 int ret;
267
268 PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
269 ret = t4_next_hw_cqe(cq, &cqe);
270 while (!ret) {
271 PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
272 __func__, cq->cidx, cq->sw_pidx);
273 swcqe = &cq->sw_queue[cq->sw_pidx];
274 *swcqe = *cqe;
275 swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
276 t4_swcq_produce(cq);
277 t4_hwcq_consume(cq);
278 ret = t4_next_hw_cqe(cq, &cqe);
279 }
280}
281
282static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
283{
284 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
285 return 0;
286
287 if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
288 return 0;
289
290 if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
291 return 0;
292
293 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
294 return 0;
295 return 1;
296}
297
298void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
299{
300 struct t4_cqe *cqe;
301 u32 ptr;
302
303 *count = 0;
304 ptr = cq->sw_cidx;
305 while (ptr != cq->sw_pidx) {
306 cqe = &cq->sw_queue[ptr];
307 if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
308 wq->sq.oldest_read)) &&
309 (CQE_QPID(cqe) == wq->sq.qid))
310 (*count)++;
311 if (++ptr == cq->size)
312 ptr = 0;
313 }
314 PDBG("%s cq %p count %d\n", __func__, cq, *count);
315}
316
317void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
318{
319 struct t4_cqe *cqe;
320 u32 ptr;
321
322 *count = 0;
323 PDBG("%s count zero %d\n", __func__, *count);
324 ptr = cq->sw_cidx;
325 while (ptr != cq->sw_pidx) {
326 cqe = &cq->sw_queue[ptr];
327 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
328 (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
329 (*count)++;
330 if (++ptr == cq->size)
331 ptr = 0;
332 }
333 PDBG("%s cq %p count %d\n", __func__, cq, *count);
334}
335
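/*
 * Walk the software SQ from the current cidx: skip over unsignaled WRs,
 * and when a signaled WR that has already completed out of order is
 * found, move its saved CQE into the software CQ and retire the
 * unsignaled WRs that preceded it.
 */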
336static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
337{
338 struct t4_swsqe *swsqe;
339 u16 ptr = wq->sq.cidx;
340 int count = wq->sq.in_use;
341 int unsignaled = 0;
342
343 swsqe = &wq->sq.sw_sq[ptr];
344 while (count--)
345 if (!swsqe->signaled) {
346 if (++ptr == wq->sq.size)
347 ptr = 0;
348 swsqe = &wq->sq.sw_sq[ptr];
349 unsignaled++;
350 } else if (swsqe->complete) {
351
352 /*
353 * Insert this completed cqe into the swcq.
354 */
355 PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
356 __func__, ptr, cq->sw_pidx);
357 swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
358 cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
359 t4_swcq_produce(cq);
360 swsqe->signaled = 0;
361 wq->sq.in_use -= unsignaled;
362 break;
363 } else
364 break;
365}
366
367static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
368 struct t4_cqe *read_cqe)
369{
370 read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
371 read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
372 read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
373 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
374 V_CQE_OPCODE(FW_RI_READ_REQ) |
375 V_CQE_TYPE(1));
376}
377
378/*
 379 * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or NULL.
380 */
381static void advance_oldest_read(struct t4_wq *wq)
382{
383
384 u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
385
386 if (rptr == wq->sq.size)
387 rptr = 0;
388 while (rptr != wq->sq.pidx) {
389 wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
390
391 if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
392 return;
393 if (++rptr == wq->sq.size)
394 rptr = 0;
395 }
396 wq->sq.oldest_read = NULL;
397}
398
399/*
400 * poll_cq
401 *
402 * Caller must:
403 * check the validity of the first CQE,
 404 *	supply the wq associated with the qpid.
405 *
406 * credit: cq credit to return to sge.
407 * cqe_flushed: 1 iff the CQE is flushed.
408 * cqe: copy of the polled CQE.
409 *
410 * return value:
411 * 0 CQE returned ok.
412 * -EAGAIN CQE skipped, try again.
413 * -EOVERFLOW CQ overflow detected.
414 */
415static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
416 u8 *cqe_flushed, u64 *cookie, u32 *credit)
417{
418 int ret = 0;
419 struct t4_cqe *hw_cqe, read_cqe;
420
421 *cqe_flushed = 0;
422 *credit = 0;
423 ret = t4_next_cqe(cq, &hw_cqe);
424 if (ret)
425 return ret;
426
427 PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
428 " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
429 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
430 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
431 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
432 CQE_WRID_LOW(hw_cqe));
433
434 /*
 435	 * skip CQEs not affiliated with a QP.
436 */
437 if (wq == NULL) {
438 ret = -EAGAIN;
439 goto skip_cqe;
440 }
441
442 /*
443 * Gotta tweak READ completions:
444 * 1) the cqe doesn't contain the sq_wptr from the wr.
445 * 2) opcode not reflected from the wr.
446 * 3) read_len not reflected from the wr.
447 * 4) cq_type is RQ_TYPE not SQ_TYPE.
448 */
449 if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
450
451 /*
452 * If this is an unsolicited read response, then the read
453 * was generated by the kernel driver as part of peer-2-peer
454 * connection setup. So ignore the completion.
455 */
456 if (!wq->sq.oldest_read) {
457 if (CQE_STATUS(hw_cqe))
458 t4_set_wq_in_error(wq);
459 ret = -EAGAIN;
460 goto skip_cqe;
461 }
462
463 /*
464 * Don't write to the HWCQ, so create a new read req CQE
465 * in local memory.
466 */
467 create_read_req_cqe(wq, hw_cqe, &read_cqe);
468 hw_cqe = &read_cqe;
469 advance_oldest_read(wq);
470 }
471
472 if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
473 *cqe_flushed = t4_wq_in_error(wq);
474 t4_set_wq_in_error(wq);
475 goto proc_cqe;
476 }
477
478 /*
479 * RECV completion.
480 */
481 if (RQ_TYPE(hw_cqe)) {
482
483 /*
484 * HW only validates 4 bits of MSN. So we must validate that
 485		 * the MSN in the SEND is the next expected MSN.  If it's not,
486 * then we complete this with T4_ERR_MSN and mark the wq in
487 * error.
488 */
489
490 if (t4_rq_empty(wq)) {
491 t4_set_wq_in_error(wq);
492 ret = -EAGAIN;
493 goto skip_cqe;
494 }
495 if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
496 t4_set_wq_in_error(wq);
497 hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
498 goto proc_cqe;
499 }
500 goto proc_cqe;
501 }
502
503 /*
 504	 * If we get here it's a send completion.
505 *
506 * Handle out of order completion. These get stuffed
507 * in the SW SQ. Then the SW SQ is walked to move any
508 * now in-order completions into the SW CQ. This handles
509 * 2 cases:
510 * 1) reaping unsignaled WRs when the first subsequent
511 * signaled WR is completed.
512 * 2) out of order read completions.
513 */
514 if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
515 struct t4_swsqe *swsqe;
516
517 PDBG("%s out of order completion going in sw_sq at idx %u\n",
518 __func__, CQE_WRID_SQ_IDX(hw_cqe));
519 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
520 swsqe->cqe = *hw_cqe;
521 swsqe->complete = 1;
522 ret = -EAGAIN;
523 goto flush_wq;
524 }
525
526proc_cqe:
527 *cqe = *hw_cqe;
528
529 /*
530 * Reap the associated WR(s) that are freed up with this
531 * completion.
532 */
533 if (SQ_TYPE(hw_cqe)) {
534 wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
535 PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
536 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
537 t4_sq_consume(wq);
538 } else {
539 PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
540 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
541 BUG_ON(t4_rq_empty(wq));
542 t4_rq_consume(wq);
543 }
544
545flush_wq:
546 /*
547 * Flush any completed cqes that are now in-order.
548 */
549 flush_completed_wrs(wq, cq);
550
551skip_cqe:
552 if (SW_CQE(hw_cqe)) {
553 PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
554 __func__, cq, cq->cqid, cq->sw_cidx);
555 t4_swcq_consume(cq);
556 } else {
557 PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
558 __func__, cq, cq->cqid, cq->cidx);
559 t4_hwcq_consume(cq);
560 }
561 return ret;
562}
563
564/*
565 * Get one cq entry from c4iw and map it to openib.
566 *
567 * Returns:
568 * 0 cqe returned
 569 *	-ENODATA	CQ empty
570 * -EAGAIN caller must try again
571 * any other -errno fatal error
572 */
573static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
574{
575 struct c4iw_qp *qhp = NULL;
576 struct t4_cqe cqe = {0, 0}, *rd_cqe;
577 struct t4_wq *wq;
578 u32 credit = 0;
579 u8 cqe_flushed;
580 u64 cookie = 0;
581 int ret;
582
583 ret = t4_next_cqe(&chp->cq, &rd_cqe);
584
585 if (ret)
586 return ret;
587
588 qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
589 if (!qhp)
590 wq = NULL;
591 else {
592 spin_lock(&qhp->lock);
593 wq = &(qhp->wq);
594 }
595 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
596 if (ret)
597 goto out;
598
599 wc->wr_id = cookie;
600 wc->qp = &qhp->ibqp;
601 wc->vendor_err = CQE_STATUS(&cqe);
602 wc->wc_flags = 0;
603
604 PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
605 "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
606 CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
607 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);
608
609 if (CQE_TYPE(&cqe) == 0) {
610 if (!CQE_STATUS(&cqe))
611 wc->byte_len = CQE_LEN(&cqe);
612 else
613 wc->byte_len = 0;
614 wc->opcode = IB_WC_RECV;
615 if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
616 CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
617 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
618 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
619 }
620 } else {
621 switch (CQE_OPCODE(&cqe)) {
622 case FW_RI_RDMA_WRITE:
623 wc->opcode = IB_WC_RDMA_WRITE;
624 break;
625 case FW_RI_READ_REQ:
626 wc->opcode = IB_WC_RDMA_READ;
627 wc->byte_len = CQE_LEN(&cqe);
628 break;
629 case FW_RI_SEND_WITH_INV:
630 case FW_RI_SEND_WITH_SE_INV:
631 wc->opcode = IB_WC_SEND;
632 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
633 break;
634 case FW_RI_SEND:
635 case FW_RI_SEND_WITH_SE:
636 wc->opcode = IB_WC_SEND;
637 break;
638 case FW_RI_BIND_MW:
639 wc->opcode = IB_WC_BIND_MW;
640 break;
641
642 case FW_RI_LOCAL_INV:
643 wc->opcode = IB_WC_LOCAL_INV;
644 break;
645 case FW_RI_FAST_REGISTER:
646 wc->opcode = IB_WC_FAST_REG_MR;
647 break;
648 default:
649 printk(KERN_ERR MOD "Unexpected opcode %d "
650 "in the CQE received for QPID=0x%0x\n",
651 CQE_OPCODE(&cqe), CQE_QPID(&cqe));
652 ret = -EINVAL;
653 goto out;
654 }
655 }
656
657 if (cqe_flushed)
658 wc->status = IB_WC_WR_FLUSH_ERR;
659 else {
660
661 switch (CQE_STATUS(&cqe)) {
662 case T4_ERR_SUCCESS:
663 wc->status = IB_WC_SUCCESS;
664 break;
665 case T4_ERR_STAG:
666 wc->status = IB_WC_LOC_ACCESS_ERR;
667 break;
668 case T4_ERR_PDID:
669 wc->status = IB_WC_LOC_PROT_ERR;
670 break;
671 case T4_ERR_QPID:
672 case T4_ERR_ACCESS:
673 wc->status = IB_WC_LOC_ACCESS_ERR;
674 break;
675 case T4_ERR_WRAP:
676 wc->status = IB_WC_GENERAL_ERR;
677 break;
678 case T4_ERR_BOUND:
679 wc->status = IB_WC_LOC_LEN_ERR;
680 break;
681 case T4_ERR_INVALIDATE_SHARED_MR:
682 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
683 wc->status = IB_WC_MW_BIND_ERR;
684 break;
685 case T4_ERR_CRC:
686 case T4_ERR_MARKER:
687 case T4_ERR_PDU_LEN_ERR:
688 case T4_ERR_OUT_OF_RQE:
689 case T4_ERR_DDP_VERSION:
690 case T4_ERR_RDMA_VERSION:
691 case T4_ERR_DDP_QUEUE_NUM:
692 case T4_ERR_MSN:
693 case T4_ERR_TBIT:
694 case T4_ERR_MO:
695 case T4_ERR_MSN_RANGE:
696 case T4_ERR_IRD_OVERFLOW:
697 case T4_ERR_OPCODE:
698 wc->status = IB_WC_FATAL_ERR;
699 break;
700 case T4_ERR_SWFLUSH:
701 wc->status = IB_WC_WR_FLUSH_ERR;
702 break;
703 default:
704 printk(KERN_ERR MOD
705 "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
706 CQE_STATUS(&cqe), CQE_QPID(&cqe));
707 ret = -EINVAL;
708 }
709 }
710out:
711 if (wq)
712 spin_unlock(&qhp->lock);
713 return ret;
714}
715
716int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
717{
718 struct c4iw_cq *chp;
719 unsigned long flags;
720 int npolled;
721 int err = 0;
722
723 chp = to_c4iw_cq(ibcq);
724
725 spin_lock_irqsave(&chp->lock, flags);
726 for (npolled = 0; npolled < num_entries; ++npolled) {
727 do {
728 err = c4iw_poll_cq_one(chp, wc + npolled);
729 } while (err == -EAGAIN);
730 if (err)
731 break;
732 }
733 spin_unlock_irqrestore(&chp->lock, flags);
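	/*
	 * ib_poll_cq() convention: return the number of completions
	 * written to wc[] on success (an empty CQ surfaces as -ENODATA
	 * from c4iw_poll_cq_one() and is not an error here), otherwise
	 * return the fatal -errno from the failed poll.
	 */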
 734	return (!err || err == -ENODATA) ? npolled : err;
735}
736
737int c4iw_destroy_cq(struct ib_cq *ib_cq)
738{
739 struct c4iw_cq *chp;
740 struct c4iw_ucontext *ucontext;
741
742 PDBG("%s ib_cq %p\n", __func__, ib_cq);
743 chp = to_c4iw_cq(ib_cq);
744
745 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
746 atomic_dec(&chp->refcnt);
747 wait_event(chp->wait, !atomic_read(&chp->refcnt));
748
749 ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
750 : NULL;
751 destroy_cq(&chp->rhp->rdev, &chp->cq,
752 ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
753 kfree(chp);
754 return 0;
755}
756
757struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
758 int vector, struct ib_ucontext *ib_context,
759 struct ib_udata *udata)
760{
761 struct c4iw_dev *rhp;
762 struct c4iw_cq *chp;
763 struct c4iw_create_cq_resp uresp;
764 struct c4iw_ucontext *ucontext = NULL;
765 int ret;
766 size_t memsize;
767 struct c4iw_mm_entry *mm, *mm2;
768
769 PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
770
771 rhp = to_c4iw_dev(ibdev);
772
773 chp = kzalloc(sizeof(*chp), GFP_KERNEL);
774 if (!chp)
775 return ERR_PTR(-ENOMEM);
776
777 if (ib_context)
778 ucontext = to_c4iw_ucontext(ib_context);
779
780 /* account for the status page. */
781 entries++;
782
783 /*
784 * entries must be multiple of 16 for HW.
785 */
786 entries = roundup(entries, 16);
787 memsize = entries * sizeof *chp->cq.queue;
788
789 /*
 790	 * memsize must be a multiple of the page size if it's a user cq.
791 */
792 if (ucontext)
793 memsize = roundup(memsize, PAGE_SIZE);
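	/*
	 * Worked sizing example with illustrative numbers (not from this
	 * patch): a request for 64 CQEs becomes 65 after adding the status
	 * page, rounds up to 80 entries for the hardware, and for a user
	 * CQ the byte size 80 * sizeof(*chp->cq.queue) is then rounded up
	 * to a whole page so the queue can be mmapped.
	 */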
794 chp->cq.size = entries;
795 chp->cq.memsize = memsize;
796
797 ret = create_cq(&rhp->rdev, &chp->cq,
798 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
799 if (ret)
800 goto err1;
801
802 chp->rhp = rhp;
803 chp->cq.size--; /* status page */
804 chp->ibcq.cqe = chp->cq.size;
805 spin_lock_init(&chp->lock);
806 atomic_set(&chp->refcnt, 1);
807 init_waitqueue_head(&chp->wait);
808 ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
809 if (ret)
810 goto err2;
811
812 if (ucontext) {
813 mm = kmalloc(sizeof *mm, GFP_KERNEL);
814 if (!mm)
815 goto err3;
816 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
817 if (!mm2)
818 goto err4;
819
820 uresp.qid_mask = rhp->rdev.cqmask;
821 uresp.cqid = chp->cq.cqid;
822 uresp.size = chp->cq.size;
823 uresp.memsize = chp->cq.memsize;
824 spin_lock(&ucontext->mmap_lock);
825 uresp.key = ucontext->key;
826 ucontext->key += PAGE_SIZE;
827 uresp.gts_key = ucontext->key;
828 ucontext->key += PAGE_SIZE;
829 spin_unlock(&ucontext->mmap_lock);
830 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
831 if (ret)
832 goto err5;
833
834 mm->key = uresp.key;
835 mm->addr = virt_to_phys(chp->cq.queue);
836 mm->len = chp->cq.memsize;
837 insert_mmap(ucontext, mm);
838
839 mm2->key = uresp.gts_key;
840 mm2->addr = chp->cq.ugts;
841 mm2->len = PAGE_SIZE;
842 insert_mmap(ucontext, mm2);
843 }
844 PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
845 __func__, chp->cq.cqid, chp, chp->cq.size,
846 chp->cq.memsize,
847 (unsigned long long) chp->cq.dma_addr);
848 return &chp->ibcq;
849err5:
850 kfree(mm2);
851err4:
852 kfree(mm);
853err3:
854 remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
855err2:
856 destroy_cq(&chp->rhp->rdev, &chp->cq,
857 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
858err1:
859 kfree(chp);
860 return ERR_PTR(ret);
861}
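/*
 * The uresp keys returned by c4iw_create_cq() are consumed by the
 * userspace provider library, which is outside this patch.  A hedged
 * sketch of the expected usage, assuming the keys serve as mmap offsets
 * on the opened verbs device fd (cmd_fd and page_size are placeholders):
 *
 *	cq_queue = mmap(NULL, resp.memsize, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, cmd_fd, resp.key);
 *	cq_gts   = mmap(NULL, page_size, PROT_WRITE,
 *			MAP_SHARED, cmd_fd, resp.gts_key);
 *
 * insert_mmap() above records each key/length pair so that the driver's
 * mmap handler can translate the offset back to the CQ memory or the
 * GTS doorbell page.
 */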
862
863int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
864{
865 return -ENOSYS;
866}
867
868int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
869{
870 struct c4iw_cq *chp;
871 int ret;
872 unsigned long flag;
873
874 chp = to_c4iw_cq(ibcq);
875 spin_lock_irqsave(&chp->lock, flag);
876 ret = t4_arm_cq(&chp->cq,
877 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
878 spin_unlock_irqrestore(&chp->lock, flag);
879 if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
880 ret = 0;
881 return ret;
882}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
new file mode 100644
index 000000000000..be23b5eab13b
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -0,0 +1,520 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/debugfs.h>
35
36#include <rdma/ib_verbs.h>
37
38#include "iw_cxgb4.h"
39
40#define DRV_VERSION "0.1"
41
42MODULE_AUTHOR("Steve Wise");
43MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
44MODULE_LICENSE("Dual BSD/GPL");
45MODULE_VERSION(DRV_VERSION);
46
47static LIST_HEAD(dev_list);
48static DEFINE_MUTEX(dev_mutex);
49
50static struct dentry *c4iw_debugfs_root;
51
52struct debugfs_qp_data {
53 struct c4iw_dev *devp;
54 char *buf;
55 int bufsize;
56 int pos;
57};
58
59static int count_qps(int id, void *p, void *data)
60{
61 struct c4iw_qp *qp = p;
62 int *countp = data;
63
64 if (id != qp->wq.sq.qid)
65 return 0;
66
67 *countp = *countp + 1;
68 return 0;
69}
70
71static int dump_qps(int id, void *p, void *data)
72{
73 struct c4iw_qp *qp = p;
74 struct debugfs_qp_data *qpd = data;
75 int space;
76 int cc;
77
78 if (id != qp->wq.sq.qid)
79 return 0;
80
81 space = qpd->bufsize - qpd->pos - 1;
82 if (space == 0)
83 return 1;
84
85 if (qp->ep)
86 cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
87 "ep tid %u state %u %pI4:%u->%pI4:%u\n",
88 qp->wq.sq.qid, (int)qp->attr.state,
89 qp->ep->hwtid, (int)qp->ep->com.state,
90 &qp->ep->com.local_addr.sin_addr.s_addr,
91 ntohs(qp->ep->com.local_addr.sin_port),
92 &qp->ep->com.remote_addr.sin_addr.s_addr,
93 ntohs(qp->ep->com.remote_addr.sin_port));
94 else
95 cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
96 qp->wq.sq.qid, (int)qp->attr.state);
97 if (cc < space)
98 qpd->pos += cc;
99 return 0;
100}
101
102static int qp_release(struct inode *inode, struct file *file)
103{
104 struct debugfs_qp_data *qpd = file->private_data;
105 if (!qpd) {
106 printk(KERN_INFO "%s null qpd?\n", __func__);
107 return 0;
108 }
109 kfree(qpd->buf);
110 kfree(qpd);
111 return 0;
112}
113
114static int qp_open(struct inode *inode, struct file *file)
115{
116 struct debugfs_qp_data *qpd;
117 int ret = 0;
118 int count = 1;
119
120 qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
121 if (!qpd) {
122 ret = -ENOMEM;
123 goto out;
124 }
125 qpd->devp = inode->i_private;
126 qpd->pos = 0;
127
128 spin_lock_irq(&qpd->devp->lock);
129 idr_for_each(&qpd->devp->qpidr, count_qps, &count);
130 spin_unlock_irq(&qpd->devp->lock);
131
132 qpd->bufsize = count * 128;
133 qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
134 if (!qpd->buf) {
135 ret = -ENOMEM;
136 goto err1;
137 }
138
139 spin_lock_irq(&qpd->devp->lock);
140 idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
141 spin_unlock_irq(&qpd->devp->lock);
142
143 qpd->buf[qpd->pos++] = 0;
144 file->private_data = qpd;
145 goto out;
146err1:
147 kfree(qpd);
148out:
149 return ret;
150}
151
152static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
153 loff_t *ppos)
154{
155 struct debugfs_qp_data *qpd = file->private_data;
156 loff_t pos = *ppos;
157 loff_t avail = qpd->pos;
158
159 if (pos < 0)
160 return -EINVAL;
161 if (pos >= avail)
162 return 0;
163 if (count > avail - pos)
164 count = avail - pos;
165
166 while (count) {
167 size_t len = 0;
168
169 len = min((int)count, (int)qpd->pos - (int)pos);
170 if (copy_to_user(buf, qpd->buf + pos, len))
171 return -EFAULT;
172 if (len == 0)
173 return -EINVAL;
174
175 buf += len;
176 pos += len;
177 count -= len;
178 }
179 count = pos - *ppos;
180 *ppos = pos;
181 return count;
182}
183
184static const struct file_operations qp_debugfs_fops = {
185 .owner = THIS_MODULE,
186 .open = qp_open,
187 .release = qp_release,
188 .read = qp_read,
189};
190
191static int setup_debugfs(struct c4iw_dev *devp)
192{
193 struct dentry *de;
194
195 if (!devp->debugfs_root)
196 return -1;
197
 198	de = debugfs_create_file("qps", S_IRUSR, devp->debugfs_root,
199 (void *)devp, &qp_debugfs_fops);
200 if (de && de->d_inode)
201 de->d_inode->i_size = 4096;
202 return 0;
203}
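/*
 * With debugfs mounted in the usual location, the "qps" dump created
 * above can be read with something like the following (the directory
 * names are illustrative; they depend on DRV_NAME and the adapter's
 * PCI address):
 *
 *	cat /sys/kernel/debug/iw_cxgb4/0000:01:00.4/qps
 */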
204
205void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
206 struct c4iw_dev_ucontext *uctx)
207{
208 struct list_head *pos, *nxt;
209 struct c4iw_qid_list *entry;
210
211 mutex_lock(&uctx->lock);
212 list_for_each_safe(pos, nxt, &uctx->qpids) {
213 entry = list_entry(pos, struct c4iw_qid_list, entry);
214 list_del_init(&entry->entry);
215 if (!(entry->qid & rdev->qpmask))
216 c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
217 &rdev->resource.qid_fifo_lock);
218 kfree(entry);
219 }
220
221 list_for_each_safe(pos, nxt, &uctx->qpids) {
222 entry = list_entry(pos, struct c4iw_qid_list, entry);
223 list_del_init(&entry->entry);
224 kfree(entry);
225 }
226 mutex_unlock(&uctx->lock);
227}
228
229void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
230 struct c4iw_dev_ucontext *uctx)
231{
232 INIT_LIST_HEAD(&uctx->qpids);
233 INIT_LIST_HEAD(&uctx->cqids);
234 mutex_init(&uctx->lock);
235}
236
237/* Caller takes care of locking if needed */
238static int c4iw_rdev_open(struct c4iw_rdev *rdev)
239{
240 int err;
241
242 c4iw_init_dev_ucontext(rdev, &rdev->uctx);
243
244 /*
245 * qpshift is the number of bits to shift the qpid left in order
246 * to get the correct address of the doorbell for that qp.
247 */
248 rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
249 rdev->qpmask = rdev->lldi.udb_density - 1;
250 rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
251 rdev->cqmask = rdev->lldi.ucq_density - 1;
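	/*
	 * Worked example with an illustrative density (not taken from this
	 * patch): with 4KB pages and udb_density == 16, qpshift is
	 * 12 - 4 = 8, so qp N's user doorbell sits at byte offset N << 8
	 * in the BAR2 region, and the 16 qpids that differ only in the
	 * bits covered by qpmask (0xf) share one doorbell page.
	 */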
252 PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
253 "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x\n",
254 __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
255 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
256 rdev->lldi.vr->pbl.start,
257 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
258 rdev->lldi.vr->rq.size);
259 PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
260 "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
261 (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
262 (void *)pci_resource_start(rdev->lldi.pdev, 2),
263 rdev->lldi.db_reg,
264 rdev->lldi.gts_reg,
265 rdev->qpshift, rdev->qpmask,
266 rdev->cqshift, rdev->cqmask);
267
268 if (c4iw_num_stags(rdev) == 0) {
269 err = -EINVAL;
270 goto err1;
271 }
272
273 err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
274 if (err) {
275 printk(KERN_ERR MOD "error %d initializing resources\n", err);
276 goto err1;
277 }
278 err = c4iw_pblpool_create(rdev);
279 if (err) {
280 printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
281 goto err2;
282 }
283 err = c4iw_rqtpool_create(rdev);
284 if (err) {
285 printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
286 goto err3;
287 }
288 return 0;
289err3:
290 c4iw_pblpool_destroy(rdev);
291err2:
292 c4iw_destroy_resource(&rdev->resource);
293err1:
294 return err;
295}
296
297static void c4iw_rdev_close(struct c4iw_rdev *rdev)
298{
299 c4iw_pblpool_destroy(rdev);
300 c4iw_rqtpool_destroy(rdev);
301 c4iw_destroy_resource(&rdev->resource);
302}
303
304static void c4iw_remove(struct c4iw_dev *dev)
305{
306 PDBG("%s c4iw_dev %p\n", __func__, dev);
307 cancel_delayed_work_sync(&dev->db_drop_task);
308 list_del(&dev->entry);
309 c4iw_unregister_device(dev);
310 c4iw_rdev_close(&dev->rdev);
311 idr_destroy(&dev->cqidr);
312 idr_destroy(&dev->qpidr);
313 idr_destroy(&dev->mmidr);
314 ib_dealloc_device(&dev->ibdev);
315}
316
317static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
318{
319 struct c4iw_dev *devp;
320 int ret;
321
322 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
323 if (!devp) {
324 printk(KERN_ERR MOD "Cannot allocate ib device\n");
325 return NULL;
326 }
327 devp->rdev.lldi = *infop;
328
329 mutex_lock(&dev_mutex);
330
331 ret = c4iw_rdev_open(&devp->rdev);
332 if (ret) {
333 mutex_unlock(&dev_mutex);
 334		printk(KERN_ERR MOD "Unable to open rdev err %d\n", ret);
335 ib_dealloc_device(&devp->ibdev);
336 return NULL;
337 }
338
339 idr_init(&devp->cqidr);
340 idr_init(&devp->qpidr);
341 idr_init(&devp->mmidr);
342 spin_lock_init(&devp->lock);
343 list_add_tail(&devp->entry, &dev_list);
344 mutex_unlock(&dev_mutex);
345
346 if (c4iw_register_device(devp)) {
347 printk(KERN_ERR MOD "Unable to register device\n");
348 mutex_lock(&dev_mutex);
349 c4iw_remove(devp);
350 mutex_unlock(&dev_mutex);
351 }
352 if (c4iw_debugfs_root) {
353 devp->debugfs_root = debugfs_create_dir(
354 pci_name(devp->rdev.lldi.pdev),
355 c4iw_debugfs_root);
356 setup_debugfs(devp);
357 }
358 return devp;
359}
360
361static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
362{
363 struct c4iw_dev *dev;
364 static int vers_printed;
365 int i;
366
367 if (!vers_printed++)
368 printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
369 DRV_VERSION);
370
371 dev = c4iw_alloc(infop);
372 if (!dev)
373 goto out;
374
375 PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
376 __func__, pci_name(dev->rdev.lldi.pdev),
377 dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
378 dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
379
380 for (i = 0; i < dev->rdev.lldi.nrxq; i++)
381 PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
382
383 printk(KERN_INFO MOD "Initialized device %s\n",
384 pci_name(dev->rdev.lldi.pdev));
385out:
386 return dev;
387}
388
389static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
390 unsigned int skb_len,
391 unsigned int pull_len)
392{
393 struct sk_buff *skb;
394 struct skb_shared_info *ssi;
395
396 if (gl->tot_len <= 512) {
397 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
398 if (unlikely(!skb))
399 goto out;
400 __skb_put(skb, gl->tot_len);
401 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
402 } else {
403 skb = alloc_skb(skb_len, GFP_ATOMIC);
404 if (unlikely(!skb))
405 goto out;
406 __skb_put(skb, pull_len);
407 skb_copy_to_linear_data(skb, gl->va, pull_len);
408
409 ssi = skb_shinfo(skb);
410 ssi->frags[0].page = gl->frags[0].page;
411 ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
412 ssi->frags[0].size = gl->frags[0].size - pull_len;
413 if (gl->nfrags > 1)
414 memcpy(&ssi->frags[1], &gl->frags[1],
415 (gl->nfrags - 1) * sizeof(skb_frag_t));
416 ssi->nr_frags = gl->nfrags;
417
418 skb->len = gl->tot_len;
419 skb->data_len = skb->len - pull_len;
420 skb->truesize += skb->data_len;
421
422 /* Get a reference for the last page, we don't own it */
423 get_page(gl->frags[gl->nfrags - 1].page);
424 }
425out:
426 return skb;
427}
428
429static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
430 const struct pkt_gl *gl)
431{
432 struct c4iw_dev *dev = handle;
433 struct sk_buff *skb;
434 const struct cpl_act_establish *rpl;
435 unsigned int opcode;
436
437 if (gl == NULL) {
438 /* omit RSS and rsp_ctrl at end of descriptor */
439 unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
440
441 skb = alloc_skb(256, GFP_ATOMIC);
442 if (!skb)
443 goto nomem;
444 __skb_put(skb, len);
445 skb_copy_to_linear_data(skb, &rsp[1], len);
446 } else if (gl == CXGB4_MSG_AN) {
447 const struct rsp_ctrl *rc = (void *)rsp;
448
449 u32 qid = be32_to_cpu(rc->pldbuflen_qid);
450 c4iw_ev_handler(dev, qid);
451 return 0;
452 } else {
453 skb = t4_pktgl_to_skb(gl, 128, 128);
454 if (unlikely(!skb))
455 goto nomem;
456 }
457
458 rpl = cplhdr(skb);
459 opcode = rpl->ot.opcode;
460
461 if (c4iw_handlers[opcode])
462 c4iw_handlers[opcode](dev, skb);
463 else
464 printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
465 opcode);
466
467 return 0;
468nomem:
469 return -1;
470}
471
472static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
473{
474 PDBG("%s new_state %u\n", __func__, new_state);
475 return 0;
476}
477
478static struct cxgb4_uld_info c4iw_uld_info = {
479 .name = DRV_NAME,
480 .add = c4iw_uld_add,
481 .rx_handler = c4iw_uld_rx_handler,
482 .state_change = c4iw_uld_state_change,
483};
484
485static int __init c4iw_init_module(void)
486{
487 int err;
488
489 err = c4iw_cm_init();
490 if (err)
491 return err;
492
493 c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
494 if (!c4iw_debugfs_root)
495 printk(KERN_WARNING MOD
496 "could not create debugfs entry, continuing\n");
497
498 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
499
500 return 0;
501}
502
503static void __exit c4iw_exit_module(void)
504{
505 struct c4iw_dev *dev, *tmp;
506
507 cxgb4_unregister_uld(CXGB4_ULD_RDMA);
508
509 mutex_lock(&dev_mutex);
510 list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
511 c4iw_remove(dev);
512 }
513 mutex_unlock(&dev_mutex);
514
515 c4iw_cm_term();
516 debugfs_remove_recursive(c4iw_debugfs_root);
517}
518
519module_init(c4iw_init_module);
520module_exit(c4iw_exit_module);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
new file mode 100644
index 000000000000..1bd6a3e531af
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/slab.h>
33#include <linux/mman.h>
34#include <net/sock.h>
35
36#include "iw_cxgb4.h"
37
38static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
39 struct c4iw_qp *qhp,
40 struct t4_cqe *err_cqe,
41 enum ib_event_type ib_event)
42{
43 struct ib_event event;
44 struct c4iw_qp_attributes attrs;
45
46 if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
47 (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
48 PDBG("%s AE received after RTS - "
49 "qp state %d qpid 0x%x status 0x%x\n", __func__,
50 qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
51 return;
52 }
53
54 printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
55 "type %d wrid.hi 0x%x wrid.lo 0x%x\n", __func__,
56 CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
57 CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
58 CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
59
60 if (qhp->attr.state == C4IW_QP_STATE_RTS) {
61 attrs.next_state = C4IW_QP_STATE_TERMINATE;
62 c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
63 &attrs, 0);
64 }
65
66 event.event = ib_event;
67 event.device = chp->ibcq.device;
68 if (ib_event == IB_EVENT_CQ_ERR)
69 event.element.cq = &chp->ibcq;
70 else
71 event.element.qp = &qhp->ibqp;
72 if (qhp->ibqp.event_handler)
73 (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
74
75 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
76}
77
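/*
 * Dispatch an async error CQE reported by the LLD: look up the QP and
 * the relevant CQ under dev->lock, take references on both so they
 * cannot disappear while the event is posted, map the CQE status onto
 * an access, device-fatal or QP-fatal ib_event via post_qp_event(),
 * then drop the references.
 */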
78void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
79{
80 struct c4iw_cq *chp;
81 struct c4iw_qp *qhp;
82 u32 cqid;
83
84 spin_lock(&dev->lock);
85 qhp = get_qhp(dev, CQE_QPID(err_cqe));
86 if (!qhp) {
87 printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
88 "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
89 CQE_QPID(err_cqe),
90 CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
91 CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
92 CQE_WRID_LOW(err_cqe));
93 spin_unlock(&dev->lock);
94 goto out;
95 }
96
97 if (SQ_TYPE(err_cqe))
98 cqid = qhp->attr.scq;
99 else
100 cqid = qhp->attr.rcq;
101 chp = get_chp(dev, cqid);
102 if (!chp) {
103 printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
104 "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
105 cqid, CQE_QPID(err_cqe),
106 CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
107 CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
108 CQE_WRID_LOW(err_cqe));
109 spin_unlock(&dev->lock);
110 goto out;
111 }
112
113 c4iw_qp_add_ref(&qhp->ibqp);
114 atomic_inc(&chp->refcnt);
115 spin_unlock(&dev->lock);
116
117 /* Bad incoming write */
118 if (RQ_TYPE(err_cqe) &&
119 (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
120 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
121 goto done;
122 }
123
124 switch (CQE_STATUS(err_cqe)) {
125
126 /* Completion Events */
127 case T4_ERR_SUCCESS:
128 printk(KERN_ERR MOD "AE with status 0!\n");
129 break;
130
131 case T4_ERR_STAG:
132 case T4_ERR_PDID:
133 case T4_ERR_QPID:
134 case T4_ERR_ACCESS:
135 case T4_ERR_WRAP:
136 case T4_ERR_BOUND:
137 case T4_ERR_INVALIDATE_SHARED_MR:
138 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
139 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
140 break;
141
142 /* Device Fatal Errors */
143 case T4_ERR_ECC:
144 case T4_ERR_ECC_PSTAG:
145 case T4_ERR_INTERNAL_ERR:
146 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
147 break;
148
149 /* QP Fatal Errors */
150 case T4_ERR_OUT_OF_RQE:
151 case T4_ERR_PBL_ADDR_BOUND:
152 case T4_ERR_CRC:
153 case T4_ERR_MARKER:
154 case T4_ERR_PDU_LEN_ERR:
155 case T4_ERR_DDP_VERSION:
156 case T4_ERR_RDMA_VERSION:
157 case T4_ERR_OPCODE:
158 case T4_ERR_DDP_QUEUE_NUM:
159 case T4_ERR_MSN:
160 case T4_ERR_TBIT:
161 case T4_ERR_MO:
162 case T4_ERR_MSN_GAP:
163 case T4_ERR_MSN_RANGE:
164 case T4_ERR_RQE_ADDR_BOUND:
165 case T4_ERR_IRD_OVERFLOW:
166 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
167 break;
168
169 default:
170 printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
171 CQE_STATUS(err_cqe), qhp->wq.sq.qid);
172 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
173 break;
174 }
175done:
176 if (atomic_dec_and_test(&chp->refcnt))
177 wake_up(&chp->wait);
178 c4iw_qp_rem_ref(&qhp->ibqp);
179out:
180 return;
181}
182
183int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
184{
185 struct c4iw_cq *chp;
186
187 chp = get_chp(dev, qid);
188 if (chp)
189 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
190 else
191 PDBG("%s unknown cqid 0x%x\n", __func__, qid);
192 return 0;
193}
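The events posted here reach consumers through the standard verbs callbacks. A hypothetical consumer-side handler, shown only to illustrate where IB_EVENT_QP_FATAL and friends end up (this is not part of the patch):

	static void my_qp_event_handler(struct ib_event *event, void *ctx)
	{
		/* event->element.qp is valid for the QP events posted above */
		if (event->event == IB_EVENT_QP_FATAL)
			printk(KERN_ERR "fatal async error on QP %p\n",
			       event->element.qp);
	}

	/* installed when the consumer creates its QP: */
	qp_init_attr.event_handler = my_qp_event_handler;
	qp_init_attr.qp_context = my_ctx;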
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
new file mode 100644
index 000000000000..ccce6fe75701
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -0,0 +1,743 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef __IW_CXGB4_H__
32#define __IW_CXGB4_H__
33
34#include <linux/mutex.h>
35#include <linux/list.h>
36#include <linux/spinlock.h>
37#include <linux/idr.h>
38#include <linux/workqueue.h>
39#include <linux/netdevice.h>
40#include <linux/sched.h>
41#include <linux/pci.h>
42#include <linux/dma-mapping.h>
43#include <linux/inet.h>
44#include <linux/wait.h>
45#include <linux/kref.h>
46#include <linux/timer.h>
47#include <linux/io.h>
48#include <linux/kfifo.h>
49
50#include <asm/byteorder.h>
51
52#include <net/net_namespace.h>
53
54#include <rdma/ib_verbs.h>
55#include <rdma/iw_cm.h>
56
57#include "cxgb4.h"
58#include "cxgb4_uld.h"
59#include "l2t.h"
60#include "user.h"
61
62#define DRV_NAME "iw_cxgb4"
63#define MOD DRV_NAME ":"
64
65extern int c4iw_debug;
66#define PDBG(fmt, args...) \
67do { \
68 if (c4iw_debug) \
69 printk(MOD fmt, ## args); \
70} while (0)
71
72#include "t4.h"
73
74#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
75#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
76
77static inline void *cplhdr(struct sk_buff *skb)
78{
79 return skb->data;
80}
81
82#define C4IW_WR_TO (10*HZ)
83
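/*
 * Synchronous firmware work requests: the issuer stashes a pointer to a
 * c4iw_wr_wait in the WR (see write_adapter_mem() in mem.c), sleeps on
 * ->wait for up to C4IW_WR_TO, and the reply handler sets ->done/->ret
 * and wakes the waiter.
 */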
84struct c4iw_wr_wait {
85 wait_queue_head_t wait;
86 int done;
87 int ret;
88};
89
90static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
91{
92 wr_waitp->ret = 0;
93 wr_waitp->done = 0;
94 init_waitqueue_head(&wr_waitp->wait);
95}
96
97struct c4iw_resource {
98 struct kfifo tpt_fifo;
99 spinlock_t tpt_fifo_lock;
100 struct kfifo qid_fifo;
101 spinlock_t qid_fifo_lock;
102 struct kfifo pdid_fifo;
103 spinlock_t pdid_fifo_lock;
104};
105
106struct c4iw_qid_list {
107 struct list_head entry;
108 u32 qid;
109};
110
111struct c4iw_dev_ucontext {
112 struct list_head qpids;
113 struct list_head cqids;
114 struct mutex lock;
115};
116
117enum c4iw_rdev_flags {
118 T4_FATAL_ERROR = (1<<0),
119};
120
121struct c4iw_rdev {
122 struct c4iw_resource resource;
123 unsigned long qpshift;
124 u32 qpmask;
125 unsigned long cqshift;
126 u32 cqmask;
127 struct c4iw_dev_ucontext uctx;
128 struct gen_pool *pbl_pool;
129 struct gen_pool *rqt_pool;
130 u32 flags;
131 struct cxgb4_lld_info lldi;
132};
133
134static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
135{
136 return rdev->flags & T4_FATAL_ERROR;
137}
138
139static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
140{
141 return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
142}
143
144struct c4iw_dev {
145 struct ib_device ibdev;
146 struct c4iw_rdev rdev;
147 u32 device_cap_flags;
148 struct idr cqidr;
149 struct idr qpidr;
150 struct idr mmidr;
151 spinlock_t lock;
152 struct list_head entry;
153 struct delayed_work db_drop_task;
154 struct dentry *debugfs_root;
155};
156
157static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
158{
159 return container_of(ibdev, struct c4iw_dev, ibdev);
160}
161
162static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
163{
164 return container_of(rdev, struct c4iw_dev, rdev);
165}
166
167static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
168{
169 return idr_find(&rhp->cqidr, cqid);
170}
171
172static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
173{
174 return idr_find(&rhp->qpidr, qpid);
175}
176
177static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
178{
179 return idr_find(&rhp->mmidr, mmid);
180}
181
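/*
 * QPs, CQs and memory objects are tracked in per-device IDRs keyed by
 * their hardware IDs (qpid, cqid, stag >> 8) so that CPL and async
 * event handlers can map a hardware ID back to its driver structure;
 * see get_qhp()/get_chp()/get_mhp() above.
 */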
182static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
183 void *handle, u32 id)
184{
185 int ret;
186 int newid;
187
188 do {
189 if (!idr_pre_get(idr, GFP_KERNEL))
190 return -ENOMEM;
191 spin_lock_irq(&rhp->lock);
192 ret = idr_get_new_above(idr, handle, id, &newid);
193 BUG_ON(newid != id);
194 spin_unlock_irq(&rhp->lock);
195 } while (ret == -EAGAIN);
196
197 return ret;
198}
199
200static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
201{
202 spin_lock_irq(&rhp->lock);
203 idr_remove(idr, id);
204 spin_unlock_irq(&rhp->lock);
205}
206
207struct c4iw_pd {
208 struct ib_pd ibpd;
209 u32 pdid;
210 struct c4iw_dev *rhp;
211};
212
213static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
214{
215 return container_of(ibpd, struct c4iw_pd, ibpd);
216}
217
218struct tpt_attributes {
219 u64 len;
220 u64 va_fbo;
221 enum fw_ri_mem_perms perms;
222 u32 stag;
223 u32 pdid;
224 u32 qpid;
225 u32 pbl_addr;
226 u32 pbl_size;
227 u32 state:1;
228 u32 type:2;
229 u32 rsvd:1;
230	u32 remote_invalidate_disable:1;
231 u32 zbva:1;
232 u32 mw_bind_enable:1;
233 u32 page_size:5;
234};
235
236struct c4iw_mr {
237 struct ib_mr ibmr;
238 struct ib_umem *umem;
239 struct c4iw_dev *rhp;
240 u64 kva;
241 struct tpt_attributes attr;
242};
243
244static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
245{
246 return container_of(ibmr, struct c4iw_mr, ibmr);
247}
248
249struct c4iw_mw {
250 struct ib_mw ibmw;
251 struct c4iw_dev *rhp;
252 u64 kva;
253 struct tpt_attributes attr;
254};
255
256static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
257{
258 return container_of(ibmw, struct c4iw_mw, ibmw);
259}
260
261struct c4iw_fr_page_list {
262 struct ib_fast_reg_page_list ibpl;
263 DECLARE_PCI_UNMAP_ADDR(mapping);
264 dma_addr_t dma_addr;
265 struct c4iw_dev *dev;
266 int size;
267};
268
269static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
270 struct ib_fast_reg_page_list *ibpl)
271{
272 return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
273}
274
275struct c4iw_cq {
276 struct ib_cq ibcq;
277 struct c4iw_dev *rhp;
278 struct t4_cq cq;
279 spinlock_t lock;
280 atomic_t refcnt;
281 wait_queue_head_t wait;
282};
283
284static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
285{
286 return container_of(ibcq, struct c4iw_cq, ibcq);
287}
288
289struct c4iw_mpa_attributes {
290 u8 initiator;
291 u8 recv_marker_enabled;
292 u8 xmit_marker_enabled;
293 u8 crc_enabled;
294 u8 version;
295 u8 p2p_type;
296};
297
298struct c4iw_qp_attributes {
299 u32 scq;
300 u32 rcq;
301 u32 sq_num_entries;
302 u32 rq_num_entries;
303 u32 sq_max_sges;
304 u32 sq_max_sges_rdma_write;
305 u32 rq_max_sges;
306 u32 state;
307 u8 enable_rdma_read;
308 u8 enable_rdma_write;
309 u8 enable_bind;
310 u8 enable_mmid0_fastreg;
311 u32 max_ord;
312 u32 max_ird;
313 u32 pd;
314 u32 next_state;
315 char terminate_buffer[52];
316 u32 terminate_msg_len;
317 u8 is_terminate_local;
318 struct c4iw_mpa_attributes mpa_attr;
319 struct c4iw_ep *llp_stream_handle;
320};
321
322struct c4iw_qp {
323 struct ib_qp ibqp;
324 struct c4iw_dev *rhp;
325 struct c4iw_ep *ep;
326 struct c4iw_qp_attributes attr;
327 struct t4_wq wq;
328 spinlock_t lock;
329 atomic_t refcnt;
330 wait_queue_head_t wait;
331 struct timer_list timer;
332};
333
334static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
335{
336 return container_of(ibqp, struct c4iw_qp, ibqp);
337}
338
339struct c4iw_ucontext {
340 struct ib_ucontext ibucontext;
341 struct c4iw_dev_ucontext uctx;
342 u32 key;
343 spinlock_t mmap_lock;
344 struct list_head mmaps;
345};
346
347static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
348{
349 return container_of(c, struct c4iw_ucontext, ibucontext);
350}
351
352struct c4iw_mm_entry {
353 struct list_head entry;
354 u64 addr;
355 u32 key;
356 unsigned len;
357};
358
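/*
 * Queue memory and doorbell pages are exported to user space through
 * mmap(): at create time the kernel records a (key, addr, len) entry
 * with insert_mmap() and hands the key back via ib_udata, and
 * c4iw_mmap() in provider.c later removes the entry by key and remaps
 * the backing pages into the user VMA.
 */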
359static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
360 u32 key, unsigned len)
361{
362 struct list_head *pos, *nxt;
363 struct c4iw_mm_entry *mm;
364
365 spin_lock(&ucontext->mmap_lock);
366 list_for_each_safe(pos, nxt, &ucontext->mmaps) {
367
368 mm = list_entry(pos, struct c4iw_mm_entry, entry);
369 if (mm->key == key && mm->len == len) {
370 list_del_init(&mm->entry);
371 spin_unlock(&ucontext->mmap_lock);
372 PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
373 key, (unsigned long long) mm->addr, mm->len);
374 return mm;
375 }
376 }
377 spin_unlock(&ucontext->mmap_lock);
378 return NULL;
379}
380
381static inline void insert_mmap(struct c4iw_ucontext *ucontext,
382 struct c4iw_mm_entry *mm)
383{
384 spin_lock(&ucontext->mmap_lock);
385 PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
386 mm->key, (unsigned long long) mm->addr, mm->len);
387 list_add_tail(&mm->entry, &ucontext->mmaps);
388 spin_unlock(&ucontext->mmap_lock);
389}
390
391enum c4iw_qp_attr_mask {
392 C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
393 C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
394 C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
395 C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
396 C4IW_QP_ATTR_MAX_ORD = 1 << 11,
397 C4IW_QP_ATTR_MAX_IRD = 1 << 12,
398 C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
399 C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
400 C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
401 C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
402 C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
403 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
404 C4IW_QP_ATTR_MAX_ORD |
405 C4IW_QP_ATTR_MAX_IRD |
406 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
407 C4IW_QP_ATTR_STREAM_MSG_BUFFER |
408 C4IW_QP_ATTR_MPA_ATTR |
409 C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
410};
411
412int c4iw_modify_qp(struct c4iw_dev *rhp,
413 struct c4iw_qp *qhp,
414 enum c4iw_qp_attr_mask mask,
415 struct c4iw_qp_attributes *attrs,
416 int internal);
417
418enum c4iw_qp_state {
419 C4IW_QP_STATE_IDLE,
420 C4IW_QP_STATE_RTS,
421 C4IW_QP_STATE_ERROR,
422 C4IW_QP_STATE_TERMINATE,
423 C4IW_QP_STATE_CLOSING,
424 C4IW_QP_STATE_TOT
425};
426
427static inline int c4iw_convert_state(enum ib_qp_state ib_state)
428{
429 switch (ib_state) {
430 case IB_QPS_RESET:
431 case IB_QPS_INIT:
432 return C4IW_QP_STATE_IDLE;
433 case IB_QPS_RTS:
434 return C4IW_QP_STATE_RTS;
435 case IB_QPS_SQD:
436 return C4IW_QP_STATE_CLOSING;
437 case IB_QPS_SQE:
438 return C4IW_QP_STATE_TERMINATE;
439 case IB_QPS_ERR:
440 return C4IW_QP_STATE_ERROR;
441 default:
442 return -1;
443 }
444}
445
446static inline u32 c4iw_ib_to_tpt_access(int a)
447{
448 return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
449 (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
450 (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
451 FW_RI_MEM_ACCESS_LOCAL_READ;
452}
453
454static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
455{
456 return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
457 (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
458}
459
460enum c4iw_mmid_state {
461 C4IW_STAG_STATE_VALID,
462 C4IW_STAG_STATE_INVALID
463};
464
465#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"
466
467#define MPA_KEY_REQ "MPA ID Req Frame"
468#define MPA_KEY_REP "MPA ID Rep Frame"
469
470#define MPA_MAX_PRIVATE_DATA 256
471#define MPA_REJECT 0x20
472#define MPA_CRC 0x40
473#define MPA_MARKERS 0x80
474#define MPA_FLAGS_MASK 0xE0
475
476#define c4iw_put_ep(ep) { \
477 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
478 ep, atomic_read(&((ep)->kref.refcount))); \
479 WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
480 kref_put(&((ep)->kref), _c4iw_free_ep); \
481}
482
483#define c4iw_get_ep(ep) { \
484 PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
485 ep, atomic_read(&((ep)->kref.refcount))); \
486 kref_get(&((ep)->kref)); \
487}
488void _c4iw_free_ep(struct kref *kref);
489
490struct mpa_message {
491 u8 key[16];
492 u8 flags;
493 u8 revision;
494 __be16 private_data_size;
495 u8 private_data[0];
496};
497
498struct terminate_message {
499 u8 layer_etype;
500 u8 ecode;
501 __be16 hdrct_rsvd;
502 u8 len_hdrs[0];
503};
504
505#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
506
507enum c4iw_layers_types {
508 LAYER_RDMAP = 0x00,
509 LAYER_DDP = 0x10,
510 LAYER_MPA = 0x20,
511 RDMAP_LOCAL_CATA = 0x00,
512 RDMAP_REMOTE_PROT = 0x01,
513 RDMAP_REMOTE_OP = 0x02,
514 DDP_LOCAL_CATA = 0x00,
515 DDP_TAGGED_ERR = 0x01,
516 DDP_UNTAGGED_ERR = 0x02,
517 DDP_LLP = 0x03
518};
519
520enum c4iw_rdma_ecodes {
521 RDMAP_INV_STAG = 0x00,
522 RDMAP_BASE_BOUNDS = 0x01,
523 RDMAP_ACC_VIOL = 0x02,
524 RDMAP_STAG_NOT_ASSOC = 0x03,
525 RDMAP_TO_WRAP = 0x04,
526 RDMAP_INV_VERS = 0x05,
527 RDMAP_INV_OPCODE = 0x06,
528 RDMAP_STREAM_CATA = 0x07,
529 RDMAP_GLOBAL_CATA = 0x08,
530 RDMAP_CANT_INV_STAG = 0x09,
531 RDMAP_UNSPECIFIED = 0xff
532};
533
534enum c4iw_ddp_ecodes {
535 DDPT_INV_STAG = 0x00,
536 DDPT_BASE_BOUNDS = 0x01,
537 DDPT_STAG_NOT_ASSOC = 0x02,
538 DDPT_TO_WRAP = 0x03,
539 DDPT_INV_VERS = 0x04,
540 DDPU_INV_QN = 0x01,
541 DDPU_INV_MSN_NOBUF = 0x02,
542 DDPU_INV_MSN_RANGE = 0x03,
543 DDPU_INV_MO = 0x04,
544 DDPU_MSG_TOOBIG = 0x05,
545 DDPU_INV_VERS = 0x06
546};
547
548enum c4iw_mpa_ecodes {
549 MPA_CRC_ERR = 0x02,
550 MPA_MARKER_ERR = 0x03
551};
552
553enum c4iw_ep_state {
554 IDLE = 0,
555 LISTEN,
556 CONNECTING,
557 MPA_REQ_WAIT,
558 MPA_REQ_SENT,
559 MPA_REQ_RCVD,
560 MPA_REP_SENT,
561 FPDU_MODE,
562 ABORTING,
563 CLOSING,
564 MORIBUND,
565 DEAD,
566};
567
568enum c4iw_ep_flags {
569 PEER_ABORT_IN_PROGRESS = 0,
570 ABORT_REQ_IN_PROGRESS = 1,
571 RELEASE_RESOURCES = 2,
572 CLOSE_SENT = 3,
573};
574
575struct c4iw_ep_common {
576 struct iw_cm_id *cm_id;
577 struct c4iw_qp *qp;
578 struct c4iw_dev *dev;
579 enum c4iw_ep_state state;
580 struct kref kref;
581 spinlock_t lock;
582 struct sockaddr_in local_addr;
583 struct sockaddr_in remote_addr;
584 wait_queue_head_t waitq;
585 int rpl_done;
586 int rpl_err;
587 unsigned long flags;
588};
589
590struct c4iw_listen_ep {
591 struct c4iw_ep_common com;
592 unsigned int stid;
593 int backlog;
594};
595
596struct c4iw_ep {
597 struct c4iw_ep_common com;
598 struct c4iw_ep *parent_ep;
599 struct timer_list timer;
600 unsigned int atid;
601 u32 hwtid;
602 u32 snd_seq;
603 u32 rcv_seq;
604 struct l2t_entry *l2t;
605 struct dst_entry *dst;
606 struct sk_buff *mpa_skb;
607 struct c4iw_mpa_attributes mpa_attr;
608 u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
609 unsigned int mpa_pkt_len;
610 u32 ird;
611 u32 ord;
612 u32 smac_idx;
613 u32 tx_chan;
614 u32 mtu;
615 u16 mss;
616 u16 emss;
617 u16 plen;
618 u16 rss_qid;
619 u16 txq_idx;
620 u8 tos;
621};
622
623static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
624{
625 return cm_id->provider_data;
626}
627
628static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
629{
630 return cm_id->provider_data;
631}
632
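/*
 * For example, compute_wscale(262144) returns 3: 65535 << 2 == 262140
 * is still smaller than a 256KB window, while 65535 << 3 covers it.
 */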
633static inline int compute_wscale(int win)
634{
635 int wscale = 0;
636
637 while (wscale < 14 && (65535<<wscale) < win)
638 wscale++;
639 return wscale;
640}
641
642typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
643
644int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
645 struct l2t_entry *l2t);
646void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
647 struct c4iw_dev_ucontext *uctx);
648u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
649void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
650int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
651int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
652int c4iw_pblpool_create(struct c4iw_rdev *rdev);
653int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
654void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
655void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
656void c4iw_destroy_resource(struct c4iw_resource *rscp);
657int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
658int c4iw_register_device(struct c4iw_dev *dev);
659void c4iw_unregister_device(struct c4iw_dev *dev);
660int __init c4iw_cm_init(void);
661void __exit c4iw_cm_term(void);
662void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
663 struct c4iw_dev_ucontext *uctx);
664void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
665 struct c4iw_dev_ucontext *uctx);
666int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
667int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
668 struct ib_send_wr **bad_wr);
669int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
670 struct ib_recv_wr **bad_wr);
671int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
672 struct ib_mw_bind *mw_bind);
673int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
674int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
675int c4iw_destroy_listen(struct iw_cm_id *cm_id);
676int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
677int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
678void c4iw_qp_add_ref(struct ib_qp *qp);
679void c4iw_qp_rem_ref(struct ib_qp *qp);
680void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
681struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
682 struct ib_device *device,
683 int page_list_len);
684struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
685int c4iw_dealloc_mw(struct ib_mw *mw);
686struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
687struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
688 u64 length, u64 virt, int acc,
689 struct ib_udata *udata);
690struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
691struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
692 struct ib_phys_buf *buffer_list,
693 int num_phys_buf,
694 int acc,
695 u64 *iova_start);
696int c4iw_reregister_phys_mem(struct ib_mr *mr,
697 int mr_rereg_mask,
698 struct ib_pd *pd,
699 struct ib_phys_buf *buffer_list,
700 int num_phys_buf,
701 int acc, u64 *iova_start);
702int c4iw_dereg_mr(struct ib_mr *ib_mr);
703int c4iw_destroy_cq(struct ib_cq *ib_cq);
704struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
705 int vector,
706 struct ib_ucontext *ib_context,
707 struct ib_udata *udata);
708int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
709int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
710int c4iw_destroy_qp(struct ib_qp *ib_qp);
711struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
712 struct ib_qp_init_attr *attrs,
713 struct ib_udata *udata);
714int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
715 int attr_mask, struct ib_udata *udata);
716struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
717u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
718void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
719u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
720void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
721int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
722void c4iw_flush_hw_cq(struct t4_cq *cq);
723void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
724void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
725int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
726int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
727int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
728int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
729u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
730int c4iw_post_zb_read(struct c4iw_qp *qhp);
731int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
732u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
733void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
734 struct c4iw_dev_ucontext *uctx);
735u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
736void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
737 struct c4iw_dev_ucontext *uctx);
738void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
739
740extern struct cxgb4_client t4c_client;
741extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
742
743#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
new file mode 100644
index 000000000000..e54ff6d25691
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -0,0 +1,811 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_umem.h>
34#include <asm/atomic.h>
35
36#include "iw_cxgb4.h"
37
38#define T4_ULPTX_MIN_IO 32
39#define C4IW_MAX_INLINE_SIZE 96
40
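/*
 * write_adapter_mem() - copy (or zero, when data is NULL) len bytes of
 * adapter memory at 32-byte-unit offset addr using ULP_TX_MEM_WRITE
 * work requests, with at most C4IW_MAX_INLINE_SIZE bytes of inline
 * payload per WR.  Only the final WR requests a completion; the caller
 * then sleeps on a c4iw_wr_wait until the firmware replies or C4IW_WR_TO
 * expires, in which case the device is marked fatally broken.
 */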
41static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
42 void *data)
43{
44 struct sk_buff *skb;
45 struct ulp_mem_io *req;
46 struct ulptx_idata *sc;
47 u8 wr_len, *to_dp, *from_dp;
48 int copy_len, num_wqe, i, ret = 0;
49 struct c4iw_wr_wait wr_wait;
50
51 addr &= 0x7FFFFFF;
52 PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
53 num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
54 c4iw_init_wr_wait(&wr_wait);
55 for (i = 0; i < num_wqe; i++) {
56
57 copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
58 len;
59 wr_len = roundup(sizeof *req + sizeof *sc +
60 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
61
62		skb = alloc_skb(wr_len, GFP_KERNEL);
63 if (!skb)
64 return -ENOMEM;
65 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
66
67 req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
68 memset(req, 0, wr_len);
69 INIT_ULPTX_WR(req, wr_len, 0, 0);
70
71 if (i == (num_wqe-1)) {
72 req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
73 FW_WR_COMPL(1));
74 req->wr.wr_lo = (__force __be64)&wr_wait;
75 } else
76 req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
77 req->wr.wr_mid = cpu_to_be32(
78 FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
79
80 req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
81 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
82 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
83 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
84 16));
85 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
86
87 sc = (struct ulptx_idata *)(req + 1);
88 sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
89 sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
90
91 to_dp = (u8 *)(sc + 1);
92 from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
93 if (data)
94 memcpy(to_dp, from_dp, copy_len);
95 else
96 memset(to_dp, 0, copy_len);
97 if (copy_len % T4_ULPTX_MIN_IO)
98 memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
99 (copy_len % T4_ULPTX_MIN_IO));
100 ret = c4iw_ofld_send(rdev, skb);
101 if (ret)
102 return ret;
103 len -= C4IW_MAX_INLINE_SIZE;
104 }
105
106 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
107 if (!wr_wait.done) {
108 printk(KERN_ERR MOD "Device %s not responding!\n",
109 pci_name(rdev->lldi.pdev));
110		rdev->flags |= T4_FATAL_ERROR;
111 ret = -EIO;
112 } else
113 ret = wr_wait.ret;
114 return ret;
115}
116
117/*
118 * Build and write a TPT entry.
119 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
120 * pbl_size and pbl_addr
121 * OUT: stag index
122 */
123static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
124 u32 *stag, u8 stag_state, u32 pdid,
125 enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
126 int bind_enabled, u32 zbva, u64 to,
127 u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
128{
129 int err;
130 struct fw_ri_tpte tpt;
131 u32 stag_idx;
132 static atomic_t key;
133
134 if (c4iw_fatal_error(rdev))
135 return -EIO;
136
137 stag_state = stag_state > 0;
138 stag_idx = (*stag) >> 8;
139
140 if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
141 stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
142 &rdev->resource.tpt_fifo_lock);
143 if (!stag_idx)
144 return -ENOMEM;
145 *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
146 }
147 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
148 __func__, stag_state, type, pdid, stag_idx);
149
150 /* write TPT entry */
151 if (reset_tpt_entry)
152 memset(&tpt, 0, sizeof(tpt));
153 else {
154 tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
155 V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
156 V_FW_RI_TPTE_STAGSTATE(stag_state) |
157 V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
158 tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
159 (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
160 V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
161 FW_RI_VA_BASED_TO))|
162 V_FW_RI_TPTE_PS(page_size));
163 tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
164 V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
165 tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
166 tpt.va_hi = cpu_to_be32((u32)(to >> 32));
167 tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
168 tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
169 tpt.len_hi = cpu_to_be32((u32)(len >> 32));
170 }
171 err = write_adapter_mem(rdev, stag_idx +
172 (rdev->lldi.vr->stag.start >> 5),
173 sizeof(tpt), &tpt);
174
175 if (reset_tpt_entry)
176 c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
177 &rdev->resource.tpt_fifo_lock);
178 return err;
179}
180
181static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
182 u32 pbl_addr, u32 pbl_size)
183{
184 int err;
185
186 PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
187 __func__, pbl_addr, rdev->lldi.vr->pbl.start,
188 pbl_size);
189
190 err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
191 return err;
192}
193
194static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
195 u32 pbl_addr)
196{
197 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
198 pbl_size, pbl_addr);
199}
200
201static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
202{
203 *stag = T4_STAG_UNSET;
204 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
205 0UL, 0, 0, 0, 0);
206}
207
208static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
209{
210 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
211 0);
212}
213
214static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
215 u32 pbl_size, u32 pbl_addr)
216{
217 *stag = T4_STAG_UNSET;
218 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
219 0UL, 0, 0, pbl_size, pbl_addr);
220}
221
222static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
223{
224 u32 mmid;
225
226 mhp->attr.state = 1;
227 mhp->attr.stag = stag;
228 mmid = stag >> 8;
229 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
230 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
231 return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
232}
233
234static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
235 struct c4iw_mr *mhp, int shift)
236{
237 u32 stag = T4_STAG_UNSET;
238 int ret;
239
240 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
241 FW_RI_STAG_NSMR, mhp->attr.perms,
242 mhp->attr.mw_bind_enable, mhp->attr.zbva,
243 mhp->attr.va_fbo, mhp->attr.len, shift - 12,
244 mhp->attr.pbl_size, mhp->attr.pbl_addr);
245 if (ret)
246 return ret;
247
248 ret = finish_mem_reg(mhp, stag);
249 if (ret)
250 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
251 mhp->attr.pbl_addr);
252 return ret;
253}
254
255static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
256 struct c4iw_mr *mhp, int shift, int npages)
257{
258 u32 stag;
259 int ret;
260
261 if (npages > mhp->attr.pbl_size)
262 return -ENOMEM;
263
264 stag = mhp->attr.stag;
265 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
266 FW_RI_STAG_NSMR, mhp->attr.perms,
267 mhp->attr.mw_bind_enable, mhp->attr.zbva,
268 mhp->attr.va_fbo, mhp->attr.len, shift - 12,
269 mhp->attr.pbl_size, mhp->attr.pbl_addr);
270 if (ret)
271 return ret;
272
273 ret = finish_mem_reg(mhp, stag);
274 if (ret)
275 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
276 mhp->attr.pbl_addr);
277
278 return ret;
279}
280
281static int alloc_pbl(struct c4iw_mr *mhp, int npages)
282{
283 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
284 npages << 3);
285
286 if (!mhp->attr.pbl_addr)
287 return -ENOMEM;
288
289 mhp->attr.pbl_size = npages;
290
291 return 0;
292}
293
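/*
 * Collapse a physical buffer list into one page size plus a PBL.  The
 * OR-accumulated mask of buffer boundaries selects the largest page
 * shift that still covers every buffer: e.g. two adjacent 2MB-aligned
 * 2MB buffers leave no bit below 2^21 set in the mask, so *shift
 * becomes 21 and the page list ends up with two 2MB entries.
 */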
294static int build_phys_page_list(struct ib_phys_buf *buffer_list,
295 int num_phys_buf, u64 *iova_start,
296 u64 *total_size, int *npages,
297 int *shift, __be64 **page_list)
298{
299 u64 mask;
300 int i, j, n;
301
302 mask = 0;
303 *total_size = 0;
304 for (i = 0; i < num_phys_buf; ++i) {
305 if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
306 return -EINVAL;
307 if (i != 0 && i != num_phys_buf - 1 &&
308 (buffer_list[i].size & ~PAGE_MASK))
309 return -EINVAL;
310 *total_size += buffer_list[i].size;
311 if (i > 0)
312 mask |= buffer_list[i].addr;
313 else
314 mask |= buffer_list[i].addr & PAGE_MASK;
315 if (i != num_phys_buf - 1)
316 mask |= buffer_list[i].addr + buffer_list[i].size;
317 else
318 mask |= (buffer_list[i].addr + buffer_list[i].size +
319 PAGE_SIZE - 1) & PAGE_MASK;
320 }
321
322 if (*total_size > 0xFFFFFFFFULL)
323 return -ENOMEM;
324
325 /* Find largest page shift we can use to cover buffers */
326 for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
327 if ((1ULL << *shift) & mask)
328 break;
329
330 buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
331 buffer_list[0].addr &= ~0ull << *shift;
332
333 *npages = 0;
334 for (i = 0; i < num_phys_buf; ++i)
335 *npages += (buffer_list[i].size +
336 (1ULL << *shift) - 1) >> *shift;
337
338 if (!*npages)
339 return -EINVAL;
340
341 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
342 if (!*page_list)
343 return -ENOMEM;
344
345 n = 0;
346 for (i = 0; i < num_phys_buf; ++i)
347 for (j = 0;
348 j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
349 ++j)
350 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
351 ((u64) j << *shift));
352
353 PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
354 __func__, (unsigned long long)*iova_start,
355 (unsigned long long)mask, *shift, (unsigned long long)*total_size,
356 *npages);
357
358 return 0;
359
360}
361
362int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
363 struct ib_pd *pd, struct ib_phys_buf *buffer_list,
364 int num_phys_buf, int acc, u64 *iova_start)
365{
366
367 struct c4iw_mr mh, *mhp;
368 struct c4iw_pd *php;
369 struct c4iw_dev *rhp;
370 __be64 *page_list = NULL;
371 int shift = 0;
372 u64 total_size;
373 int npages;
374 int ret;
375
376 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
377
378 /* There can be no memory windows */
379 if (atomic_read(&mr->usecnt))
380 return -EINVAL;
381
382 mhp = to_c4iw_mr(mr);
383 rhp = mhp->rhp;
384 php = to_c4iw_pd(mr->pd);
385
386 /* make sure we are on the same adapter */
387 if (rhp != php->rhp)
388 return -EINVAL;
389
390 memcpy(&mh, mhp, sizeof *mhp);
391
392 if (mr_rereg_mask & IB_MR_REREG_PD)
393 php = to_c4iw_pd(pd);
394 if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
395 mh.attr.perms = c4iw_ib_to_tpt_access(acc);
396 mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
397 IB_ACCESS_MW_BIND;
398 }
399 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
400 ret = build_phys_page_list(buffer_list, num_phys_buf,
401 iova_start,
402 &total_size, &npages,
403 &shift, &page_list);
404 if (ret)
405 return ret;
406 }
407
408 ret = reregister_mem(rhp, php, &mh, shift, npages);
409 kfree(page_list);
410 if (ret)
411 return ret;
412 if (mr_rereg_mask & IB_MR_REREG_PD)
413 mhp->attr.pdid = php->pdid;
414 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
415 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
416 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
417 mhp->attr.zbva = 0;
418 mhp->attr.va_fbo = *iova_start;
419 mhp->attr.page_size = shift - 12;
420 mhp->attr.len = (u32) total_size;
421 mhp->attr.pbl_size = npages;
422 }
423
424 return 0;
425}
426
427struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
428 struct ib_phys_buf *buffer_list,
429 int num_phys_buf, int acc, u64 *iova_start)
430{
431 __be64 *page_list;
432 int shift;
433 u64 total_size;
434 int npages;
435 struct c4iw_dev *rhp;
436 struct c4iw_pd *php;
437 struct c4iw_mr *mhp;
438 int ret;
439
440 PDBG("%s ib_pd %p\n", __func__, pd);
441 php = to_c4iw_pd(pd);
442 rhp = php->rhp;
443
444 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
445 if (!mhp)
446 return ERR_PTR(-ENOMEM);
447
448 mhp->rhp = rhp;
449
450 /* First check that we have enough alignment */
451 if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
452 ret = -EINVAL;
453 goto err;
454 }
455
456 if (num_phys_buf > 1 &&
457 ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
458 ret = -EINVAL;
459 goto err;
460 }
461
462 ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
463 &total_size, &npages, &shift,
464 &page_list);
465 if (ret)
466 goto err;
467
468 ret = alloc_pbl(mhp, npages);
469 if (ret) {
470 kfree(page_list);
471 goto err_pbl;
472 }
473
474 ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
475 npages);
476 kfree(page_list);
477 if (ret)
478 goto err_pbl;
479
480 mhp->attr.pdid = php->pdid;
481 mhp->attr.zbva = 0;
482
483 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
484 mhp->attr.va_fbo = *iova_start;
485 mhp->attr.page_size = shift - 12;
486
487 mhp->attr.len = (u32) total_size;
488 mhp->attr.pbl_size = npages;
489 ret = register_mem(rhp, php, mhp, shift);
490 if (ret)
491 goto err_pbl;
492
493 return &mhp->ibmr;
494
495err_pbl:
496 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
497 mhp->attr.pbl_size << 3);
498
499err:
500 kfree(mhp);
501 return ERR_PTR(ret);
502
503}
504
505struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
506{
507 struct c4iw_dev *rhp;
508 struct c4iw_pd *php;
509 struct c4iw_mr *mhp;
510 int ret;
511 u32 stag = T4_STAG_UNSET;
512
513 PDBG("%s ib_pd %p\n", __func__, pd);
514 php = to_c4iw_pd(pd);
515 rhp = php->rhp;
516
517 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
518 if (!mhp)
519 return ERR_PTR(-ENOMEM);
520
521 mhp->rhp = rhp;
522 mhp->attr.pdid = php->pdid;
523 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
524 mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
525 mhp->attr.zbva = 0;
526 mhp->attr.va_fbo = 0;
527 mhp->attr.page_size = 0;
528 mhp->attr.len = ~0UL;
529 mhp->attr.pbl_size = 0;
530
531 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
532 FW_RI_STAG_NSMR, mhp->attr.perms,
533 mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
534 if (ret)
535 goto err1;
536
537 ret = finish_mem_reg(mhp, stag);
538 if (ret)
539 goto err2;
540 return &mhp->ibmr;
541err2:
542 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
543 mhp->attr.pbl_addr);
544err1:
545 kfree(mhp);
546 return ERR_PTR(ret);
547}
548
549struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
550 u64 virt, int acc, struct ib_udata *udata)
551{
552 __be64 *pages;
553 int shift, n, len;
554 int i, j, k;
555 int err = 0;
556 struct ib_umem_chunk *chunk;
557 struct c4iw_dev *rhp;
558 struct c4iw_pd *php;
559 struct c4iw_mr *mhp;
560
561 PDBG("%s ib_pd %p\n", __func__, pd);
562
563 if (length == ~0ULL)
564 return ERR_PTR(-EINVAL);
565
566 if ((length + start) < start)
567 return ERR_PTR(-EINVAL);
568
569 php = to_c4iw_pd(pd);
570 rhp = php->rhp;
571 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
572 if (!mhp)
573 return ERR_PTR(-ENOMEM);
574
575 mhp->rhp = rhp;
576
577 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
578 if (IS_ERR(mhp->umem)) {
579 err = PTR_ERR(mhp->umem);
580 kfree(mhp);
581 return ERR_PTR(err);
582 }
583
584 shift = ffs(mhp->umem->page_size) - 1;
585
586 n = 0;
587 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
588 n += chunk->nents;
589
590 err = alloc_pbl(mhp, n);
591 if (err)
592 goto err;
593
594 pages = (__be64 *) __get_free_page(GFP_KERNEL);
595 if (!pages) {
596 err = -ENOMEM;
597 goto err_pbl;
598 }
599
600 i = n = 0;
601
602 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
603 for (j = 0; j < chunk->nmap; ++j) {
604 len = sg_dma_len(&chunk->page_list[j]) >> shift;
605 for (k = 0; k < len; ++k) {
606 pages[i++] = cpu_to_be64(sg_dma_address(
607 &chunk->page_list[j]) +
608 mhp->umem->page_size * k);
609 if (i == PAGE_SIZE / sizeof *pages) {
610 err = write_pbl(&mhp->rhp->rdev,
611 pages,
612 mhp->attr.pbl_addr + (n << 3), i);
613 if (err)
614 goto pbl_done;
615 n += i;
616 i = 0;
617 }
618 }
619 }
620
621 if (i)
622 err = write_pbl(&mhp->rhp->rdev, pages,
623 mhp->attr.pbl_addr + (n << 3), i);
624
625pbl_done:
626 free_page((unsigned long) pages);
627 if (err)
628 goto err_pbl;
629
630 mhp->attr.pdid = php->pdid;
631 mhp->attr.zbva = 0;
632 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
633 mhp->attr.va_fbo = virt;
634 mhp->attr.page_size = shift - 12;
635 mhp->attr.len = (u32) length;
636
637 err = register_mem(rhp, php, mhp, shift);
638 if (err)
639 goto err_pbl;
640
641 return &mhp->ibmr;
642
643err_pbl:
644 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
645 mhp->attr.pbl_size << 3);
646
647err:
648 ib_umem_release(mhp->umem);
649 kfree(mhp);
650 return ERR_PTR(err);
651}
652
653struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
654{
655 struct c4iw_dev *rhp;
656 struct c4iw_pd *php;
657 struct c4iw_mw *mhp;
658 u32 mmid;
659 u32 stag = 0;
660 int ret;
661
662 php = to_c4iw_pd(pd);
663 rhp = php->rhp;
664 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
665 if (!mhp)
666 return ERR_PTR(-ENOMEM);
667 ret = allocate_window(&rhp->rdev, &stag, php->pdid);
668 if (ret) {
669 kfree(mhp);
670 return ERR_PTR(ret);
671 }
672 mhp->rhp = rhp;
673 mhp->attr.pdid = php->pdid;
674 mhp->attr.type = FW_RI_STAG_MW;
675 mhp->attr.stag = stag;
676 mmid = (stag) >> 8;
677 mhp->ibmw.rkey = stag;
678 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
679 deallocate_window(&rhp->rdev, mhp->attr.stag);
680 kfree(mhp);
681 return ERR_PTR(-ENOMEM);
682 }
683 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
684 return &(mhp->ibmw);
685}
686
687int c4iw_dealloc_mw(struct ib_mw *mw)
688{
689 struct c4iw_dev *rhp;
690 struct c4iw_mw *mhp;
691 u32 mmid;
692
693 mhp = to_c4iw_mw(mw);
694 rhp = mhp->rhp;
695 mmid = (mw->rkey) >> 8;
696 deallocate_window(&rhp->rdev, mhp->attr.stag);
697 remove_handle(rhp, &rhp->mmidr, mmid);
698 kfree(mhp);
699 PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
700 return 0;
701}
702
703struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
704{
705 struct c4iw_dev *rhp;
706 struct c4iw_pd *php;
707 struct c4iw_mr *mhp;
708 u32 mmid;
709 u32 stag = 0;
710 int ret = 0;
711
712 php = to_c4iw_pd(pd);
713 rhp = php->rhp;
714 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
715 if (!mhp)
716 goto err;
717
718 mhp->rhp = rhp;
719 ret = alloc_pbl(mhp, pbl_depth);
720 if (ret)
721 goto err1;
722 mhp->attr.pbl_size = pbl_depth;
723 ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
724 mhp->attr.pbl_size, mhp->attr.pbl_addr);
725 if (ret)
726 goto err2;
727 mhp->attr.pdid = php->pdid;
728 mhp->attr.type = FW_RI_STAG_NSMR;
729 mhp->attr.stag = stag;
730 mhp->attr.state = 1;
731 mmid = (stag) >> 8;
732 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
733 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
734 goto err3;
735
736 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
737 return &(mhp->ibmr);
738err3:
739 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
740 mhp->attr.pbl_addr);
741err2:
742 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
743 mhp->attr.pbl_size << 3);
744err1:
745 kfree(mhp);
746err:
747 return ERR_PTR(ret);
748}
749
750struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
751 int page_list_len)
752{
753 struct c4iw_fr_page_list *c4pl;
754 struct c4iw_dev *dev = to_c4iw_dev(device);
755 dma_addr_t dma_addr;
756 int size = sizeof *c4pl + page_list_len * sizeof(u64);
757
758 if (page_list_len > T4_MAX_FR_DEPTH)
759 return ERR_PTR(-EINVAL);
760
761 c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
762 &dma_addr, GFP_KERNEL);
763 if (!c4pl)
764 return ERR_PTR(-ENOMEM);
765
766 pci_unmap_addr_set(c4pl, mapping, dma_addr);
767 c4pl->dma_addr = dma_addr;
768 c4pl->dev = dev;
769 c4pl->size = size;
770 c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
771 c4pl->ibpl.max_page_list_len = page_list_len;
772
773 return &c4pl->ibpl;
774}
775
776void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
777{
778 struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
779
780 dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
781 c4pl, pci_unmap_addr(c4pl, mapping));
782}
783
784int c4iw_dereg_mr(struct ib_mr *ib_mr)
785{
786 struct c4iw_dev *rhp;
787 struct c4iw_mr *mhp;
788 u32 mmid;
789
790 PDBG("%s ib_mr %p\n", __func__, ib_mr);
791 /* There can be no memory windows */
792 if (atomic_read(&ib_mr->usecnt))
793 return -EINVAL;
794
795 mhp = to_c4iw_mr(ib_mr);
796 rhp = mhp->rhp;
797 mmid = mhp->attr.stag >> 8;
798 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
799 mhp->attr.pbl_addr);
800 if (mhp->attr.pbl_size)
801 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
802 mhp->attr.pbl_size << 3);
803 remove_handle(rhp, &rhp->mmidr, mmid);
804 if (mhp->kva)
805 kfree((void *) (unsigned long) mhp->kva);
806 if (mhp->umem)
807 ib_umem_release(mhp->umem);
808 PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
809 kfree(mhp);
810 return 0;
811}
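These registration paths are reached through the core verbs API once c4iw_register_device() (provider.c, below) has populated the ib_device function table. A minimal kernel-consumer sketch, using only core verbs calls of that era and nothing from the patch itself:

	struct ib_pd *pd = ib_alloc_pd(ibdev);		/* -> c4iw_allocate_pd() */
	struct ib_mr *mr;

	if (IS_ERR(pd))
		return PTR_ERR(pd);
	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);	/* -> c4iw_get_dma_mr() */
	if (IS_ERR(mr)) {
		ib_dealloc_pd(pd);
		return PTR_ERR(mr);
	}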
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
new file mode 100644
index 000000000000..3cb50af3e52a
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -0,0 +1,518 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/device.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/delay.h>
38#include <linux/errno.h>
39#include <linux/list.h>
40#include <linux/spinlock.h>
41#include <linux/ethtool.h>
42#include <linux/rtnetlink.h>
43#include <linux/inetdevice.h>
44#include <linux/io.h>
45
46#include <asm/irq.h>
47#include <asm/byteorder.h>
48
49#include <rdma/iw_cm.h>
50#include <rdma/ib_verbs.h>
51#include <rdma/ib_smi.h>
52#include <rdma/ib_umem.h>
53#include <rdma/ib_user_verbs.h>
54
55#include "iw_cxgb4.h"
56
57static int fastreg_support;
58module_param(fastreg_support, int, 0644);
59MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)");
60
61static int c4iw_modify_port(struct ib_device *ibdev,
62 u8 port, int port_modify_mask,
63 struct ib_port_modify *props)
64{
65 return -ENOSYS;
66}
67
68static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
69 struct ib_ah_attr *ah_attr)
70{
71 return ERR_PTR(-ENOSYS);
72}
73
74static int c4iw_ah_destroy(struct ib_ah *ah)
75{
76 return -ENOSYS;
77}
78
79static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
80{
81 return -ENOSYS;
82}
83
84static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
85{
86 return -ENOSYS;
87}
88
89static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
90 u8 port_num, struct ib_wc *in_wc,
91 struct ib_grh *in_grh, struct ib_mad *in_mad,
92 struct ib_mad *out_mad)
93{
94 return -ENOSYS;
95}
96
97static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
98{
99 struct c4iw_dev *rhp = to_c4iw_dev(context->device);
100 struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
101 struct c4iw_mm_entry *mm, *tmp;
102
103 PDBG("%s context %p\n", __func__, context);
104 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
105 kfree(mm);
106 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
107 kfree(ucontext);
108 return 0;
109}
110
111static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
112 struct ib_udata *udata)
113{
114 struct c4iw_ucontext *context;
115 struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
116
117 PDBG("%s ibdev %p\n", __func__, ibdev);
118 context = kzalloc(sizeof(*context), GFP_KERNEL);
119 if (!context)
120 return ERR_PTR(-ENOMEM);
121 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
122 INIT_LIST_HEAD(&context->mmaps);
123 spin_lock_init(&context->mmap_lock);
124 return &context->ibucontext;
125}
126
127static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
128{
129 int len = vma->vm_end - vma->vm_start;
130 u32 key = vma->vm_pgoff << PAGE_SHIFT;
131 struct c4iw_rdev *rdev;
132 int ret = 0;
133 struct c4iw_mm_entry *mm;
134 struct c4iw_ucontext *ucontext;
135 u64 addr;
136
137 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
138 key, len);
139
140 if (vma->vm_start & (PAGE_SIZE-1))
141 return -EINVAL;
142
143 rdev = &(to_c4iw_dev(context->device)->rdev);
144 ucontext = to_c4iw_ucontext(context);
145
146 mm = remove_mmap(ucontext, key, len);
147 if (!mm)
148 return -EINVAL;
149 addr = mm->addr;
150 kfree(mm);
151
152 if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
153 (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
154 pci_resource_len(rdev->lldi.pdev, 2)))) {
155
156 /*
157 * Map T4 DB register.
158 */
159 if (vma->vm_flags & VM_READ)
160 return -EPERM;
161
162 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
163 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
164 vma->vm_flags &= ~VM_MAYREAD;
165 ret = io_remap_pfn_range(vma, vma->vm_start,
166 addr >> PAGE_SHIFT,
167 len, vma->vm_page_prot);
168 } else {
169
170 /*
171		 * Map WQ or CQ contiguous DMA memory.
172 */
173 ret = remap_pfn_range(vma, vma->vm_start,
174 addr >> PAGE_SHIFT,
175 len, vma->vm_page_prot);
176 }
177
178 return ret;
179}
180
181static int c4iw_deallocate_pd(struct ib_pd *pd)
182{
183 struct c4iw_dev *rhp;
184 struct c4iw_pd *php;
185
186 php = to_c4iw_pd(pd);
187 rhp = php->rhp;
188 PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
189 c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
190 &rhp->rdev.resource.pdid_fifo_lock);
191 kfree(php);
192 return 0;
193}
194
195static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
196 struct ib_ucontext *context,
197 struct ib_udata *udata)
198{
199 struct c4iw_pd *php;
200 u32 pdid;
201 struct c4iw_dev *rhp;
202
203 PDBG("%s ibdev %p\n", __func__, ibdev);
204 rhp = (struct c4iw_dev *) ibdev;
205 pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
206 &rhp->rdev.resource.pdid_fifo_lock);
207 if (!pdid)
208 return ERR_PTR(-EINVAL);
209 php = kzalloc(sizeof(*php), GFP_KERNEL);
210 if (!php) {
211 c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
212 &rhp->rdev.resource.pdid_fifo_lock);
213 return ERR_PTR(-ENOMEM);
214 }
215 php->pdid = pdid;
216 php->rhp = rhp;
217 if (context) {
218 if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
219 c4iw_deallocate_pd(&php->ibpd);
220 return ERR_PTR(-EFAULT);
221 }
222 }
223 PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
224 return &php->ibpd;
225}
226
227static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
228 u16 *pkey)
229{
230 PDBG("%s ibdev %p\n", __func__, ibdev);
231 *pkey = 0;
232 return 0;
233}
234
235static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
236 union ib_gid *gid)
237{
238 struct c4iw_dev *dev;
239
240 PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
241 __func__, ibdev, port, index, gid);
242 dev = to_c4iw_dev(ibdev);
243 BUG_ON(port == 0);
244 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
245 memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
246 return 0;
247}
248
249static int c4iw_query_device(struct ib_device *ibdev,
250 struct ib_device_attr *props)
251{
252
253 struct c4iw_dev *dev;
254 PDBG("%s ibdev %p\n", __func__, ibdev);
255
256 dev = to_c4iw_dev(ibdev);
257 memset(props, 0, sizeof *props);
258 memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
259 props->hw_ver = dev->rdev.lldi.adapter_type;
260 props->fw_ver = dev->rdev.lldi.fw_vers;
261 props->device_cap_flags = dev->device_cap_flags;
262 props->page_size_cap = T4_PAGESIZE_MASK;
263 props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
264 props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
265 props->max_mr_size = T4_MAX_MR_SIZE;
266 props->max_qp = T4_MAX_NUM_QP;
267 props->max_qp_wr = T4_MAX_QP_DEPTH;
268 props->max_sge = T4_MAX_RECV_SGE;
269 props->max_sge_rd = 1;
270 props->max_qp_rd_atom = T4_MAX_READ_DEPTH;
271 props->max_qp_init_rd_atom = T4_MAX_READ_DEPTH;
272 props->max_cq = T4_MAX_NUM_CQ;
273 props->max_cqe = T4_MAX_CQ_DEPTH;
274 props->max_mr = c4iw_num_stags(&dev->rdev);
275 props->max_pd = T4_MAX_NUM_PD;
276 props->local_ca_ack_delay = 0;
277 props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;
278
279 return 0;
280}
281
282static int c4iw_query_port(struct ib_device *ibdev, u8 port,
283 struct ib_port_attr *props)
284{
285 struct c4iw_dev *dev;
286 struct net_device *netdev;
287 struct in_device *inetdev;
288
289 PDBG("%s ibdev %p\n", __func__, ibdev);
290
291 dev = to_c4iw_dev(ibdev);
292 netdev = dev->rdev.lldi.ports[port-1];
293
294 memset(props, 0, sizeof(struct ib_port_attr));
295 props->max_mtu = IB_MTU_4096;
296 if (netdev->mtu >= 4096)
297 props->active_mtu = IB_MTU_4096;
298 else if (netdev->mtu >= 2048)
299 props->active_mtu = IB_MTU_2048;
300 else if (netdev->mtu >= 1024)
301 props->active_mtu = IB_MTU_1024;
302 else if (netdev->mtu >= 512)
303 props->active_mtu = IB_MTU_512;
304 else
305 props->active_mtu = IB_MTU_256;
306
307 if (!netif_carrier_ok(netdev))
308 props->state = IB_PORT_DOWN;
309 else {
310 inetdev = in_dev_get(netdev);
311 if (inetdev) {
312 if (inetdev->ifa_list)
313 props->state = IB_PORT_ACTIVE;
314 else
315 props->state = IB_PORT_INIT;
316 in_dev_put(inetdev);
317 } else
318 props->state = IB_PORT_INIT;
319 }
320
321 props->port_cap_flags =
322 IB_PORT_CM_SUP |
323 IB_PORT_SNMP_TUNNEL_SUP |
324 IB_PORT_REINIT_SUP |
325 IB_PORT_DEVICE_MGMT_SUP |
326 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
327 props->gid_tbl_len = 1;
328 props->pkey_tbl_len = 1;
329 props->active_width = 2;
330 props->active_speed = 2;
331 props->max_msg_sz = -1;
332
333 return 0;
334}
335
336static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
337 char *buf)
338{
339 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
340 ibdev.dev);
341 PDBG("%s dev 0x%p\n", __func__, dev);
342 return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
343}
344
345static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
346 char *buf)
347{
348 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
349 ibdev.dev);
350 PDBG("%s dev 0x%p\n", __func__, dev);
351
352 return sprintf(buf, "%u.%u.%u.%u\n",
353 FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
354 FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
355 FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
356 FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
357}
358
359static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
360 char *buf)
361{
362 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
363 ibdev.dev);
364 struct ethtool_drvinfo info;
365 struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
366
367 PDBG("%s dev 0x%p\n", __func__, dev);
368 lldev->ethtool_ops->get_drvinfo(lldev, &info);
369 return sprintf(buf, "%s\n", info.driver);
370}
371
372static ssize_t show_board(struct device *dev, struct device_attribute *attr,
373 char *buf)
374{
375 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
376 ibdev.dev);
377 PDBG("%s dev 0x%p\n", __func__, dev);
378 return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
379 c4iw_dev->rdev.lldi.pdev->device);
380}
381
382static int c4iw_get_mib(struct ib_device *ibdev,
383 union rdma_protocol_stats *stats)
384{
385 return -ENOSYS;
386}
387
388static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
389static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
390static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
391static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
392
393static struct device_attribute *c4iw_class_attributes[] = {
394 &dev_attr_hw_rev,
395 &dev_attr_fw_ver,
396 &dev_attr_hca_type,
397 &dev_attr_board_id,
398};
399
400int c4iw_register_device(struct c4iw_dev *dev)
401{
402 int ret;
403 int i;
404
405 PDBG("%s c4iw_dev %p\n", __func__, dev);
406 BUG_ON(!dev->rdev.lldi.ports[0]);
407 strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
408 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
409 memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
410 dev->ibdev.owner = THIS_MODULE;
411 dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
412 if (fastreg_support)
413 dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
414 dev->ibdev.local_dma_lkey = 0;
415 dev->ibdev.uverbs_cmd_mask =
416 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
417 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
418 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
419 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
420 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
421 (1ull << IB_USER_VERBS_CMD_REG_MR) |
422 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
423 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
424 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
425 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
426 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
427 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
428 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
429 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
430 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
431 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
432 (1ull << IB_USER_VERBS_CMD_POST_RECV);
433 dev->ibdev.node_type = RDMA_NODE_RNIC;
434 memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
435 dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
436 dev->ibdev.num_comp_vectors = 1;
437 dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
438 dev->ibdev.query_device = c4iw_query_device;
439 dev->ibdev.query_port = c4iw_query_port;
440 dev->ibdev.modify_port = c4iw_modify_port;
441 dev->ibdev.query_pkey = c4iw_query_pkey;
442 dev->ibdev.query_gid = c4iw_query_gid;
443 dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
444 dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
445 dev->ibdev.mmap = c4iw_mmap;
446 dev->ibdev.alloc_pd = c4iw_allocate_pd;
447 dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
448 dev->ibdev.create_ah = c4iw_ah_create;
449 dev->ibdev.destroy_ah = c4iw_ah_destroy;
450 dev->ibdev.create_qp = c4iw_create_qp;
451 dev->ibdev.modify_qp = c4iw_ib_modify_qp;
452 dev->ibdev.destroy_qp = c4iw_destroy_qp;
453 dev->ibdev.create_cq = c4iw_create_cq;
454 dev->ibdev.destroy_cq = c4iw_destroy_cq;
455 dev->ibdev.resize_cq = c4iw_resize_cq;
456 dev->ibdev.poll_cq = c4iw_poll_cq;
457 dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
458 dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
459 dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
460 dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
461 dev->ibdev.dereg_mr = c4iw_dereg_mr;
462 dev->ibdev.alloc_mw = c4iw_alloc_mw;
463 dev->ibdev.bind_mw = c4iw_bind_mw;
464 dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
465 dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
466 dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
467 dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
468 dev->ibdev.attach_mcast = c4iw_multicast_attach;
469 dev->ibdev.detach_mcast = c4iw_multicast_detach;
470 dev->ibdev.process_mad = c4iw_process_mad;
471 dev->ibdev.req_notify_cq = c4iw_arm_cq;
472 dev->ibdev.post_send = c4iw_post_send;
473 dev->ibdev.post_recv = c4iw_post_receive;
474 dev->ibdev.get_protocol_stats = c4iw_get_mib;
475
476 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
477 if (!dev->ibdev.iwcm)
478 return -ENOMEM;
479
480 dev->ibdev.iwcm->connect = c4iw_connect;
481 dev->ibdev.iwcm->accept = c4iw_accept_cr;
482 dev->ibdev.iwcm->reject = c4iw_reject_cr;
483 dev->ibdev.iwcm->create_listen = c4iw_create_listen;
484 dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
485 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
486 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
487 dev->ibdev.iwcm->get_qp = c4iw_get_qp;
488
489 ret = ib_register_device(&dev->ibdev);
490 if (ret)
491 goto bail1;
492
493 for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
494 ret = device_create_file(&dev->ibdev.dev,
495 c4iw_class_attributes[i]);
496 if (ret)
497 goto bail2;
498 }
499 return 0;
500bail2:
501 ib_unregister_device(&dev->ibdev);
502bail1:
503 kfree(dev->ibdev.iwcm);
504 return ret;
505}
506
507void c4iw_unregister_device(struct c4iw_dev *dev)
508{
509 int i;
510
511 PDBG("%s c4iw_dev %p\n", __func__, dev);
512 for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
513 device_remove_file(&dev->ibdev.dev,
514 c4iw_class_attributes[i]);
515 ib_unregister_device(&dev->ibdev);
516 kfree(dev->ibdev.iwcm);
517 return;
518}
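The provider code above wires the cxgb4 verbs into the ib_device dispatch table, so kernel ULPs reach c4iw_query_device()/c4iw_query_port() only through the core verbs API. A minimal consumer-side sketch follows; the helper name and the printed fields are illustrative only and are not part of this patch.

#include <rdma/ib_verbs.h>

/* Query the attributes that c4iw_query_device()/c4iw_query_port() fill in. */
static int example_dump_caps(struct ib_device *ibdev)
{
	struct ib_device_attr dattr;
	struct ib_port_attr pattr;
	int ret;

	ret = ib_query_device(ibdev, &dattr);
	if (ret)
		return ret;
	ret = ib_query_port(ibdev, 1, &pattr);
	if (ret)
		return ret;
	printk(KERN_INFO "%s: max_qp %d max_cqe %d active_mtu %d state %d\n",
	       ibdev->name, dattr.max_qp, dattr.max_cqe,
	       pattr.active_mtu, pattr.state);
	return 0;
}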
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
new file mode 100644
index 000000000000..bd56c841ef75
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -0,0 +1,1577 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "iw_cxgb4.h"
33
34static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
35 struct c4iw_dev_ucontext *uctx)
36{
37 /*
38 * uP clears EQ contexts when the connection exits rdma mode,
39 * so no need to post a RESET WR for these EQs.
40 */
41 dma_free_coherent(&(rdev->lldi.pdev->dev),
42 wq->rq.memsize, wq->rq.queue,
43 pci_unmap_addr(&wq->rq, mapping));
44 dma_free_coherent(&(rdev->lldi.pdev->dev),
45 wq->sq.memsize, wq->sq.queue,
46 pci_unmap_addr(&wq->sq, mapping));
47 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
48 kfree(wq->rq.sw_rq);
49 kfree(wq->sq.sw_sq);
50 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
51 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
52 return 0;
53}
54
55static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
56 struct t4_cq *rcq, struct t4_cq *scq,
57 struct c4iw_dev_ucontext *uctx)
58{
59 int user = (uctx != &rdev->uctx);
60 struct fw_ri_res_wr *res_wr;
61 struct fw_ri_res *res;
62 int wr_len;
63 struct c4iw_wr_wait wr_wait;
64 struct sk_buff *skb;
65 int ret;
66 int eqsize;
67
68 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
69 if (!wq->sq.qid)
70 return -ENOMEM;
71
72 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
73 if (!wq->rq.qid)
74 goto err1;
75
76 if (!user) {
77 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
78 GFP_KERNEL);
79 if (!wq->sq.sw_sq)
80 goto err2;
81
82 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
83 GFP_KERNEL);
84 if (!wq->rq.sw_rq)
85 goto err3;
86 }
87
88 /*
89 * RQT must be a power of 2.
90 */
91 wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
92 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
93 if (!wq->rq.rqt_hwaddr)
94 goto err4;
95
96 wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
97 wq->sq.memsize, &(wq->sq.dma_addr),
98 GFP_KERNEL);
99 if (!wq->sq.queue)
100 goto err5;
101 memset(wq->sq.queue, 0, wq->sq.memsize);
102 pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
103
104 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
105 wq->rq.memsize, &(wq->rq.dma_addr),
106 GFP_KERNEL);
107 if (!wq->rq.queue)
108 goto err6;
109 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
110 __func__, wq->sq.queue,
111 (unsigned long long)virt_to_phys(wq->sq.queue),
112 wq->rq.queue,
113 (unsigned long long)virt_to_phys(wq->rq.queue));
114 memset(wq->rq.queue, 0, wq->rq.memsize);
115 pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
116
117 wq->db = rdev->lldi.db_reg;
118 wq->gts = rdev->lldi.gts_reg;
119 if (user) {
120 wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
121 (wq->sq.qid << rdev->qpshift);
122 wq->sq.udb &= PAGE_MASK;
123 wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
124 (wq->rq.qid << rdev->qpshift);
125 wq->rq.udb &= PAGE_MASK;
126 }
127 wq->rdev = rdev;
128 wq->rq.msn = 1;
129
130 /* build fw_ri_res_wr */
131 wr_len = sizeof *res_wr + 2 * sizeof *res;
132
133 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
134 if (!skb) {
135 ret = -ENOMEM;
136 goto err7;
137 }
138 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
139
140 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
141 memset(res_wr, 0, wr_len);
142 res_wr->op_nres = cpu_to_be32(
143 FW_WR_OP(FW_RI_RES_WR) |
144 V_FW_RI_RES_WR_NRES(2) |
145 FW_WR_COMPL(1));
146 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
147 res_wr->cookie = (u64)&wr_wait;
148 res = res_wr->res;
149 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
150 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
151
152 /*
153 * eqsize is the number of 64B entries plus the status page size.
154 */
155 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
156
157 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
158 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
159 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
160 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
161 V_FW_RI_RES_WR_IQID(scq->cqid));
162 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
163 V_FW_RI_RES_WR_DCAEN(0) |
164 V_FW_RI_RES_WR_DCACPU(0) |
165 V_FW_RI_RES_WR_FBMIN(3) |
166 V_FW_RI_RES_WR_FBMAX(3) |
167 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
168 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
169 V_FW_RI_RES_WR_EQSIZE(eqsize));
170 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
171 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
172 res++;
173 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
174 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
175
176 /*
177 * eqsize is the number of 64B entries plus the status page size.
178 */
179 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
180 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
181 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
182 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
183 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
184 V_FW_RI_RES_WR_IQID(rcq->cqid));
185 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
186 V_FW_RI_RES_WR_DCAEN(0) |
187 V_FW_RI_RES_WR_DCACPU(0) |
188 V_FW_RI_RES_WR_FBMIN(3) |
189 V_FW_RI_RES_WR_FBMAX(3) |
190 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
191 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
192 V_FW_RI_RES_WR_EQSIZE(eqsize));
193 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
194 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
195
196 c4iw_init_wr_wait(&wr_wait);
197
198 ret = c4iw_ofld_send(rdev, skb);
199 if (ret)
200 goto err7;
201 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
202 if (!wr_wait.done) {
203 printk(KERN_ERR MOD "Device %s not responding!\n",
204 pci_name(rdev->lldi.pdev));
205 rdev->flags = T4_FATAL_ERROR;
206 ret = -EIO;
207 } else
208 ret = wr_wait.ret;
209 if (ret)
210 goto err7;
211
212 PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
213 __func__, wq->sq.qid, wq->rq.qid, wq->db,
214 (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
215
216 return 0;
217err7:
218 dma_free_coherent(&(rdev->lldi.pdev->dev),
219 wq->rq.memsize, wq->rq.queue,
220 pci_unmap_addr(&wq->rq, mapping));
221err6:
222 dma_free_coherent(&(rdev->lldi.pdev->dev),
223 wq->sq.memsize, wq->sq.queue,
224 pci_unmap_addr(&wq->sq, mapping));
225err5:
226 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
227err4:
228 kfree(wq->rq.sw_rq);
229err3:
230 kfree(wq->sq.sw_sq);
231err2:
232 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
233err1:
234 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
235 return -ENOMEM;
236}
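create_qp() above follows the firmware-mailbox pattern used throughout this driver: build a work request, stash a pointer to a wait object in the WR cookie, send it with c4iw_ofld_send(), and sleep until the firmware reply handler marks the wait complete. A reduced sketch of that pattern follows; the structure mirrors what the wr_wait usage above implies (the real c4iw_wr_wait definition lives in iw_cxgb4.h), and the "example_" names are illustrative.

#include <linux/wait.h>

struct example_wr_wait {
	wait_queue_head_t wait;
	int done;
	int ret;
};

/* Sender side: reset before posting the WR. */
static void example_init_wr_wait(struct example_wr_wait *w)
{
	w->done = 0;
	w->ret = 0;
	init_waitqueue_head(&w->wait);
}

/* Reply side: called with the cookie carried in the completed WR. */
static void example_wake_wr_wait(struct example_wr_wait *w, int ret)
{
	w->ret = ret;
	w->done = 1;
	wake_up(&w->wait);
}

The sender then calls wait_event_timeout(w->wait, w->done, C4IW_WR_TO) and, as create_qp() does above, treats a timeout as a fatal adapter error.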
237
238static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
239{
240 int i;
241 u32 plen;
242 int size;
243 u8 *datap;
244
245 if (wr->num_sge > T4_MAX_SEND_SGE)
246 return -EINVAL;
247 switch (wr->opcode) {
248 case IB_WR_SEND:
249 if (wr->send_flags & IB_SEND_SOLICITED)
250 wqe->send.sendop_pkd = cpu_to_be32(
251 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
252 else
253 wqe->send.sendop_pkd = cpu_to_be32(
254 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
255 wqe->send.stag_inv = 0;
256 break;
257 case IB_WR_SEND_WITH_INV:
258 if (wr->send_flags & IB_SEND_SOLICITED)
259 wqe->send.sendop_pkd = cpu_to_be32(
260 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
261 else
262 wqe->send.sendop_pkd = cpu_to_be32(
263 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
264 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
265 break;
266
267 default:
268 return -EINVAL;
269 }
270 plen = 0;
271 if (wr->num_sge) {
272 if (wr->send_flags & IB_SEND_INLINE) {
273 datap = (u8 *)wqe->send.u.immd_src[0].data;
274 for (i = 0; i < wr->num_sge; i++) {
275 if ((plen + wr->sg_list[i].length) >
276 T4_MAX_SEND_INLINE) {
277 return -EMSGSIZE;
278 }
279 plen += wr->sg_list[i].length;
280 memcpy(datap,
281 (void *)(unsigned long)wr->sg_list[i].addr,
282 wr->sg_list[i].length);
283 datap += wr->sg_list[i].length;
284 }
285 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
286 wqe->send.u.immd_src[0].r1 = 0;
287 wqe->send.u.immd_src[0].r2 = 0;
288 wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
289 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
290 plen;
291 } else {
292 for (i = 0; i < wr->num_sge; i++) {
293 if ((plen + wr->sg_list[i].length) < plen)
294 return -EMSGSIZE;
295 plen += wr->sg_list[i].length;
296 wqe->send.u.isgl_src[0].sge[i].stag =
297 cpu_to_be32(wr->sg_list[i].lkey);
298 wqe->send.u.isgl_src[0].sge[i].len =
299 cpu_to_be32(wr->sg_list[i].length);
300 wqe->send.u.isgl_src[0].sge[i].to =
301 cpu_to_be64(wr->sg_list[i].addr);
302 }
303 wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
304 wqe->send.u.isgl_src[0].r1 = 0;
305 wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
306 wqe->send.u.isgl_src[0].r2 = 0;
307 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
308 wr->num_sge * sizeof(struct fw_ri_sge);
309 }
310 } else {
311 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
312 wqe->send.u.immd_src[0].r1 = 0;
313 wqe->send.u.immd_src[0].r2 = 0;
314 wqe->send.u.immd_src[0].immdlen = 0;
315 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
316 }
317 *len16 = DIV_ROUND_UP(size, 16);
318 wqe->send.plen = cpu_to_be32(plen);
319 return 0;
320}
321
322static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
323{
324 int i;
325 u32 plen;
326 int size;
327 u8 *datap;
328
329 if (wr->num_sge > T4_MAX_WRITE_SGE)
330 return -EINVAL;
331 wqe->write.r2 = 0;
332 wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
333 wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
334 plen = 0;
335 if (wr->num_sge) {
336 if (wr->send_flags & IB_SEND_INLINE) {
337 datap = (u8 *)wqe->write.u.immd_src[0].data;
338 for (i = 0; i < wr->num_sge; i++) {
339 if ((plen + wr->sg_list[i].length) >
340 T4_MAX_WRITE_INLINE) {
341 return -EMSGSIZE;
342 }
343 plen += wr->sg_list[i].length;
344 memcpy(datap,
345 (void *)(unsigned long)wr->sg_list[i].addr,
346 wr->sg_list[i].length);
347 datap += wr->sg_list[i].length;
348 }
349 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
350 wqe->write.u.immd_src[0].r1 = 0;
351 wqe->write.u.immd_src[0].r2 = 0;
352 wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
353 size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
354 plen;
355 } else {
356 for (i = 0; i < wr->num_sge; i++) {
357 if ((plen + wr->sg_list[i].length) < plen)
358 return -EMSGSIZE;
359 plen += wr->sg_list[i].length;
360 wqe->write.u.isgl_src[0].sge[i].stag =
361 cpu_to_be32(wr->sg_list[i].lkey);
362 wqe->write.u.isgl_src[0].sge[i].len =
363 cpu_to_be32(wr->sg_list[i].length);
364 wqe->write.u.isgl_src[0].sge[i].to =
365 cpu_to_be64(wr->sg_list[i].addr);
366 }
367 wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
368 wqe->write.u.isgl_src[0].r1 = 0;
369 wqe->write.u.isgl_src[0].nsge =
370 cpu_to_be16(wr->num_sge);
371 wqe->write.u.isgl_src[0].r2 = 0;
372 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
373 wr->num_sge * sizeof(struct fw_ri_sge);
374 }
375 } else {
376 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
377 wqe->write.u.immd_src[0].r1 = 0;
378 wqe->write.u.immd_src[0].r2 = 0;
379 wqe->write.u.immd_src[0].immdlen = 0;
380 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
381 }
382 *len16 = DIV_ROUND_UP(size, 16);
383 wqe->write.plen = cpu_to_be32(plen);
384 return 0;
385}
386
387static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
388{
389 if (wr->num_sge > 1)
390 return -EINVAL;
391 if (wr->num_sge) {
392 wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
393 wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
394 >> 32));
395 wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
396 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
397 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
398 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
399 >> 32));
400 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
401 } else {
402 wqe->read.stag_src = cpu_to_be32(2);
403 wqe->read.to_src_hi = 0;
404 wqe->read.to_src_lo = 0;
405 wqe->read.stag_sink = cpu_to_be32(2);
406 wqe->read.plen = 0;
407 wqe->read.to_sink_hi = 0;
408 wqe->read.to_sink_lo = 0;
409 }
410 wqe->read.r2 = 0;
411 wqe->read.r5 = 0;
412 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
413 return 0;
414}
415
416static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
417 struct ib_recv_wr *wr, u8 *len16)
418{
419 int i;
420 int plen = 0;
421
422 for (i = 0; i < wr->num_sge; i++) {
423 if ((plen + wr->sg_list[i].length) < plen)
424 return -EMSGSIZE;
425 plen += wr->sg_list[i].length;
426 wqe->recv.isgl.sge[i].stag =
427 cpu_to_be32(wr->sg_list[i].lkey);
428 wqe->recv.isgl.sge[i].len =
429 cpu_to_be32(wr->sg_list[i].length);
430 wqe->recv.isgl.sge[i].to =
431 cpu_to_be64(wr->sg_list[i].addr);
432 }
433 for (; i < T4_MAX_RECV_SGE; i++) {
434 wqe->recv.isgl.sge[i].stag = 0;
435 wqe->recv.isgl.sge[i].len = 0;
436 wqe->recv.isgl.sge[i].to = 0;
437 }
438 wqe->recv.isgl.op = FW_RI_DATA_ISGL;
439 wqe->recv.isgl.r1 = 0;
440 wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
441 wqe->recv.isgl.r2 = 0;
442 *len16 = DIV_ROUND_UP(sizeof wqe->recv +
443 wr->num_sge * sizeof(struct fw_ri_sge), 16);
444 return 0;
445}
446
447static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
448{
449
450 struct fw_ri_immd *imdp;
451 __be64 *p;
452 int i;
453 int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
454
455 if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
456 return -EINVAL;
457
458 wqe->fr.qpbinde_to_dcacpu = 0;
459 wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
460 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
461 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
462 wqe->fr.len_hi = 0;
463 wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
464 wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
465 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
466 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
467 0xffffffff);
468 if (pbllen > T4_MAX_FR_IMMD) {
469 struct c4iw_fr_page_list *c4pl =
470 to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
471 struct fw_ri_dsgl *sglp;
472
473 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
474 sglp->op = FW_RI_DATA_DSGL;
475 sglp->r1 = 0;
476 sglp->nsge = cpu_to_be16(1);
477 sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
478 sglp->len0 = cpu_to_be32(pbllen);
479
480 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
481 } else {
482 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
483 imdp->op = FW_RI_DATA_IMMD;
484 imdp->r1 = 0;
485 imdp->r2 = 0;
486 imdp->immdlen = cpu_to_be32(pbllen);
487 p = (__be64 *)(imdp + 1);
488 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
489 *p = cpu_to_be64(
490 (u64)wr->wr.fast_reg.page_list->page_list[i]);
491 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
492 16);
493 }
494 return 0;
495}
496
497static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
498 u8 *len16)
499{
500 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
501 wqe->inv.r2 = 0;
502 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
503 return 0;
504}
505
506void c4iw_qp_add_ref(struct ib_qp *qp)
507{
508 PDBG("%s ib_qp %p\n", __func__, qp);
509 atomic_inc(&(to_c4iw_qp(qp)->refcnt));
510}
511
512void c4iw_qp_rem_ref(struct ib_qp *qp)
513{
514 PDBG("%s ib_qp %p\n", __func__, qp);
515 if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
516 wake_up(&(to_c4iw_qp(qp)->wait));
517}
518
519int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
520 struct ib_send_wr **bad_wr)
521{
522 int err = 0;
523 u8 len16 = 0;
524 enum fw_wr_opcodes fw_opcode = 0;
525 enum fw_ri_wr_flags fw_flags;
526 struct c4iw_qp *qhp;
527 union t4_wr *wqe;
528 u32 num_wrs;
529 struct t4_swsqe *swsqe;
530 unsigned long flag;
531 u16 idx = 0;
532
533 qhp = to_c4iw_qp(ibqp);
534 spin_lock_irqsave(&qhp->lock, flag);
535 if (t4_wq_in_error(&qhp->wq)) {
536 spin_unlock_irqrestore(&qhp->lock, flag);
537 return -EINVAL;
538 }
539 num_wrs = t4_sq_avail(&qhp->wq);
540 if (num_wrs == 0) {
541 spin_unlock_irqrestore(&qhp->lock, flag);
542 return -ENOMEM;
543 }
544 while (wr) {
545 if (num_wrs == 0) {
546 err = -ENOMEM;
547 *bad_wr = wr;
548 break;
549 }
550 wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
551 fw_flags = 0;
552 if (wr->send_flags & IB_SEND_SOLICITED)
553 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
554 if (wr->send_flags & IB_SEND_SIGNALED)
555 fw_flags |= FW_RI_COMPLETION_FLAG;
556 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
557 switch (wr->opcode) {
558 case IB_WR_SEND_WITH_INV:
559 case IB_WR_SEND:
560 if (wr->send_flags & IB_SEND_FENCE)
561 fw_flags |= FW_RI_READ_FENCE_FLAG;
562 fw_opcode = FW_RI_SEND_WR;
563 if (wr->opcode == IB_WR_SEND)
564 swsqe->opcode = FW_RI_SEND;
565 else
566 swsqe->opcode = FW_RI_SEND_WITH_INV;
567 err = build_rdma_send(wqe, wr, &len16);
568 break;
569 case IB_WR_RDMA_WRITE:
570 fw_opcode = FW_RI_RDMA_WRITE_WR;
571 swsqe->opcode = FW_RI_RDMA_WRITE;
572 err = build_rdma_write(wqe, wr, &len16);
573 break;
574 case IB_WR_RDMA_READ:
575 fw_opcode = FW_RI_RDMA_READ_WR;
576 swsqe->opcode = FW_RI_READ_REQ;
577 fw_flags = 0;
578 err = build_rdma_read(wqe, wr, &len16);
579 if (err)
580 break;
581 swsqe->read_len = wr->sg_list[0].length;
582 if (!qhp->wq.sq.oldest_read)
583 qhp->wq.sq.oldest_read = swsqe;
584 break;
585 case IB_WR_FAST_REG_MR:
586 fw_opcode = FW_RI_FR_NSMR_WR;
587 swsqe->opcode = FW_RI_FAST_REGISTER;
588 err = build_fastreg(wqe, wr, &len16);
589 break;
590 case IB_WR_LOCAL_INV:
591 fw_opcode = FW_RI_INV_LSTAG_WR;
592 swsqe->opcode = FW_RI_LOCAL_INV;
593 err = build_inv_stag(wqe, wr, &len16);
594 break;
595 default:
596 PDBG("%s post of type=%d TBD!\n", __func__,
597 wr->opcode);
598 err = -EINVAL;
599 }
600 if (err) {
601 *bad_wr = wr;
602 break;
603 }
604 swsqe->idx = qhp->wq.sq.pidx;
605 swsqe->complete = 0;
606 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
607 swsqe->wr_id = wr->wr_id;
608
609 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
610
611 PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
612 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
613 swsqe->opcode, swsqe->read_len);
614 wr = wr->next;
615 num_wrs--;
616 t4_sq_produce(&qhp->wq);
617 idx++;
618 }
619 if (t4_wq_db_enabled(&qhp->wq))
620 t4_ring_sq_db(&qhp->wq, idx);
621 spin_unlock_irqrestore(&qhp->lock, flag);
622 return err;
623}
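c4iw_post_send() is reached through ib_post_send() from the core verbs layer. The sketch below shows the ULP side posting a signalled RDMA WRITE that would be encoded by build_rdma_write() above; the addresses, keys and lengths are placeholders, not values from this patch.

#include <rdma/ib_verbs.h>

static int example_post_rdma_write(struct ib_qp *qp, u64 laddr, u32 lkey,
				   u64 raddr, u32 rkey, u32 len)
{
	struct ib_sge sge = {
		.addr   = laddr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = 1,		/* returned in the CQE */
		.opcode     = IB_WR_RDMA_WRITE,
		.send_flags = IB_SEND_SIGNALED,	/* request a completion */
		.sg_list    = &sge,
		.num_sge    = 1,
	};
	struct ib_send_wr *bad_wr;

	wr.wr.rdma.remote_addr = raddr;
	wr.wr.rdma.rkey = rkey;
	return ib_post_send(qp, &wr, &bad_wr);
}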
624
625int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
626 struct ib_recv_wr **bad_wr)
627{
628 int err = 0;
629 struct c4iw_qp *qhp;
630 union t4_recv_wr *wqe;
631 u32 num_wrs;
632 u8 len16 = 0;
633 unsigned long flag;
634 u16 idx = 0;
635
636 qhp = to_c4iw_qp(ibqp);
637 spin_lock_irqsave(&qhp->lock, flag);
638 if (t4_wq_in_error(&qhp->wq)) {
639 spin_unlock_irqrestore(&qhp->lock, flag);
640 return -EINVAL;
641 }
642 num_wrs = t4_rq_avail(&qhp->wq);
643 if (num_wrs == 0) {
644 spin_unlock_irqrestore(&qhp->lock, flag);
645 return -ENOMEM;
646 }
647 while (wr) {
648 if (wr->num_sge > T4_MAX_RECV_SGE) {
649 err = -EINVAL;
650 *bad_wr = wr;
651 break;
652 }
653 wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
654 if (num_wrs)
655 err = build_rdma_recv(qhp, wqe, wr, &len16);
656 else
657 err = -ENOMEM;
658 if (err) {
659 *bad_wr = wr;
660 break;
661 }
662
663 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
664
665 wqe->recv.opcode = FW_RI_RECV_WR;
666 wqe->recv.r1 = 0;
667 wqe->recv.wrid = qhp->wq.rq.pidx;
668 wqe->recv.r2[0] = 0;
669 wqe->recv.r2[1] = 0;
670 wqe->recv.r2[2] = 0;
671 wqe->recv.len16 = len16;
672 if (len16 < 5)
673 wqe->flits[8] = 0;
674
675 PDBG("%s cookie 0x%llx pidx %u\n", __func__,
676 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
677 t4_rq_produce(&qhp->wq);
678 wr = wr->next;
679 num_wrs--;
680 idx++;
681 }
682 if (t4_wq_db_enabled(&qhp->wq))
683 t4_ring_rq_db(&qhp->wq, idx);
684 spin_unlock_irqrestore(&qhp->lock, flag);
685 return err;
686}
687
688int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
689{
690 return -ENOSYS;
691}
692
693static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
694 u8 *ecode)
695{
696 int status;
697 int tagged;
698 int opcode;
699 int rqtype;
700 int send_inv;
701
702 if (!err_cqe) {
703 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
704 *ecode = 0;
705 return;
706 }
707
708 status = CQE_STATUS(err_cqe);
709 opcode = CQE_OPCODE(err_cqe);
710 rqtype = RQ_TYPE(err_cqe);
711 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
712 (opcode == FW_RI_SEND_WITH_SE_INV);
713 tagged = (opcode == FW_RI_RDMA_WRITE) ||
714 (rqtype && (opcode == FW_RI_READ_RESP));
715
716 switch (status) {
717 case T4_ERR_STAG:
718 if (send_inv) {
719 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
720 *ecode = RDMAP_CANT_INV_STAG;
721 } else {
722 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
723 *ecode = RDMAP_INV_STAG;
724 }
725 break;
726 case T4_ERR_PDID:
727 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
728 if ((opcode == FW_RI_SEND_WITH_INV) ||
729 (opcode == FW_RI_SEND_WITH_SE_INV))
730 *ecode = RDMAP_CANT_INV_STAG;
731 else
732 *ecode = RDMAP_STAG_NOT_ASSOC;
733 break;
734 case T4_ERR_QPID:
735 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
736 *ecode = RDMAP_STAG_NOT_ASSOC;
737 break;
738 case T4_ERR_ACCESS:
739 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
740 *ecode = RDMAP_ACC_VIOL;
741 break;
742 case T4_ERR_WRAP:
743 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
744 *ecode = RDMAP_TO_WRAP;
745 break;
746 case T4_ERR_BOUND:
747 if (tagged) {
748 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
749 *ecode = DDPT_BASE_BOUNDS;
750 } else {
751 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
752 *ecode = RDMAP_BASE_BOUNDS;
753 }
754 break;
755 case T4_ERR_INVALIDATE_SHARED_MR:
756 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
757 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
758 *ecode = RDMAP_CANT_INV_STAG;
759 break;
760 case T4_ERR_ECC:
761 case T4_ERR_ECC_PSTAG:
762 case T4_ERR_INTERNAL_ERR:
763 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
764 *ecode = 0;
765 break;
766 case T4_ERR_OUT_OF_RQE:
767 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
768 *ecode = DDPU_INV_MSN_NOBUF;
769 break;
770 case T4_ERR_PBL_ADDR_BOUND:
771 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
772 *ecode = DDPT_BASE_BOUNDS;
773 break;
774 case T4_ERR_CRC:
775 *layer_type = LAYER_MPA|DDP_LLP;
776 *ecode = MPA_CRC_ERR;
777 break;
778 case T4_ERR_MARKER:
779 *layer_type = LAYER_MPA|DDP_LLP;
780 *ecode = MPA_MARKER_ERR;
781 break;
782 case T4_ERR_PDU_LEN_ERR:
783 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
784 *ecode = DDPU_MSG_TOOBIG;
785 break;
786 case T4_ERR_DDP_VERSION:
787 if (tagged) {
788 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
789 *ecode = DDPT_INV_VERS;
790 } else {
791 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
792 *ecode = DDPU_INV_VERS;
793 }
794 break;
795 case T4_ERR_RDMA_VERSION:
796 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
797 *ecode = RDMAP_INV_VERS;
798 break;
799 case T4_ERR_OPCODE:
800 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
801 *ecode = RDMAP_INV_OPCODE;
802 break;
803 case T4_ERR_DDP_QUEUE_NUM:
804 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
805 *ecode = DDPU_INV_QN;
806 break;
807 case T4_ERR_MSN:
808 case T4_ERR_MSN_GAP:
809 case T4_ERR_MSN_RANGE:
810 case T4_ERR_IRD_OVERFLOW:
811 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
812 *ecode = DDPU_INV_MSN_RANGE;
813 break;
814 case T4_ERR_TBIT:
815 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
816 *ecode = 0;
817 break;
818 case T4_ERR_MO:
819 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
820 *ecode = DDPU_INV_MO;
821 break;
822 default:
823 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
824 *ecode = 0;
825 break;
826 }
827}
828
829int c4iw_post_zb_read(struct c4iw_qp *qhp)
830{
831 union t4_wr *wqe;
832 struct sk_buff *skb;
833 u8 len16;
834
835 PDBG("%s enter\n", __func__);
836 skb = alloc_skb(40, GFP_KERNEL);
837 if (!skb) {
838 printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
839 return -ENOMEM;
840 }
841 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
842
843 wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
844 memset(wqe, 0, sizeof wqe->read);
845 wqe->read.r2 = cpu_to_be64(0);
846 wqe->read.stag_sink = cpu_to_be32(1);
847 wqe->read.to_sink_hi = cpu_to_be32(0);
848 wqe->read.to_sink_lo = cpu_to_be32(1);
849 wqe->read.stag_src = cpu_to_be32(1);
850 wqe->read.plen = cpu_to_be32(0);
851 wqe->read.to_src_hi = cpu_to_be32(0);
852 wqe->read.to_src_lo = cpu_to_be32(1);
853 len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
854 init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
855
856 return c4iw_ofld_send(&qhp->rhp->rdev, skb);
857}
858
859int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
860{
861 struct fw_ri_wr *wqe;
862 struct sk_buff *skb;
863 struct terminate_message *term;
864
865 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
866 qhp->ep->hwtid);
867
868 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
869 if (!skb)
870 return -ENOMEM;
871 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
872
873 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
874 memset(wqe, 0, sizeof *wqe);
875 wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
876 wqe->flowid_len16 = cpu_to_be32(
877 FW_WR_FLOWID(qhp->ep->hwtid) |
878 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
879
880 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
881 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
882 term = (struct terminate_message *)wqe->u.terminate.termmsg;
883 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
884 return c4iw_ofld_send(&qhp->rhp->rdev, skb);
885}
886
887/*
888 * Assumes qhp lock is held.
889 */
890static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
891 struct c4iw_cq *schp, unsigned long *flag)
892{
893 int count;
894 int flushed;
895
896 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
897 /* take a ref on the qhp since we must release the lock */
898 atomic_inc(&qhp->refcnt);
899 spin_unlock_irqrestore(&qhp->lock, *flag);
900
901	/* locking hierarchy: cq lock first, then qp lock. */
902 spin_lock_irqsave(&rchp->lock, *flag);
903 spin_lock(&qhp->lock);
904 c4iw_flush_hw_cq(&rchp->cq);
905 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
906 flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
907 spin_unlock(&qhp->lock);
908 spin_unlock_irqrestore(&rchp->lock, *flag);
909 if (flushed)
910 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
911
912	/* locking hierarchy: cq lock first, then qp lock. */
913 spin_lock_irqsave(&schp->lock, *flag);
914 spin_lock(&qhp->lock);
915 c4iw_flush_hw_cq(&schp->cq);
916 c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
917 flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
918 spin_unlock(&qhp->lock);
919 spin_unlock_irqrestore(&schp->lock, *flag);
920 if (flushed)
921 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
922
923 /* deref */
924 if (atomic_dec_and_test(&qhp->refcnt))
925 wake_up(&qhp->wait);
926
927 spin_lock_irqsave(&qhp->lock, *flag);
928}
929
930static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
931{
932 struct c4iw_cq *rchp, *schp;
933
934 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
935 schp = get_chp(qhp->rhp, qhp->attr.scq);
936
937 if (qhp->ibqp.uobject) {
938 t4_set_wq_in_error(&qhp->wq);
939 t4_set_cq_in_error(&rchp->cq);
940 if (schp != rchp)
941 t4_set_cq_in_error(&schp->cq);
942 return;
943 }
944 __flush_qp(qhp, rchp, schp, flag);
945}
946
947static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
948{
949 struct fw_ri_wr *wqe;
950 int ret;
951 struct c4iw_wr_wait wr_wait;
952 struct sk_buff *skb;
953
954 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
955 qhp->ep->hwtid);
956
957 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
958 if (!skb)
959 return -ENOMEM;
960 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
961
962 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
963 memset(wqe, 0, sizeof *wqe);
964 wqe->op_compl = cpu_to_be32(
965 FW_WR_OP(FW_RI_INIT_WR) |
966 FW_WR_COMPL(1));
967 wqe->flowid_len16 = cpu_to_be32(
968 FW_WR_FLOWID(qhp->ep->hwtid) |
969 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
970 wqe->cookie = (u64)&wr_wait;
971
972 wqe->u.fini.type = FW_RI_TYPE_FINI;
973 c4iw_init_wr_wait(&wr_wait);
974 ret = c4iw_ofld_send(&rhp->rdev, skb);
975 if (ret)
976 goto out;
977
978 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
979 if (!wr_wait.done) {
980 printk(KERN_ERR MOD "Device %s not responding!\n",
981 pci_name(rhp->rdev.lldi.pdev));
982 rhp->rdev.flags = T4_FATAL_ERROR;
983 ret = -EIO;
984 } else {
985 ret = wr_wait.ret;
986 if (ret)
987 printk(KERN_WARNING MOD
988 "%s: Abnormal close qpid %d ret %u\n",
989 pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
990 ret);
991 }
992out:
993 PDBG("%s ret %d\n", __func__, ret);
994 return ret;
995}
996
997static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
998{
999 memset(&init->u, 0, sizeof init->u);
1000 switch (p2p_type) {
1001 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1002 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1003 init->u.write.stag_sink = cpu_to_be32(1);
1004 init->u.write.to_sink = cpu_to_be64(1);
1005 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1006 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1007 sizeof(struct fw_ri_immd),
1008 16);
1009 break;
1010 case FW_RI_INIT_P2PTYPE_READ_REQ:
1011 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1012 init->u.read.stag_src = cpu_to_be32(1);
1013 init->u.read.to_src_lo = cpu_to_be32(1);
1014 init->u.read.stag_sink = cpu_to_be32(1);
1015 init->u.read.to_sink_lo = cpu_to_be32(1);
1016 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1017 break;
1018 }
1019}
1020
1021static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1022{
1023 struct fw_ri_wr *wqe;
1024 int ret;
1025 struct c4iw_wr_wait wr_wait;
1026 struct sk_buff *skb;
1027
1028 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1029 qhp->ep->hwtid);
1030
1031 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
1032 if (!skb)
1033 return -ENOMEM;
1034 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1035
1036 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1037 memset(wqe, 0, sizeof *wqe);
1038 wqe->op_compl = cpu_to_be32(
1039 FW_WR_OP(FW_RI_INIT_WR) |
1040 FW_WR_COMPL(1));
1041 wqe->flowid_len16 = cpu_to_be32(
1042 FW_WR_FLOWID(qhp->ep->hwtid) |
1043 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
1044
1045 wqe->cookie = (u64)&wr_wait;
1046
1047 wqe->u.init.type = FW_RI_TYPE_INIT;
1048 wqe->u.init.mpareqbit_p2ptype =
1049 V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
1050 V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
1051 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1052 if (qhp->attr.mpa_attr.recv_marker_enabled)
1053 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1054 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1055 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1056 if (qhp->attr.mpa_attr.crc_enabled)
1057 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1058
1059 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1060 FW_RI_QP_RDMA_WRITE_ENABLE |
1061 FW_RI_QP_BIND_ENABLE;
1062 if (!qhp->ibqp.uobject)
1063 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1064 FW_RI_QP_STAG0_ENABLE;
1065 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1066 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1067 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1068 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1069 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1070 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1071 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1072 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1073 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1074 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1075 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1076 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1077 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1078 rhp->rdev.lldi.vr->rq.start);
1079 if (qhp->attr.mpa_attr.initiator)
1080 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1081
1082 c4iw_init_wr_wait(&wr_wait);
1083 ret = c4iw_ofld_send(&rhp->rdev, skb);
1084 if (ret)
1085 goto out;
1086
1087 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
1088 if (!wr_wait.done) {
1089 printk(KERN_ERR MOD "Device %s not responding!\n",
1090 pci_name(rhp->rdev.lldi.pdev));
1091 rhp->rdev.flags = T4_FATAL_ERROR;
1092 ret = -EIO;
1093 } else
1094 ret = wr_wait.ret;
1095out:
1096 PDBG("%s ret %d\n", __func__, ret);
1097 return ret;
1098}
1099
1100int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1101 enum c4iw_qp_attr_mask mask,
1102 struct c4iw_qp_attributes *attrs,
1103 int internal)
1104{
1105 int ret = 0;
1106 struct c4iw_qp_attributes newattr = qhp->attr;
1107 unsigned long flag;
1108 int disconnect = 0;
1109 int terminate = 0;
1110 int abort = 0;
1111 int free = 0;
1112 struct c4iw_ep *ep = NULL;
1113
1114 PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
1115 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1116 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1117
1118 spin_lock_irqsave(&qhp->lock, flag);
1119
1120 /* Process attr changes if in IDLE */
1121 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1122 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1123 ret = -EIO;
1124 goto out;
1125 }
1126 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1127 newattr.enable_rdma_read = attrs->enable_rdma_read;
1128 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1129 newattr.enable_rdma_write = attrs->enable_rdma_write;
1130 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1131 newattr.enable_bind = attrs->enable_bind;
1132 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1133 if (attrs->max_ord > T4_MAX_READ_DEPTH) {
1134 ret = -EINVAL;
1135 goto out;
1136 }
1137 newattr.max_ord = attrs->max_ord;
1138 }
1139 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1140 if (attrs->max_ird > T4_MAX_READ_DEPTH) {
1141 ret = -EINVAL;
1142 goto out;
1143 }
1144 newattr.max_ird = attrs->max_ird;
1145 }
1146 qhp->attr = newattr;
1147 }
1148
1149 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1150 goto out;
1151 if (qhp->attr.state == attrs->next_state)
1152 goto out;
1153
1154 switch (qhp->attr.state) {
1155 case C4IW_QP_STATE_IDLE:
1156 switch (attrs->next_state) {
1157 case C4IW_QP_STATE_RTS:
1158 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1159 ret = -EINVAL;
1160 goto out;
1161 }
1162 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1163 ret = -EINVAL;
1164 goto out;
1165 }
1166 qhp->attr.mpa_attr = attrs->mpa_attr;
1167 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1168 qhp->ep = qhp->attr.llp_stream_handle;
1169 qhp->attr.state = C4IW_QP_STATE_RTS;
1170
1171 /*
1172 * Ref the endpoint here and deref when we
1173 * disassociate the endpoint from the QP. This
1174 * happens in CLOSING->IDLE transition or *->ERROR
1175 * transition.
1176 */
1177 c4iw_get_ep(&qhp->ep->com);
1178 spin_unlock_irqrestore(&qhp->lock, flag);
1179 ret = rdma_init(rhp, qhp);
1180 spin_lock_irqsave(&qhp->lock, flag);
1181 if (ret)
1182 goto err;
1183 break;
1184 case C4IW_QP_STATE_ERROR:
1185 qhp->attr.state = C4IW_QP_STATE_ERROR;
1186 flush_qp(qhp, &flag);
1187 break;
1188 default:
1189 ret = -EINVAL;
1190 goto out;
1191 }
1192 break;
1193 case C4IW_QP_STATE_RTS:
1194 switch (attrs->next_state) {
1195 case C4IW_QP_STATE_CLOSING:
1196 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1197 qhp->attr.state = C4IW_QP_STATE_CLOSING;
1198 if (!internal) {
1199 abort = 0;
1200 disconnect = 1;
1201 ep = qhp->ep;
1202 c4iw_get_ep(&ep->com);
1203 }
1204 spin_unlock_irqrestore(&qhp->lock, flag);
1205 ret = rdma_fini(rhp, qhp);
1206 spin_lock_irqsave(&qhp->lock, flag);
1207 if (ret) {
1208 ep = qhp->ep;
1209 c4iw_get_ep(&ep->com);
1210 disconnect = abort = 1;
1211 goto err;
1212 }
1213 break;
1214 case C4IW_QP_STATE_TERMINATE:
1215 qhp->attr.state = C4IW_QP_STATE_TERMINATE;
1216 if (qhp->ibqp.uobject)
1217 t4_set_wq_in_error(&qhp->wq);
1218 if (!internal) {
1219 ep = qhp->ep;
1220 c4iw_get_ep(&ep->com);
1221 terminate = 1;
1222 disconnect = 1;
1223 }
1224 break;
1225 case C4IW_QP_STATE_ERROR:
1226 qhp->attr.state = C4IW_QP_STATE_ERROR;
1227 if (!internal) {
1228 abort = 1;
1229 disconnect = 1;
1230 ep = qhp->ep;
1231 c4iw_get_ep(&ep->com);
1232 }
1233 goto err;
1234 break;
1235 default:
1236 ret = -EINVAL;
1237 goto out;
1238 }
1239 break;
1240 case C4IW_QP_STATE_CLOSING:
1241 if (!internal) {
1242 ret = -EINVAL;
1243 goto out;
1244 }
1245 switch (attrs->next_state) {
1246 case C4IW_QP_STATE_IDLE:
1247 flush_qp(qhp, &flag);
1248 qhp->attr.state = C4IW_QP_STATE_IDLE;
1249 qhp->attr.llp_stream_handle = NULL;
1250 c4iw_put_ep(&qhp->ep->com);
1251 qhp->ep = NULL;
1252 wake_up(&qhp->wait);
1253 break;
1254 case C4IW_QP_STATE_ERROR:
1255 goto err;
1256 default:
1257 ret = -EINVAL;
1258 goto err;
1259 }
1260 break;
1261 case C4IW_QP_STATE_ERROR:
1262 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1263 ret = -EINVAL;
1264 goto out;
1265 }
1266 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1267 ret = -EINVAL;
1268 goto out;
1269 }
1270 qhp->attr.state = C4IW_QP_STATE_IDLE;
1271 break;
1272 case C4IW_QP_STATE_TERMINATE:
1273 if (!internal) {
1274 ret = -EINVAL;
1275 goto out;
1276 }
1277 goto err;
1278 break;
1279 default:
1280 printk(KERN_ERR "%s in a bad state %d\n",
1281 __func__, qhp->attr.state);
1282 ret = -EINVAL;
1283 goto err;
1284 break;
1285 }
1286 goto out;
1287err:
1288 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1289 qhp->wq.sq.qid);
1290
1291 /* disassociate the LLP connection */
1292 qhp->attr.llp_stream_handle = NULL;
1293 ep = qhp->ep;
1294 qhp->ep = NULL;
1295 qhp->attr.state = C4IW_QP_STATE_ERROR;
1296 free = 1;
1297 wake_up(&qhp->wait);
1298 BUG_ON(!ep);
1299 flush_qp(qhp, &flag);
1300out:
1301 spin_unlock_irqrestore(&qhp->lock, flag);
1302
1303 if (terminate)
1304 c4iw_post_terminate(qhp, NULL);
1305
1306 /*
1307 * If disconnect is 1, then we need to initiate a disconnect
1308 * on the EP. This can be a normal close (RTS->CLOSING) or
1309 * an abnormal close (RTS/CLOSING->ERROR).
1310 */
1311 if (disconnect) {
1312 c4iw_ep_disconnect(ep, abort, GFP_KERNEL);
1313 c4iw_put_ep(&ep->com);
1314 }
1315
1316 /*
1317 * If free is 1, then we've disassociated the EP from the QP
1318 * and we need to dereference the EP.
1319 */
1320 if (free)
1321 c4iw_put_ep(&ep->com);
1322
1323 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
1324 return ret;
1325}
1326
1327int c4iw_destroy_qp(struct ib_qp *ib_qp)
1328{
1329 struct c4iw_dev *rhp;
1330 struct c4iw_qp *qhp;
1331 struct c4iw_qp_attributes attrs;
1332 struct c4iw_ucontext *ucontext;
1333
1334 qhp = to_c4iw_qp(ib_qp);
1335 rhp = qhp->rhp;
1336
1337 attrs.next_state = C4IW_QP_STATE_ERROR;
1338 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1339 wait_event(qhp->wait, !qhp->ep);
1340
1341 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1342 remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
1343 atomic_dec(&qhp->refcnt);
1344 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
1345
1346 ucontext = ib_qp->uobject ?
1347 to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
1348 destroy_qp(&rhp->rdev, &qhp->wq,
1349 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1350
1351 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
1352 kfree(qhp);
1353 return 0;
1354}
1355
1356struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1357 struct ib_udata *udata)
1358{
1359 struct c4iw_dev *rhp;
1360 struct c4iw_qp *qhp;
1361 struct c4iw_pd *php;
1362 struct c4iw_cq *schp;
1363 struct c4iw_cq *rchp;
1364 struct c4iw_create_qp_resp uresp;
1365 int sqsize, rqsize;
1366 struct c4iw_ucontext *ucontext;
1367 int ret;
1368 struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
1369
1370 PDBG("%s ib_pd %p\n", __func__, pd);
1371
1372 if (attrs->qp_type != IB_QPT_RC)
1373 return ERR_PTR(-EINVAL);
1374
1375 php = to_c4iw_pd(pd);
1376 rhp = php->rhp;
1377 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1378 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1379 if (!schp || !rchp)
1380 return ERR_PTR(-EINVAL);
1381
1382 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1383 return ERR_PTR(-EINVAL);
1384
1385 rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
1386 if (rqsize > T4_MAX_RQ_SIZE)
1387 return ERR_PTR(-E2BIG);
1388
1389 sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
1390 if (sqsize > T4_MAX_SQ_SIZE)
1391 return ERR_PTR(-E2BIG);
1392
1393 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1394
1395
1396 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1397 if (!qhp)
1398 return ERR_PTR(-ENOMEM);
1399 qhp->wq.sq.size = sqsize;
1400 qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
1401 qhp->wq.rq.size = rqsize;
1402 qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
1403
1404 if (ucontext) {
1405 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1406 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1407 }
1408
1409 PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
1410 __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
1411
1412 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1413 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1414 if (ret)
1415 goto err1;
1416
1417 attrs->cap.max_recv_wr = rqsize - 1;
1418 attrs->cap.max_send_wr = sqsize - 1;
1419 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1420
1421 qhp->rhp = rhp;
1422 qhp->attr.pd = php->pdid;
1423 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1424 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1425 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1426 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1427 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1428 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1429 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1430 qhp->attr.state = C4IW_QP_STATE_IDLE;
1431 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1432 qhp->attr.enable_rdma_read = 1;
1433 qhp->attr.enable_rdma_write = 1;
1434 qhp->attr.enable_bind = 1;
1435 qhp->attr.max_ord = 1;
1436 qhp->attr.max_ird = 1;
1437 spin_lock_init(&qhp->lock);
1438 init_waitqueue_head(&qhp->wait);
1439 atomic_set(&qhp->refcnt, 1);
1440
1441 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1442 if (ret)
1443 goto err2;
1444
1445 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
1446 if (ret)
1447 goto err3;
1448
1449 if (udata) {
1450 mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
1451 if (!mm1) {
1452 ret = -ENOMEM;
1453 goto err4;
1454 }
1455 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
1456 if (!mm2) {
1457 ret = -ENOMEM;
1458 goto err5;
1459 }
1460 mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
1461 if (!mm3) {
1462 ret = -ENOMEM;
1463 goto err6;
1464 }
1465 mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
1466 if (!mm4) {
1467 ret = -ENOMEM;
1468 goto err7;
1469 }
1470
1471 uresp.qid_mask = rhp->rdev.qpmask;
1472 uresp.sqid = qhp->wq.sq.qid;
1473 uresp.sq_size = qhp->wq.sq.size;
1474 uresp.sq_memsize = qhp->wq.sq.memsize;
1475 uresp.rqid = qhp->wq.rq.qid;
1476 uresp.rq_size = qhp->wq.rq.size;
1477 uresp.rq_memsize = qhp->wq.rq.memsize;
1478 spin_lock(&ucontext->mmap_lock);
1479 uresp.sq_key = ucontext->key;
1480 ucontext->key += PAGE_SIZE;
1481 uresp.rq_key = ucontext->key;
1482 ucontext->key += PAGE_SIZE;
1483 uresp.sq_db_gts_key = ucontext->key;
1484 ucontext->key += PAGE_SIZE;
1485 uresp.rq_db_gts_key = ucontext->key;
1486 ucontext->key += PAGE_SIZE;
1487 spin_unlock(&ucontext->mmap_lock);
1488 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1489 if (ret)
1490 goto err8;
1491 mm1->key = uresp.sq_key;
1492 mm1->addr = virt_to_phys(qhp->wq.sq.queue);
1493 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1494 insert_mmap(ucontext, mm1);
1495 mm2->key = uresp.rq_key;
1496 mm2->addr = virt_to_phys(qhp->wq.rq.queue);
1497 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1498 insert_mmap(ucontext, mm2);
1499 mm3->key = uresp.sq_db_gts_key;
1500 mm3->addr = qhp->wq.sq.udb;
1501 mm3->len = PAGE_SIZE;
1502 insert_mmap(ucontext, mm3);
1503 mm4->key = uresp.rq_db_gts_key;
1504 mm4->addr = qhp->wq.rq.udb;
1505 mm4->len = PAGE_SIZE;
1506 insert_mmap(ucontext, mm4);
1507 }
1508 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1509 init_timer(&(qhp->timer));
1510 PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
1511 __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
1512 qhp->wq.sq.qid);
1513 return &qhp->ibqp;
1514err8:
1515 kfree(mm4);
1516err7:
1517 kfree(mm3);
1518err6:
1519 kfree(mm2);
1520err5:
1521 kfree(mm1);
1522err4:
1523 remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
1524err3:
1525 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1526err2:
1527 destroy_qp(&rhp->rdev, &qhp->wq,
1528 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1529err1:
1530 kfree(qhp);
1531 return ERR_PTR(ret);
1532}
1533
1534int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1535 int attr_mask, struct ib_udata *udata)
1536{
1537 struct c4iw_dev *rhp;
1538 struct c4iw_qp *qhp;
1539 enum c4iw_qp_attr_mask mask = 0;
1540 struct c4iw_qp_attributes attrs;
1541
1542 PDBG("%s ib_qp %p\n", __func__, ibqp);
1543
1544	/* iWARP does not support the RTR state */
1545 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1546 attr_mask &= ~IB_QP_STATE;
1547
1548 /* Make sure we still have something left to do */
1549 if (!attr_mask)
1550 return 0;
1551
1552 memset(&attrs, 0, sizeof attrs);
1553 qhp = to_c4iw_qp(ibqp);
1554 rhp = qhp->rhp;
1555
1556 attrs.next_state = c4iw_convert_state(attr->qp_state);
1557 attrs.enable_rdma_read = (attr->qp_access_flags &
1558 IB_ACCESS_REMOTE_READ) ? 1 : 0;
1559 attrs.enable_rdma_write = (attr->qp_access_flags &
1560 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1561 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1562
1563
1564 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1565 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1566 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
1567 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
1568 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1569
1570 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1571}
1572
1573struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
1574{
1575 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
1576 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
1577}
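A kernel ULP reaches c4iw_create_qp() through ib_create_qp() with an RC QP request. The sketch below shows that call with example depths; c4iw_create_qp() above adds one slot, rounds the requested counts up to a multiple of 16, rejects anything over T4_MAX_SQ_SIZE/T4_MAX_RQ_SIZE, and returns the adjusted depths in attrs->cap. The helper name and the specific numbers are illustrative.

#include <rdma/ib_verbs.h>

static struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
					  struct ib_cq *scq,
					  struct ib_cq *rcq)
{
	struct ib_qp_init_attr attr = {
		.send_cq     = scq,
		.recv_cq     = rcq,
		.qp_type     = IB_QPT_RC,	/* the only type cxgb4 accepts */
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr  = 128,
			.max_recv_wr  = 128,
			.max_send_sge = 2,	/* kept under the T4 SGE limits */
			.max_recv_sge = 2,
		},
	};

	return ib_create_qp(pd, &attr);
}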
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
new file mode 100644
index 000000000000..fb195d1d9015
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -0,0 +1,417 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32/* Crude resource management */
33#include <linux/kernel.h>
34#include <linux/random.h>
35#include <linux/slab.h>
36#include <linux/kfifo.h>
37#include <linux/spinlock.h>
38#include <linux/errno.h>
39#include <linux/genalloc.h>
40#include "iw_cxgb4.h"
41
42#define RANDOM_SIZE 16
43
44static int __c4iw_init_resource_fifo(struct kfifo *fifo,
45 spinlock_t *fifo_lock,
46 u32 nr, u32 skip_low,
47 u32 skip_high,
48 int random)
49{
50 u32 i, j, entry = 0, idx;
51 u32 random_bytes;
52 u32 rarray[16];
53 spin_lock_init(fifo_lock);
54
55 if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
56 return -ENOMEM;
57
58 for (i = 0; i < skip_low + skip_high; i++)
59 kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
60 if (random) {
61 j = 0;
62 random_bytes = random32();
63 for (i = 0; i < RANDOM_SIZE; i++)
64 rarray[i] = i + skip_low;
65 for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
66 if (j >= RANDOM_SIZE) {
67 j = 0;
68 random_bytes = random32();
69 }
70 idx = (random_bytes >> (j * 2)) & 0xF;
71 kfifo_in(fifo,
72 (unsigned char *) &rarray[idx],
73 sizeof(u32));
74 rarray[idx] = i;
75 j++;
76 }
77 for (i = 0; i < RANDOM_SIZE; i++)
78 kfifo_in(fifo,
79 (unsigned char *) &rarray[i],
80 sizeof(u32));
81 } else
82 for (i = skip_low; i < nr - skip_high; i++)
83 kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
84
85 for (i = 0; i < skip_low + skip_high; i++)
86 if (kfifo_out_locked(fifo, (unsigned char *) &entry,
87 sizeof(u32), fifo_lock))
88 break;
89 return 0;
90}
91
92static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
93 u32 nr, u32 skip_low, u32 skip_high)
94{
95 return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
96 skip_high, 0);
97}
98
99static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
100 spinlock_t *fifo_lock,
101 u32 nr, u32 skip_low, u32 skip_high)
102{
103 return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
104 skip_high, 1);
105}
106
107static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
108{
109 u32 i;
110
111 spin_lock_init(&rdev->resource.qid_fifo_lock);
112
113 if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32),
114 GFP_KERNEL))
115 return -ENOMEM;
116
117 for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++)
118 if (!(i & rdev->qpmask))
119 kfifo_in(&rdev->resource.qid_fifo,
120 (unsigned char *) &i, sizeof(u32));
121 return 0;
122}
123
124/* nr_* must be power of 2 */
125int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
126{
127 int err = 0;
128 err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
129 &rdev->resource.tpt_fifo_lock,
130 nr_tpt, 1, 0);
131 if (err)
132 goto tpt_err;
133 err = c4iw_init_qid_fifo(rdev);
134 if (err)
135 goto qid_err;
136 err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
137 &rdev->resource.pdid_fifo_lock,
138 nr_pdid, 1, 0);
139 if (err)
140 goto pdid_err;
141 return 0;
142pdid_err:
143 kfifo_free(&rdev->resource.qid_fifo);
144qid_err:
145 kfifo_free(&rdev->resource.tpt_fifo);
146tpt_err:
147 return -ENOMEM;
148}
149
150/*
151 * returns 0 if no resource available
152 */
153u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
154{
155 u32 entry;
156 if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
157 return entry;
158 else
159 return 0;
160}
161
162void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
163{
164 PDBG("%s entry 0x%x\n", __func__, entry);
165 kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
166}
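/*
 * Illustrative sketch (hypothetical example_* helpers, not from the driver):
 * a caller would pair the two fifo helpers above like this to allocate and
 * release a protection-domain id, treating a zero return as "fifo exhausted"
 * (pdid 0 is never handed out because the fifo is built with skip_low = 1).
 */
static u32 example_alloc_pdid(struct c4iw_rdev *rdev)
{
	return c4iw_get_resource(&rdev->resource.pdid_fifo,
				 &rdev->resource.pdid_fifo_lock);
}

static void example_free_pdid(struct c4iw_rdev *rdev, u32 pdid)
{
	c4iw_put_resource(&rdev->resource.pdid_fifo, pdid,
			  &rdev->resource.pdid_fifo_lock);
}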
167
168u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
169{
170 struct c4iw_qid_list *entry;
171 u32 qid;
172 int i;
173
174 mutex_lock(&uctx->lock);
175 if (!list_empty(&uctx->cqids)) {
176 entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
177 entry);
178 list_del(&entry->entry);
179 qid = entry->qid;
180 kfree(entry);
181 } else {
182 qid = c4iw_get_resource(&rdev->resource.qid_fifo,
183 &rdev->resource.qid_fifo_lock);
184 if (!qid)
185 goto out;
186 for (i = qid+1; i & rdev->qpmask; i++) {
187 entry = kmalloc(sizeof *entry, GFP_KERNEL);
188 if (!entry)
189 goto out;
190 entry->qid = i;
191 list_add_tail(&entry->entry, &uctx->cqids);
192 }
193
194 /*
195 * now put the same ids on the qp list since they all
196 * map to the same db/gts page.
197 */
198 entry = kmalloc(sizeof *entry, GFP_KERNEL);
199 if (!entry)
200 goto out;
201 entry->qid = qid;
202 list_add_tail(&entry->entry, &uctx->qpids);
203 for (i = qid+1; i & rdev->qpmask; i++) {
204 entry = kmalloc(sizeof *entry, GFP_KERNEL);
205 if (!entry)
206 goto out;
207 entry->qid = i;
208 list_add_tail(&entry->entry, &uctx->qpids);
209 }
210 }
211out:
212 mutex_unlock(&uctx->lock);
213 PDBG("%s qid 0x%x\n", __func__, qid);
214 return qid;
215}
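/*
 * Illustrative worked example (editorial, assuming qpmask == 3, i.e. four
 * queue ids per db/gts page): if the fifo hands back qid 1024, the caller
 * receives 1024 as the CQ id, qids 1025-1027 are queued on uctx->cqids for
 * later c4iw_get_cqid() calls, and 1024-1027 are all queued on uctx->qpids
 * so c4iw_get_qpid() can reuse the same page.
 */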
216
217void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
218 struct c4iw_dev_ucontext *uctx)
219{
220 struct c4iw_qid_list *entry;
221
222 entry = kmalloc(sizeof *entry, GFP_KERNEL);
223 if (!entry)
224 return;
225 PDBG("%s qid 0x%x\n", __func__, qid);
226 entry->qid = qid;
227 mutex_lock(&uctx->lock);
228 list_add_tail(&entry->entry, &uctx->cqids);
229 mutex_unlock(&uctx->lock);
230}
231
232u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
233{
234 struct c4iw_qid_list *entry;
235 u32 qid;
236 int i;
237
238 mutex_lock(&uctx->lock);
239 if (!list_empty(&uctx->qpids)) {
240 entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
241 entry);
242 list_del(&entry->entry);
243 qid = entry->qid;
244 kfree(entry);
245 } else {
246 qid = c4iw_get_resource(&rdev->resource.qid_fifo,
247 &rdev->resource.qid_fifo_lock);
248 if (!qid)
249 goto out;
250 for (i = qid+1; i & rdev->qpmask; i++) {
251 entry = kmalloc(sizeof *entry, GFP_KERNEL);
252 if (!entry)
253 goto out;
254 entry->qid = i;
255 list_add_tail(&entry->entry, &uctx->qpids);
256 }
257
258 /*
259 * now put the same ids on the cq list since they all
260 * map to the same db/gts page.
261 */
262 entry = kmalloc(sizeof *entry, GFP_KERNEL);
263 if (!entry)
264 goto out;
265 entry->qid = qid;
266 list_add_tail(&entry->entry, &uctx->cqids);
		267			for (i = qid+1; i & rdev->qpmask; i++) {
268 entry = kmalloc(sizeof *entry, GFP_KERNEL);
269 if (!entry)
270 goto out;
271 entry->qid = i;
272 list_add_tail(&entry->entry, &uctx->cqids);
273 }
274 }
275out:
276 mutex_unlock(&uctx->lock);
277 PDBG("%s qid 0x%x\n", __func__, qid);
278 return qid;
279}
280
281void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
282 struct c4iw_dev_ucontext *uctx)
283{
284 struct c4iw_qid_list *entry;
285
286 entry = kmalloc(sizeof *entry, GFP_KERNEL);
287 if (!entry)
288 return;
289 PDBG("%s qid 0x%x\n", __func__, qid);
290 entry->qid = qid;
291 mutex_lock(&uctx->lock);
292 list_add_tail(&entry->entry, &uctx->qpids);
293 mutex_unlock(&uctx->lock);
294}
295
296void c4iw_destroy_resource(struct c4iw_resource *rscp)
297{
298 kfifo_free(&rscp->tpt_fifo);
299 kfifo_free(&rscp->qid_fifo);
300 kfifo_free(&rscp->pdid_fifo);
301}
302
303/*
304 * PBL Memory Manager. Uses Linux generic allocator.
305 */
306
307#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
308
309u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
310{
311 unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
312 PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
313 return (u32)addr;
314}
315
316void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
317{
318 PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
319 gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
320}
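/*
 * Illustrative sketch (hypothetical example_* helpers, not from the driver,
 * assuming one 8-byte DMA address per PBL entry): PBL space for a memory
 * region with "npages" page-list entries would be carved out of and returned
 * to the pool like this; a zero address means the pool is exhausted.
 */
static u32 example_pbl_alloc(struct c4iw_rdev *rdev, int npages)
{
	return c4iw_pblpool_alloc(rdev, npages * sizeof(u64));
}

static void example_pbl_free(struct c4iw_rdev *rdev, u32 addr, int npages)
{
	c4iw_pblpool_free(rdev, addr, npages * sizeof(u64));
}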
321
322int c4iw_pblpool_create(struct c4iw_rdev *rdev)
323{
324 unsigned pbl_start, pbl_chunk, pbl_top;
325
326 rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
327 if (!rdev->pbl_pool)
328 return -ENOMEM;
329
330 pbl_start = rdev->lldi.vr->pbl.start;
331 pbl_chunk = rdev->lldi.vr->pbl.size;
332 pbl_top = pbl_start + pbl_chunk;
333
334 while (pbl_start < pbl_top) {
335 pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
336 if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
337 PDBG("%s failed to add PBL chunk (%x/%x)\n",
338 __func__, pbl_start, pbl_chunk);
339 if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
340 printk(KERN_WARNING MOD
341 "Failed to add all PBL chunks (%x/%x)\n",
342 pbl_start,
343 pbl_top - pbl_start);
344 return 0;
345 }
346 pbl_chunk >>= 1;
347 } else {
348 PDBG("%s added PBL chunk (%x/%x)\n",
349 __func__, pbl_start, pbl_chunk);
350 pbl_start += pbl_chunk;
351 }
352 }
353
354 return 0;
355}
356
357void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
358{
359 gen_pool_destroy(rdev->pbl_pool);
360}
361
362/*
363 * RQT Memory Manager. Uses Linux generic allocator.
364 */
365
366#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
367
368u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
369{
370 unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
371 PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
372 return (u32)addr;
373}
374
375void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
376{
377 PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
378 gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
379}
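/*
 * Editorial note: the RQT allocator works in 64-byte units, so a receive
 * queue needing N RQT entries calls c4iw_rqtpool_alloc(rdev, N) and the pool
 * reserves N << 6 bytes; the matching c4iw_rqtpool_free() must be passed the
 * same N.
 */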
380
381int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
382{
383 unsigned rqt_start, rqt_chunk, rqt_top;
384
385 rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
386 if (!rdev->rqt_pool)
387 return -ENOMEM;
388
389 rqt_start = rdev->lldi.vr->rq.start;
390 rqt_chunk = rdev->lldi.vr->rq.size;
391 rqt_top = rqt_start + rqt_chunk;
392
393 while (rqt_start < rqt_top) {
394 rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
395 if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
396 PDBG("%s failed to add RQT chunk (%x/%x)\n",
397 __func__, rqt_start, rqt_chunk);
398 if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
399 printk(KERN_WARNING MOD
400 "Failed to add all RQT chunks (%x/%x)\n",
401 rqt_start, rqt_top - rqt_start);
402 return 0;
403 }
404 rqt_chunk >>= 1;
405 } else {
406 PDBG("%s added RQT chunk (%x/%x)\n",
407 __func__, rqt_start, rqt_chunk);
408 rqt_start += rqt_chunk;
409 }
410 }
411 return 0;
412}
413
414void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
415{
416 gen_pool_destroy(rdev->rqt_pool);
417}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
new file mode 100644
index 000000000000..3f0d2172efda
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -0,0 +1,536 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef __T4_H__
32#define __T4_H__
33
34#include "t4_hw.h"
35#include "t4_regs.h"
36#include "t4_msg.h"
37#include "t4fw_ri_api.h"
38
39#define T4_MAX_READ_DEPTH 16
40#define T4_QID_BASE 1024
41#define T4_MAX_QIDS 256
42#define T4_MAX_NUM_QP (1<<16)
43#define T4_MAX_NUM_CQ (1<<15)
44#define T4_MAX_NUM_PD (1<<15)
45#define T4_MAX_PBL_SIZE 256
46#define T4_MAX_RQ_SIZE 1024
47#define T4_MAX_SQ_SIZE 1024
48#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
49#define T4_MAX_CQ_DEPTH 8192
50#define T4_MAX_NUM_STAG (1<<15)
51#define T4_MAX_MR_SIZE (~0ULL - 1)
52#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
53#define T4_STAG_UNSET 0xffffffff
54#define T4_FW_MAJ 0
55#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
56
57struct t4_status_page {
58 __be32 rsvd1; /* flit 0 - hw owns */
59 __be16 rsvd2;
60 __be16 qid;
61 __be16 cidx;
62 __be16 pidx;
63 u8 qp_err; /* flit 1 - sw owns */
64 u8 db_off;
65};
66
67#define T4_EQ_SIZE 64
68
69#define T4_SQ_NUM_SLOTS 4
70#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
71#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
72 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
73#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
74 sizeof(struct fw_ri_immd)))
75#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
76 sizeof(struct fw_ri_rdma_write_wr) - \
77 sizeof(struct fw_ri_immd)))
78#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
79 sizeof(struct fw_ri_rdma_write_wr) - \
80 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
81#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
82 sizeof(struct fw_ri_immd)))
83#define T4_MAX_FR_DEPTH 255
84
85#define T4_RQ_NUM_SLOTS 2
86#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
87#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
88 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
89
90union t4_wr {
91 struct fw_ri_res_wr res;
92 struct fw_ri_wr ri;
93 struct fw_ri_rdma_write_wr write;
94 struct fw_ri_send_wr send;
95 struct fw_ri_rdma_read_wr read;
96 struct fw_ri_bind_mw_wr bind;
97 struct fw_ri_fr_nsmr_wr fr;
98 struct fw_ri_inv_lstag_wr inv;
99 struct t4_status_page status;
100 __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
101};
102
103union t4_recv_wr {
104 struct fw_ri_recv_wr recv;
105 struct t4_status_page status;
106 __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
107};
108
109static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
110 enum fw_wr_opcodes opcode, u8 flags, u8 len16)
111{
112 int slots_used;
113
114 wqe->send.opcode = (u8)opcode;
115 wqe->send.flags = flags;
116 wqe->send.wrid = wrid;
117 wqe->send.r1[0] = 0;
118 wqe->send.r1[1] = 0;
119 wqe->send.r1[2] = 0;
120 wqe->send.len16 = len16;
121
122 slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
123 while (slots_used < T4_SQ_NUM_SLOTS) {
124 wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
125 slots_used++;
126 }
127}
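/*
 * Editorial worked example: a send WR with len16 == 4 spans 4 * 16 == 64
 * bytes, i.e. DIV_ROUND_UP(64, T4_EQ_SIZE) == 1 of the T4_SQ_NUM_SLOTS (4)
 * slots in the software WQE, so the loop above zeroes the first flit of each
 * of the remaining three slots.
 */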
128
129/* CQE/AE status codes */
130#define T4_ERR_SUCCESS 0x0
131#define T4_ERR_STAG 0x1 /* STAG invalid: either the */
		132					/* STAG is off limit, being 0, */
133 /* or STAG_key mismatch */
134#define T4_ERR_PDID 0x2 /* PDID mismatch */
135#define T4_ERR_QPID 0x3 /* QPID mismatch */
136#define T4_ERR_ACCESS 0x4 /* Invalid access right */
137#define T4_ERR_WRAP 0x5 /* Wrap error */
		138#define T4_ERR_BOUND			0x6	/* base and bounds violation */
139#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
140 /* shared memory region */
141#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
		142					/* MR with a MW bound to it */
143#define T4_ERR_ECC 0x9 /* ECC error detected */
144#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */
145 /* reading PSTAG for a MW */
146 /* Invalidate */
147#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
148 /* software error */
149#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */
150#define T4_ERR_CRC 0x10 /* CRC error */
151#define T4_ERR_MARKER 0x11 /* Marker error */
152#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
153#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */
154#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */
155#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
156#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
157#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
158#define T4_ERR_MSN 0x18 /* MSN error */
159#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
160#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
161 /* or READ_REQ */
162#define T4_ERR_MSN_GAP 0x1B
163#define T4_ERR_MSN_RANGE 0x1C
164#define T4_ERR_IRD_OVERFLOW 0x1D
165#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
166 /* software error */
167#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
168 /* mismatch) */
169/*
170 * CQE defs
171 */
172struct t4_cqe {
173 __be32 header;
174 __be32 len;
175 union {
176 struct {
177 __be32 stag;
178 __be32 msn;
179 } rcqe;
180 struct {
181 u32 nada1;
182 u16 nada2;
183 u16 cidx;
184 } scqe;
185 struct {
186 __be32 wrid_hi;
187 __be32 wrid_low;
188 } gen;
189 } u;
190 __be64 reserved;
191 __be64 bits_type_ts;
192};
193
194/* macros for flit 0 of the cqe */
195
196#define S_CQE_QPID 12
197#define M_CQE_QPID 0xFFFFF
198#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
199#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
200
201#define S_CQE_SWCQE 11
202#define M_CQE_SWCQE 0x1
203#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
204#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
205
206#define S_CQE_STATUS 5
207#define M_CQE_STATUS 0x1F
208#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
209#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
210
211#define S_CQE_TYPE 4
212#define M_CQE_TYPE 0x1
213#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
214#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
215
216#define S_CQE_OPCODE 0
217#define M_CQE_OPCODE 0xF
218#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
219#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
220
221#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
222#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
223#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
224#define SQ_TYPE(x) (CQE_TYPE((x)))
225#define RQ_TYPE(x) (!CQE_TYPE((x)))
226#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
227#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
228
		229#define CQE_SEND_OPCODE(x) ( \
230 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
231 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
232 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
233 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
234
235#define CQE_LEN(x) (be32_to_cpu((x)->len))
236
237/* used for RQ completion processing */
238#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
239#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
240
241/* used for SQ completion processing */
242#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
243
244/* generic accessor macros */
245#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
246#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
247
248/* macros for flit 3 of the cqe */
249#define S_CQE_GENBIT 63
250#define M_CQE_GENBIT 0x1
251#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
252#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
253
254#define S_CQE_OVFBIT 62
255#define M_CQE_OVFBIT 0x1
256#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
257
258#define S_CQE_IQTYPE 60
259#define M_CQE_IQTYPE 0x3
260#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
261
262#define M_CQE_TS 0x0fffffffffffffffULL
263#define G_CQE_TS(x) ((x) & M_CQE_TS)
264
265#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
266#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
267#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
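/*
 * Illustrative sketch (hypothetical helper, not from the driver): decoding
 * the common fields of a completed CQE with the flit-0 accessors above.
 */
static inline void example_print_cqe(struct t4_cqe *cqe)
{
	printk(KERN_DEBUG "cqe %p: qpid 0x%x %s opcode 0x%x status 0x%x len %u\n",
	       cqe, CQE_QPID(cqe), SQ_TYPE(cqe) ? "SQ" : "RQ",
	       CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_LEN(cqe));
}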
268
269struct t4_swsqe {
270 u64 wr_id;
271 struct t4_cqe cqe;
272 int read_len;
273 int opcode;
274 int complete;
275 int signaled;
276 u16 idx;
277};
278
279struct t4_sq {
280 union t4_wr *queue;
281 dma_addr_t dma_addr;
282 DECLARE_PCI_UNMAP_ADDR(mapping);
283 struct t4_swsqe *sw_sq;
284 struct t4_swsqe *oldest_read;
285 u64 udb;
286 size_t memsize;
287 u32 qid;
288 u16 in_use;
289 u16 size;
290 u16 cidx;
291 u16 pidx;
292};
293
294struct t4_swrqe {
295 u64 wr_id;
296};
297
298struct t4_rq {
299 union t4_recv_wr *queue;
300 dma_addr_t dma_addr;
301 DECLARE_PCI_UNMAP_ADDR(mapping);
302 struct t4_swrqe *sw_rq;
303 u64 udb;
304 size_t memsize;
305 u32 qid;
306 u32 msn;
307 u32 rqt_hwaddr;
308 u16 rqt_size;
309 u16 in_use;
310 u16 size;
311 u16 cidx;
312 u16 pidx;
313};
314
315struct t4_wq {
316 struct t4_sq sq;
317 struct t4_rq rq;
318 void __iomem *db;
319 void __iomem *gts;
320 struct c4iw_rdev *rdev;
321};
322
323static inline int t4_rqes_posted(struct t4_wq *wq)
324{
325 return wq->rq.in_use;
326}
327
328static inline int t4_rq_empty(struct t4_wq *wq)
329{
330 return wq->rq.in_use == 0;
331}
332
333static inline int t4_rq_full(struct t4_wq *wq)
334{
335 return wq->rq.in_use == (wq->rq.size - 1);
336}
337
338static inline u32 t4_rq_avail(struct t4_wq *wq)
339{
340 return wq->rq.size - 1 - wq->rq.in_use;
341}
342
343static inline void t4_rq_produce(struct t4_wq *wq)
344{
345 wq->rq.in_use++;
346 if (++wq->rq.pidx == wq->rq.size)
347 wq->rq.pidx = 0;
348}
349
350static inline void t4_rq_consume(struct t4_wq *wq)
351{
352 wq->rq.in_use--;
353 wq->rq.msn++;
354 if (++wq->rq.cidx == wq->rq.size)
355 wq->rq.cidx = 0;
356}
357
358static inline int t4_sq_empty(struct t4_wq *wq)
359{
360 return wq->sq.in_use == 0;
361}
362
363static inline int t4_sq_full(struct t4_wq *wq)
364{
365 return wq->sq.in_use == (wq->sq.size - 1);
366}
367
368static inline u32 t4_sq_avail(struct t4_wq *wq)
369{
370 return wq->sq.size - 1 - wq->sq.in_use;
371}
372
373static inline void t4_sq_produce(struct t4_wq *wq)
374{
375 wq->sq.in_use++;
376 if (++wq->sq.pidx == wq->sq.size)
377 wq->sq.pidx = 0;
378}
379
380static inline void t4_sq_consume(struct t4_wq *wq)
381{
382 wq->sq.in_use--;
383 if (++wq->sq.cidx == wq->sq.size)
384 wq->sq.cidx = 0;
385}
386
387static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
388{
389 inc *= T4_SQ_NUM_SLOTS;
390 wmb();
391 writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
392}
393
394static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
395{
396 inc *= T4_RQ_NUM_SLOTS;
397 wmb();
398 writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
399}
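/*
 * Illustrative sketch (hypothetical helper, not from the driver): publishing
 * one freshly built SQ WQE.  t4_sq_produce() advances the software producer
 * index, and t4_ring_sq_db(wq, 1) scales that one WQE by T4_SQ_NUM_SLOTS
 * hardware slots and writes the doorbell after the wmb() has made the WQE
 * contents visible.
 */
static inline void example_publish_one_sq_wqe(struct t4_wq *wq)
{
	t4_sq_produce(wq);
	t4_ring_sq_db(wq, 1);
}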
400
401static inline int t4_wq_in_error(struct t4_wq *wq)
402{
403 return wq->sq.queue[wq->sq.size].status.qp_err;
404}
405
406static inline void t4_set_wq_in_error(struct t4_wq *wq)
407{
408 wq->sq.queue[wq->sq.size].status.qp_err = 1;
409 wq->rq.queue[wq->rq.size].status.qp_err = 1;
410}
411
412static inline void t4_disable_wq_db(struct t4_wq *wq)
413{
414 wq->sq.queue[wq->sq.size].status.db_off = 1;
415 wq->rq.queue[wq->rq.size].status.db_off = 1;
416}
417
418static inline void t4_enable_wq_db(struct t4_wq *wq)
419{
420 wq->sq.queue[wq->sq.size].status.db_off = 0;
421 wq->rq.queue[wq->rq.size].status.db_off = 0;
422}
423
424static inline int t4_wq_db_enabled(struct t4_wq *wq)
425{
426 return !wq->sq.queue[wq->sq.size].status.db_off;
427}
428
429struct t4_cq {
430 struct t4_cqe *queue;
431 dma_addr_t dma_addr;
432 DECLARE_PCI_UNMAP_ADDR(mapping);
433 struct t4_cqe *sw_queue;
434 void __iomem *gts;
435 struct c4iw_rdev *rdev;
436 u64 ugts;
437 size_t memsize;
438 u64 timestamp;
439 u32 cqid;
440 u16 size; /* including status page */
441 u16 cidx;
442 u16 sw_pidx;
443 u16 sw_cidx;
444 u16 sw_in_use;
445 u16 cidx_inc;
446 u8 gen;
447 u8 error;
448};
449
450static inline int t4_arm_cq(struct t4_cq *cq, int se)
451{
452 u32 val;
453
454 val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
455 INGRESSQID(cq->cqid);
456 cq->cidx_inc = 0;
457 writel(val, cq->gts);
458 return 0;
459}
460
461static inline void t4_swcq_produce(struct t4_cq *cq)
462{
463 cq->sw_in_use++;
464 if (++cq->sw_pidx == cq->size)
465 cq->sw_pidx = 0;
466}
467
468static inline void t4_swcq_consume(struct t4_cq *cq)
469{
470 cq->sw_in_use--;
471 if (++cq->sw_cidx == cq->size)
472 cq->sw_cidx = 0;
473}
474
475static inline void t4_hwcq_consume(struct t4_cq *cq)
476{
477 cq->cidx_inc++;
478 if (++cq->cidx == cq->size) {
479 cq->cidx = 0;
480 cq->gen ^= 1;
481 }
482}
483
484static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
485{
486 return (CQE_GENBIT(cqe) == cq->gen);
487}
488
489static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
490{
491 int ret = 0;
492
493 if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
494 *cqe = &cq->queue[cq->cidx];
495 cq->timestamp = CQE_TS(*cqe);
496 } else if (CQE_TS(&cq->queue[cq->cidx]) > cq->timestamp)
497 ret = -EOVERFLOW;
498 else
499 ret = -ENODATA;
500 if (ret == -EOVERFLOW) {
501 printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
502 cq->error = 1;
503 }
504 return ret;
505}
506
507static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
508{
509 if (cq->sw_in_use)
510 return &cq->sw_queue[cq->sw_cidx];
511 return NULL;
512}
513
514static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
515{
516 int ret = 0;
517
518 if (cq->error)
519 ret = -ENODATA;
520 else if (cq->sw_in_use)
521 *cqe = &cq->sw_queue[cq->sw_cidx];
522 else
523 ret = t4_next_hw_cqe(cq, cqe);
524 return ret;
525}
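/*
 * Illustrative sketch (hypothetical helper, not from the driver): a minimal
 * poll loop driven by t4_next_cqe().  Entries returned from the software
 * queue are retired with t4_swcq_consume(), hardware entries with
 * t4_hwcq_consume(); a real poll routine would also translate each CQE into
 * an ib_wc.
 */
static inline int example_drain_cq(struct t4_cq *cq)
{
	struct t4_cqe *cqe;
	int polled = 0;

	while (t4_next_cqe(cq, &cqe) == 0) {
		if (cqe == &cq->sw_queue[cq->sw_cidx])
			t4_swcq_consume(cq);
		else
			t4_hwcq_consume(cq);
		polled++;
	}
	return polled;
}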
526
527static inline int t4_cq_in_error(struct t4_cq *cq)
528{
529 return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
530}
531
532static inline void t4_set_cq_in_error(struct t4_cq *cq)
533{
534 ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
535}
536#endif
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
new file mode 100644
index 000000000000..fc706bd07fae
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -0,0 +1,829 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef _T4FW_RI_API_H_
32#define _T4FW_RI_API_H_
33
34#include "t4fw_api.h"
35
36enum fw_ri_wr_opcode {
37 FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */
38 FW_RI_READ_REQ = 0x1,
39 FW_RI_READ_RESP = 0x2,
40 FW_RI_SEND = 0x3,
41 FW_RI_SEND_WITH_INV = 0x4,
42 FW_RI_SEND_WITH_SE = 0x5,
43 FW_RI_SEND_WITH_SE_INV = 0x6,
44 FW_RI_TERMINATE = 0x7,
45 FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... */
46 FW_RI_BIND_MW = 0x9,
47 FW_RI_FAST_REGISTER = 0xa,
48 FW_RI_LOCAL_INV = 0xb,
49 FW_RI_QP_MODIFY = 0xc,
50 FW_RI_BYPASS = 0xd,
51 FW_RI_RECEIVE = 0xe,
52
53 FW_RI_SGE_EC_CR_RETURN = 0xf
54};
55
56enum fw_ri_wr_flags {
57 FW_RI_COMPLETION_FLAG = 0x01,
58 FW_RI_NOTIFICATION_FLAG = 0x02,
59 FW_RI_SOLICITED_EVENT_FLAG = 0x04,
60 FW_RI_READ_FENCE_FLAG = 0x08,
61 FW_RI_LOCAL_FENCE_FLAG = 0x10,
62 FW_RI_RDMA_READ_INVALIDATE = 0x20
63};
64
65enum fw_ri_mpa_attrs {
66 FW_RI_MPA_RX_MARKER_ENABLE = 0x01,
67 FW_RI_MPA_TX_MARKER_ENABLE = 0x02,
68 FW_RI_MPA_CRC_ENABLE = 0x04,
69 FW_RI_MPA_IETF_ENABLE = 0x08
70};
71
72enum fw_ri_qp_caps {
73 FW_RI_QP_RDMA_READ_ENABLE = 0x01,
74 FW_RI_QP_RDMA_WRITE_ENABLE = 0x02,
75 FW_RI_QP_BIND_ENABLE = 0x04,
76 FW_RI_QP_FAST_REGISTER_ENABLE = 0x08,
77 FW_RI_QP_STAG0_ENABLE = 0x10
78};
79
80enum fw_ri_addr_type {
81 FW_RI_ZERO_BASED_TO = 0x00,
82 FW_RI_VA_BASED_TO = 0x01
83};
84
85enum fw_ri_mem_perms {
86 FW_RI_MEM_ACCESS_REM_WRITE = 0x01,
87 FW_RI_MEM_ACCESS_REM_READ = 0x02,
88 FW_RI_MEM_ACCESS_REM = 0x03,
89 FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04,
90 FW_RI_MEM_ACCESS_LOCAL_READ = 0x08,
91 FW_RI_MEM_ACCESS_LOCAL = 0x0C
92};
93
94enum fw_ri_stag_type {
95 FW_RI_STAG_NSMR = 0x00,
96 FW_RI_STAG_SMR = 0x01,
97 FW_RI_STAG_MW = 0x02,
98 FW_RI_STAG_MW_RELAXED = 0x03
99};
100
101enum fw_ri_data_op {
102 FW_RI_DATA_IMMD = 0x81,
103 FW_RI_DATA_DSGL = 0x82,
104 FW_RI_DATA_ISGL = 0x83
105};
106
107enum fw_ri_sgl_depth {
108 FW_RI_SGL_DEPTH_MAX_SQ = 16,
109 FW_RI_SGL_DEPTH_MAX_RQ = 4
110};
111
112struct fw_ri_dsge_pair {
113 __be32 len[2];
114 __be64 addr[2];
115};
116
117struct fw_ri_dsgl {
118 __u8 op;
119 __u8 r1;
120 __be16 nsge;
121 __be32 len0;
122 __be64 addr0;
123#ifndef C99_NOT_SUPPORTED
124 struct fw_ri_dsge_pair sge[0];
125#endif
126};
127
128struct fw_ri_sge {
129 __be32 stag;
130 __be32 len;
131 __be64 to;
132};
133
134struct fw_ri_isgl {
135 __u8 op;
136 __u8 r1;
137 __be16 nsge;
138 __be32 r2;
139#ifndef C99_NOT_SUPPORTED
140 struct fw_ri_sge sge[0];
141#endif
142};
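/*
 * Illustrative sketch (hypothetical helper, not from the driver): an
 * immediate SGL describing two buffers is laid out as the 8-byte fw_ri_isgl
 * header followed directly by two 16-byte fw_ri_sge entries in the same WQE
 * memory.
 */
static inline void example_build_isgl(struct fw_ri_isgl *isglp,
				      u32 stag0, u32 len0, u64 to0,
				      u32 stag1, u32 len1, u64 to1)
{
	struct fw_ri_sge *sge = (struct fw_ri_sge *)(isglp + 1);

	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(2);
	isglp->r2 = 0;
	sge[0].stag = cpu_to_be32(stag0);
	sge[0].len = cpu_to_be32(len0);
	sge[0].to = cpu_to_be64(to0);
	sge[1].stag = cpu_to_be32(stag1);
	sge[1].len = cpu_to_be32(len1);
	sge[1].to = cpu_to_be64(to1);
}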
143
144struct fw_ri_immd {
145 __u8 op;
146 __u8 r1;
147 __be16 r2;
148 __be32 immdlen;
149#ifndef C99_NOT_SUPPORTED
150 __u8 data[0];
151#endif
152};
153
154struct fw_ri_tpte {
155 __be32 valid_to_pdid;
156 __be32 locread_to_qpid;
157 __be32 nosnoop_pbladdr;
158 __be32 len_lo;
159 __be32 va_hi;
160 __be32 va_lo_fbo;
161 __be32 dca_mwbcnt_pstag;
162 __be32 len_hi;
163};
164
165#define S_FW_RI_TPTE_VALID 31
166#define M_FW_RI_TPTE_VALID 0x1
167#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
168#define G_FW_RI_TPTE_VALID(x) \
169 (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
170#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
171
172#define S_FW_RI_TPTE_STAGKEY 23
173#define M_FW_RI_TPTE_STAGKEY 0xff
174#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
175#define G_FW_RI_TPTE_STAGKEY(x) \
176 (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
177
178#define S_FW_RI_TPTE_STAGSTATE 22
179#define M_FW_RI_TPTE_STAGSTATE 0x1
180#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
181#define G_FW_RI_TPTE_STAGSTATE(x) \
182 (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
183#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
184
185#define S_FW_RI_TPTE_STAGTYPE 20
186#define M_FW_RI_TPTE_STAGTYPE 0x3
187#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
188#define G_FW_RI_TPTE_STAGTYPE(x) \
189 (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
190
191#define S_FW_RI_TPTE_PDID 0
192#define M_FW_RI_TPTE_PDID 0xfffff
193#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
194#define G_FW_RI_TPTE_PDID(x) \
195 (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
196
197#define S_FW_RI_TPTE_PERM 28
198#define M_FW_RI_TPTE_PERM 0xf
199#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
200#define G_FW_RI_TPTE_PERM(x) \
201 (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
202
203#define S_FW_RI_TPTE_REMINVDIS 27
204#define M_FW_RI_TPTE_REMINVDIS 0x1
205#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
206#define G_FW_RI_TPTE_REMINVDIS(x) \
207 (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
208#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
209
210#define S_FW_RI_TPTE_ADDRTYPE 26
211#define M_FW_RI_TPTE_ADDRTYPE 1
212#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
213#define G_FW_RI_TPTE_ADDRTYPE(x) \
214 (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
215#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
216
217#define S_FW_RI_TPTE_MWBINDEN 25
218#define M_FW_RI_TPTE_MWBINDEN 0x1
219#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
220#define G_FW_RI_TPTE_MWBINDEN(x) \
221 (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
222#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
223
224#define S_FW_RI_TPTE_PS 20
225#define M_FW_RI_TPTE_PS 0x1f
226#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
227#define G_FW_RI_TPTE_PS(x) \
228 (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
229
230#define S_FW_RI_TPTE_QPID 0
231#define M_FW_RI_TPTE_QPID 0xfffff
232#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
233#define G_FW_RI_TPTE_QPID(x) \
234 (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
235
236#define S_FW_RI_TPTE_NOSNOOP 30
237#define M_FW_RI_TPTE_NOSNOOP 0x1
238#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
239#define G_FW_RI_TPTE_NOSNOOP(x) \
240 (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
241#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
242
243#define S_FW_RI_TPTE_PBLADDR 0
244#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
245#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
246#define G_FW_RI_TPTE_PBLADDR(x) \
247 (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
248
249#define S_FW_RI_TPTE_DCA 24
250#define M_FW_RI_TPTE_DCA 0x1f
251#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
252#define G_FW_RI_TPTE_DCA(x) \
253 (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
254
255#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
256#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
257#define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \
258 ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
259#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
260 (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
261
262enum fw_ri_res_type {
263 FW_RI_RES_TYPE_SQ,
264 FW_RI_RES_TYPE_RQ,
265 FW_RI_RES_TYPE_CQ,
266};
267
268enum fw_ri_res_op {
269 FW_RI_RES_OP_WRITE,
270 FW_RI_RES_OP_RESET,
271};
272
273struct fw_ri_res {
274 union fw_ri_restype {
275 struct fw_ri_res_sqrq {
276 __u8 restype;
277 __u8 op;
278 __be16 r3;
279 __be32 eqid;
280 __be32 r4[2];
281 __be32 fetchszm_to_iqid;
282 __be32 dcaen_to_eqsize;
283 __be64 eqaddr;
284 } sqrq;
285 struct fw_ri_res_cq {
286 __u8 restype;
287 __u8 op;
288 __be16 r3;
289 __be32 iqid;
290 __be32 r4[2];
291 __be32 iqandst_to_iqandstindex;
292 __be16 iqdroprss_to_iqesize;
293 __be16 iqsize;
294 __be64 iqaddr;
295 __be32 iqns_iqro;
296 __be32 r6_lo;
297 __be64 r7;
298 } cq;
299 } u;
300};
301
302struct fw_ri_res_wr {
303 __be32 op_nres;
304 __be32 len16_pkd;
305 __u64 cookie;
306#ifndef C99_NOT_SUPPORTED
307 struct fw_ri_res res[0];
308#endif
309};
310
311#define S_FW_RI_RES_WR_NRES 0
312#define M_FW_RI_RES_WR_NRES 0xff
313#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
314#define G_FW_RI_RES_WR_NRES(x) \
315 (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
316
317#define S_FW_RI_RES_WR_FETCHSZM 26
318#define M_FW_RI_RES_WR_FETCHSZM 0x1
319#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
320#define G_FW_RI_RES_WR_FETCHSZM(x) \
321 (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
322#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
323
324#define S_FW_RI_RES_WR_STATUSPGNS 25
325#define M_FW_RI_RES_WR_STATUSPGNS 0x1
326#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
327#define G_FW_RI_RES_WR_STATUSPGNS(x) \
328 (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
329#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
330
331#define S_FW_RI_RES_WR_STATUSPGRO 24
332#define M_FW_RI_RES_WR_STATUSPGRO 0x1
333#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
334#define G_FW_RI_RES_WR_STATUSPGRO(x) \
335 (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
336#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
337
338#define S_FW_RI_RES_WR_FETCHNS 23
339#define M_FW_RI_RES_WR_FETCHNS 0x1
340#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
341#define G_FW_RI_RES_WR_FETCHNS(x) \
342 (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
343#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
344
345#define S_FW_RI_RES_WR_FETCHRO 22
346#define M_FW_RI_RES_WR_FETCHRO 0x1
347#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
348#define G_FW_RI_RES_WR_FETCHRO(x) \
349 (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
350#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
351
352#define S_FW_RI_RES_WR_HOSTFCMODE 20
353#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
354#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
355#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
356 (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
357
358#define S_FW_RI_RES_WR_CPRIO 19
359#define M_FW_RI_RES_WR_CPRIO 0x1
360#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
361#define G_FW_RI_RES_WR_CPRIO(x) \
362 (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
363#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
364
365#define S_FW_RI_RES_WR_ONCHIP 18
366#define M_FW_RI_RES_WR_ONCHIP 0x1
367#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
368#define G_FW_RI_RES_WR_ONCHIP(x) \
369 (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
370#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
371
372#define S_FW_RI_RES_WR_PCIECHN 16
373#define M_FW_RI_RES_WR_PCIECHN 0x3
374#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
375#define G_FW_RI_RES_WR_PCIECHN(x) \
376 (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
377
378#define S_FW_RI_RES_WR_IQID 0
379#define M_FW_RI_RES_WR_IQID 0xffff
380#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
381#define G_FW_RI_RES_WR_IQID(x) \
382 (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
383
384#define S_FW_RI_RES_WR_DCAEN 31
385#define M_FW_RI_RES_WR_DCAEN 0x1
386#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
387#define G_FW_RI_RES_WR_DCAEN(x) \
388 (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
389#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
390
391#define S_FW_RI_RES_WR_DCACPU 26
392#define M_FW_RI_RES_WR_DCACPU 0x1f
393#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
394#define G_FW_RI_RES_WR_DCACPU(x) \
395 (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
396
397#define S_FW_RI_RES_WR_FBMIN 23
398#define M_FW_RI_RES_WR_FBMIN 0x7
399#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
400#define G_FW_RI_RES_WR_FBMIN(x) \
401 (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
402
403#define S_FW_RI_RES_WR_FBMAX 20
404#define M_FW_RI_RES_WR_FBMAX 0x7
405#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
406#define G_FW_RI_RES_WR_FBMAX(x) \
407 (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
408
409#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
410#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
411#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
412#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
413 (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
414#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
415
416#define S_FW_RI_RES_WR_CIDXFTHRESH 16
417#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
418#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
419#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
420 (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
421
422#define S_FW_RI_RES_WR_EQSIZE 0
423#define M_FW_RI_RES_WR_EQSIZE 0xffff
424#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
425#define G_FW_RI_RES_WR_EQSIZE(x) \
426 (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
427
428#define S_FW_RI_RES_WR_IQANDST 15
429#define M_FW_RI_RES_WR_IQANDST 0x1
430#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
431#define G_FW_RI_RES_WR_IQANDST(x) \
432 (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
433#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
434
435#define S_FW_RI_RES_WR_IQANUS 14
436#define M_FW_RI_RES_WR_IQANUS 0x1
437#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
438#define G_FW_RI_RES_WR_IQANUS(x) \
439 (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
440#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
441
442#define S_FW_RI_RES_WR_IQANUD 12
443#define M_FW_RI_RES_WR_IQANUD 0x3
444#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
445#define G_FW_RI_RES_WR_IQANUD(x) \
446 (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
447
448#define S_FW_RI_RES_WR_IQANDSTINDEX 0
449#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
450#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
451#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
452 (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
453
454#define S_FW_RI_RES_WR_IQDROPRSS 15
455#define M_FW_RI_RES_WR_IQDROPRSS 0x1
456#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
457#define G_FW_RI_RES_WR_IQDROPRSS(x) \
458 (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
459#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
460
461#define S_FW_RI_RES_WR_IQGTSMODE 14
462#define M_FW_RI_RES_WR_IQGTSMODE 0x1
463#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
464#define G_FW_RI_RES_WR_IQGTSMODE(x) \
465 (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
466#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
467
468#define S_FW_RI_RES_WR_IQPCIECH 12
469#define M_FW_RI_RES_WR_IQPCIECH 0x3
470#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
471#define G_FW_RI_RES_WR_IQPCIECH(x) \
472 (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
473
474#define S_FW_RI_RES_WR_IQDCAEN 11
475#define M_FW_RI_RES_WR_IQDCAEN 0x1
476#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
477#define G_FW_RI_RES_WR_IQDCAEN(x) \
478 (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
479#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
480
481#define S_FW_RI_RES_WR_IQDCACPU 6
482#define M_FW_RI_RES_WR_IQDCACPU 0x1f
483#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
484#define G_FW_RI_RES_WR_IQDCACPU(x) \
485 (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
486
487#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
488#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
489#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
490 ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
491#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
492 (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
493
494#define S_FW_RI_RES_WR_IQO 3
495#define M_FW_RI_RES_WR_IQO 0x1
496#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
497#define G_FW_RI_RES_WR_IQO(x) \
498 (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
499#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
500
501#define S_FW_RI_RES_WR_IQCPRIO 2
502#define M_FW_RI_RES_WR_IQCPRIO 0x1
503#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
504#define G_FW_RI_RES_WR_IQCPRIO(x) \
505 (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
506#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
507
508#define S_FW_RI_RES_WR_IQESIZE 0
509#define M_FW_RI_RES_WR_IQESIZE 0x3
510#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
511#define G_FW_RI_RES_WR_IQESIZE(x) \
512 (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
513
514#define S_FW_RI_RES_WR_IQNS 31
515#define M_FW_RI_RES_WR_IQNS 0x1
516#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
517#define G_FW_RI_RES_WR_IQNS(x) \
518 (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
519#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
520
521#define S_FW_RI_RES_WR_IQRO 30
522#define M_FW_RI_RES_WR_IQRO 0x1
523#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
524#define G_FW_RI_RES_WR_IQRO(x) \
525 (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
526#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
527
528struct fw_ri_rdma_write_wr {
529 __u8 opcode;
530 __u8 flags;
531 __u16 wrid;
532 __u8 r1[3];
533 __u8 len16;
534 __be64 r2;
535 __be32 plen;
536 __be32 stag_sink;
537 __be64 to_sink;
538#ifndef C99_NOT_SUPPORTED
539 union {
540 struct fw_ri_immd immd_src[0];
541 struct fw_ri_isgl isgl_src[0];
542 } u;
543#endif
544};
545
546struct fw_ri_send_wr {
547 __u8 opcode;
548 __u8 flags;
549 __u16 wrid;
550 __u8 r1[3];
551 __u8 len16;
552 __be32 sendop_pkd;
553 __be32 stag_inv;
554 __be32 plen;
555 __be32 r3;
556 __be64 r4;
557#ifndef C99_NOT_SUPPORTED
558 union {
559 struct fw_ri_immd immd_src[0];
560 struct fw_ri_isgl isgl_src[0];
561 } u;
562#endif
563};
564
565#define S_FW_RI_SEND_WR_SENDOP 0
566#define M_FW_RI_SEND_WR_SENDOP 0xf
567#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
568#define G_FW_RI_SEND_WR_SENDOP(x) \
569 (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
570
571struct fw_ri_rdma_read_wr {
572 __u8 opcode;
573 __u8 flags;
574 __u16 wrid;
575 __u8 r1[3];
576 __u8 len16;
577 __be64 r2;
578 __be32 stag_sink;
579 __be32 to_sink_hi;
580 __be32 to_sink_lo;
581 __be32 plen;
582 __be32 stag_src;
583 __be32 to_src_hi;
584 __be32 to_src_lo;
585 __be32 r5;
586};
587
588struct fw_ri_recv_wr {
589 __u8 opcode;
590 __u8 r1;
591 __u16 wrid;
592 __u8 r2[3];
593 __u8 len16;
594 struct fw_ri_isgl isgl;
595};
596
597struct fw_ri_bind_mw_wr {
598 __u8 opcode;
599 __u8 flags;
600 __u16 wrid;
601 __u8 r1[3];
602 __u8 len16;
603 __u8 qpbinde_to_dcacpu;
604 __u8 pgsz_shift;
605 __u8 addr_type;
606 __u8 mem_perms;
607 __be32 stag_mr;
608 __be32 stag_mw;
609 __be32 r3;
610 __be64 len_mw;
611 __be64 va_fbo;
612 __be64 r4;
613};
614
615#define S_FW_RI_BIND_MW_WR_QPBINDE 6
616#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
617#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
618#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
619 (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
620#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
621
622#define S_FW_RI_BIND_MW_WR_NS 5
623#define M_FW_RI_BIND_MW_WR_NS 0x1
624#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
625#define G_FW_RI_BIND_MW_WR_NS(x) \
626 (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
627#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
628
629#define S_FW_RI_BIND_MW_WR_DCACPU 0
630#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
631#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
632#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
633 (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
634
635struct fw_ri_fr_nsmr_wr {
636 __u8 opcode;
637 __u8 flags;
638 __u16 wrid;
639 __u8 r1[3];
640 __u8 len16;
641 __u8 qpbinde_to_dcacpu;
642 __u8 pgsz_shift;
643 __u8 addr_type;
644 __u8 mem_perms;
645 __be32 stag;
646 __be32 len_hi;
647 __be32 len_lo;
648 __be32 va_hi;
649 __be32 va_lo_fbo;
650};
651
652#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
653#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
654#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
655#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
656 (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
657#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
658
659#define S_FW_RI_FR_NSMR_WR_NS 5
660#define M_FW_RI_FR_NSMR_WR_NS 0x1
661#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
662#define G_FW_RI_FR_NSMR_WR_NS(x) \
663 (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
664#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
665
666#define S_FW_RI_FR_NSMR_WR_DCACPU 0
667#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
668#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
669#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
670 (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
671
672struct fw_ri_inv_lstag_wr {
673 __u8 opcode;
674 __u8 flags;
675 __u16 wrid;
676 __u8 r1[3];
677 __u8 len16;
678 __be32 r2;
679 __be32 stag_inv;
680};
681
682enum fw_ri_type {
683 FW_RI_TYPE_INIT,
684 FW_RI_TYPE_FINI,
685 FW_RI_TYPE_TERMINATE
686};
687
688enum fw_ri_init_p2ptype {
689 FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE,
690 FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ,
691 FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND,
692 FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV,
693 FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE,
694 FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV,
695 FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
696};
697
698struct fw_ri_wr {
699 __be32 op_compl;
700 __be32 flowid_len16;
701 __u64 cookie;
702 union fw_ri {
703 struct fw_ri_init {
704 __u8 type;
705 __u8 mpareqbit_p2ptype;
706 __u8 r4[2];
707 __u8 mpa_attrs;
708 __u8 qp_caps;
709 __be16 nrqe;
710 __be32 pdid;
711 __be32 qpid;
712 __be32 sq_eqid;
713 __be32 rq_eqid;
714 __be32 scqid;
715 __be32 rcqid;
716 __be32 ord_max;
717 __be32 ird_max;
718 __be32 iss;
719 __be32 irs;
720 __be32 hwrqsize;
721 __be32 hwrqaddr;
722 __be64 r5;
723 union fw_ri_init_p2p {
724 struct fw_ri_rdma_write_wr write;
725 struct fw_ri_rdma_read_wr read;
726 struct fw_ri_send_wr send;
727 } u;
728 } init;
729 struct fw_ri_fini {
730 __u8 type;
731 __u8 r3[7];
732 __be64 r4;
733 } fini;
734 struct fw_ri_terminate {
735 __u8 type;
736 __u8 r3[3];
737 __be32 immdlen;
738 __u8 termmsg[40];
739 } terminate;
740 } u;
741};
742
743#define S_FW_RI_WR_MPAREQBIT 7
744#define M_FW_RI_WR_MPAREQBIT 0x1
745#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
746#define G_FW_RI_WR_MPAREQBIT(x) \
747 (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
748#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
749
750#define S_FW_RI_WR_P2PTYPE 0
751#define M_FW_RI_WR_P2PTYPE 0xf
752#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
753#define G_FW_RI_WR_P2PTYPE(x) \
754 (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
755
756struct tcp_options {
757 __be16 mss;
758 __u8 wsf;
759#if defined(__LITTLE_ENDIAN_BITFIELD)
760 __u8:4;
761 __u8 unknown:1;
762 __u8:1;
763 __u8 sack:1;
764 __u8 tstamp:1;
765#else
766 __u8 tstamp:1;
767 __u8 sack:1;
768 __u8:1;
769 __u8 unknown:1;
770 __u8:4;
771#endif
772};
773
774struct cpl_pass_accept_req {
775 union opcode_tid ot;
776 __be16 rsvd;
777 __be16 len;
778 __be32 hdr_len;
779 __be16 vlan;
780 __be16 l2info;
781 __be32 tos_stid;
782 struct tcp_options tcpopt;
783};
784
785/* cpl_pass_accept_req.hdr_len fields */
786#define S_SYN_RX_CHAN 0
787#define M_SYN_RX_CHAN 0xF
788#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
789#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
790
791#define S_TCP_HDR_LEN 10
792#define M_TCP_HDR_LEN 0x3F
793#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
794#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
795
796#define S_IP_HDR_LEN 16
797#define M_IP_HDR_LEN 0x3FF
798#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
799#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
800
801#define S_ETH_HDR_LEN 26
802#define M_ETH_HDR_LEN 0x1F
803#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
804#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
805
806/* cpl_pass_accept_req.l2info fields */
807#define S_SYN_MAC_IDX 0
808#define M_SYN_MAC_IDX 0x1FF
809#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
810#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
811
812#define S_SYN_XACT_MATCH 9
813#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
814#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
815
816#define S_SYN_INTF 12
817#define M_SYN_INTF 0xF
818#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
819#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
820
821struct ulptx_idata {
822 __be32 cmd_more;
823 __be32 len;
824};
825
826#define S_ULPTX_NSGE 0
827#define M_ULPTX_NSGE 0xFFFF
828#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
829#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
new file mode 100644
index 000000000000..ed6414abde02
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __C4IW_USER_H__
33#define __C4IW_USER_H__
34
35#define C4IW_UVERBS_ABI_VERSION 1
36
37/*
38 * Make sure that all structs defined in this file remain laid out so
39 * that they pack the same way on 32-bit and 64-bit architectures (to
40 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
41 * In particular do not use pointer types -- pass pointers in __u64
42 * instead.
43 */
44struct c4iw_create_cq_resp {
45 __u64 key;
46 __u64 gts_key;
47 __u64 memsize;
48 __u32 cqid;
49 __u32 size;
50 __u32 qid_mask;
51};
52
53struct c4iw_create_qp_resp {
54 __u64 sq_key;
55 __u64 rq_key;
56 __u64 sq_db_gts_key;
57 __u64 rq_db_gts_key;
58 __u64 sq_memsize;
59 __u64 rq_memsize;
60 __u32 sqid;
61 __u32 rqid;
62 __u32 sq_size;
63 __u32 rq_size;
64 __u32 qid_mask;
65};
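/*
 * Editorial illustration of the layout rule above: if a response ever had to
 * carry a userspace address, it would be declared as "__u64 addr" and filled
 * with (__u64)(unsigned long)ptr, never as "void *addr", whose size (and
 * hence the struct layout) differs between 32-bit and 64-bit builds.
 */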
66#endif