author		Michael Chan <mchan@broadcom.com>	2009-06-08 21:14:43 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2009-06-09 11:22:42 -0400
commit		a463696039f7097ce87c21db3cf5c16cdcb3850d (patch)
tree		3308681e117008282fd73a224215e0aab173262e
parent		4edd473f208cff77ce1f7ef26d5a41f31fa198e0 (diff)
[SCSI] cnic: Add new Broadcom CNIC driver.
The CNIC driver controls BNX2 hardware rings and resources used by iSCSI. Most hardware resources for iSCSI are separate from those used for ethernet networking. iSCSI uses a separate MAC address and IP address. The CNIC driver creates a UIO interface to handle the non-offloaded packets such as ARP, etc. in userspace.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
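For context only (not part of this patch), a minimal, hypothetical sketch of how a userspace agent might consume the UIO interface that cnic registers. It assumes the "bnx2_cnic" UIO device shows up as /dev/uio0 and relies only on the standard UIO semantics: each read() on the device blocks until the driver calls uio_event_notify().

/* uio_poll.c: hypothetical userspace consumer of the cnic UIO device.
 * Assumes the "bnx2_cnic" UIO device was registered as /dev/uio0.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t event_count;
	int fd = open("/dev/uio0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/uio0");
		return 1;
	}
	for (;;) {
		/* Blocks until the driver calls uio_event_notify(),
		 * i.e. when the L2 rx/tx consumer indices change.
		 */
		if (read(fd, &event_count, sizeof(event_count)) !=
		    sizeof(event_count))
			break;
		printf("cnic UIO event %u: process non-offloaded packets\n",
		       (unsigned int) event_count);
	}
	close(fd);
	return 0;
}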
-rw-r--r--	drivers/net/Kconfig	11
-rw-r--r--	drivers/net/Makefile	1
-rw-r--r--	drivers/net/cnic.c	2711
-rw-r--r--	drivers/net/cnic.h	299
-rw-r--r--	drivers/net/cnic_defs.h	580
-rw-r--r--	drivers/net/cnic_if.h	299
6 files changed, 3901 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 214a92d1ef75..f3c4a3b910bb 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2264,6 +2264,17 @@ config BNX2
2264	  To compile this driver as a module, choose M here: the module
2265	  will be called bnx2.  This is recommended.
2266
2267config CNIC
2268 tristate "Broadcom CNIC support"
2269 depends on BNX2
2270 depends on UIO
2271 help
2272 This driver supports offload features of Broadcom NetXtremeII
2273 gigabit Ethernet cards.
2274
2275 To compile this driver as a module, choose M here: the module
2276 will be called cnic. This is recommended.
2277
2278config SPIDER_NET
2279	tristate "Spider Gigabit Ethernet driver"
2280	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1fc4602a6ff2..e6f1f8c3f8d4 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
73obj-$(CONFIG_FEALNX) += fealnx.o
74obj-$(CONFIG_TIGON3) += tg3.o
75obj-$(CONFIG_BNX2) += bnx2.o
76obj-$(CONFIG_CNIC) += cnic.o
77obj-$(CONFIG_BNX2X) += bnx2x.o
78bnx2x-objs := bnx2x_main.o bnx2x_link.o
79spidernet-y += spider_net.o spider_net_ethtool.o
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
new file mode 100644
index 000000000000..8d740376bbd2
--- /dev/null
+++ b/drivers/net/cnic.c
@@ -0,0 +1,2711 @@
1/* cnic.c: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
10 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
11 */
12
13#include <linux/module.h>
14
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/pci.h>
20#include <linux/init.h>
21#include <linux/netdevice.h>
22#include <linux/uio_driver.h>
23#include <linux/in.h>
24#include <linux/dma-mapping.h>
25#include <linux/delay.h>
26#include <linux/ethtool.h>
27#include <linux/if_vlan.h>
28#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
29#define BCM_VLAN 1
30#endif
31#include <net/ip.h>
32#include <net/tcp.h>
33#include <net/route.h>
34#include <net/ipv6.h>
35#include <net/ip6_route.h>
36#include <scsi/iscsi_if.h>
37
38#include "cnic_if.h"
39#include "bnx2.h"
40#include "cnic.h"
41#include "cnic_defs.h"
42
43#define DRV_MODULE_NAME "cnic"
44#define PFX DRV_MODULE_NAME ": "
45
46static char version[] __devinitdata =
47 "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
48
49MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
50 "Chen (zongxi@broadcom.com");
51MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
52MODULE_LICENSE("GPL");
53MODULE_VERSION(CNIC_MODULE_VERSION);
54
55static LIST_HEAD(cnic_dev_list);
56static DEFINE_RWLOCK(cnic_dev_lock);
57static DEFINE_MUTEX(cnic_lock);
58
59static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
60
61static int cnic_service_bnx2(void *, void *);
62static int cnic_ctl(void *, struct cnic_ctl_info *);
63
64static struct cnic_ops cnic_bnx2_ops = {
65 .cnic_owner = THIS_MODULE,
66 .cnic_handler = cnic_service_bnx2,
67 .cnic_ctl = cnic_ctl,
68};
69
70static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
71static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
72static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
73static int cnic_cm_set_pg(struct cnic_sock *);
74
75static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
76{
77 struct cnic_dev *dev = uinfo->priv;
78 struct cnic_local *cp = dev->cnic_priv;
79
80 if (!capable(CAP_NET_ADMIN))
81 return -EPERM;
82
83 if (cp->uio_dev != -1)
84 return -EBUSY;
85
86 cp->uio_dev = iminor(inode);
87
88 cnic_shutdown_bnx2_rx_ring(dev);
89
90 cnic_init_bnx2_tx_ring(dev);
91 cnic_init_bnx2_rx_ring(dev);
92
93 return 0;
94}
95
96static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
97{
98 struct cnic_dev *dev = uinfo->priv;
99 struct cnic_local *cp = dev->cnic_priv;
100
101 cp->uio_dev = -1;
102 return 0;
103}
104
105static inline void cnic_hold(struct cnic_dev *dev)
106{
107 atomic_inc(&dev->ref_count);
108}
109
110static inline void cnic_put(struct cnic_dev *dev)
111{
112 atomic_dec(&dev->ref_count);
113}
114
115static inline void csk_hold(struct cnic_sock *csk)
116{
117 atomic_inc(&csk->ref_count);
118}
119
120static inline void csk_put(struct cnic_sock *csk)
121{
122 atomic_dec(&csk->ref_count);
123}
124
125static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
126{
127 struct cnic_dev *cdev;
128
129 read_lock(&cnic_dev_lock);
130 list_for_each_entry(cdev, &cnic_dev_list, list) {
131 if (netdev == cdev->netdev) {
132 cnic_hold(cdev);
133 read_unlock(&cnic_dev_lock);
134 return cdev;
135 }
136 }
137 read_unlock(&cnic_dev_lock);
138 return NULL;
139}
140
141static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
142{
143 struct cnic_local *cp = dev->cnic_priv;
144 struct cnic_eth_dev *ethdev = cp->ethdev;
145 struct drv_ctl_info info;
146 struct drv_ctl_io *io = &info.data.io;
147
148 info.cmd = DRV_CTL_CTX_WR_CMD;
149 io->cid_addr = cid_addr;
150 io->offset = off;
151 io->data = val;
152 ethdev->drv_ctl(dev->netdev, &info);
153}
154
155static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
156{
157 struct cnic_local *cp = dev->cnic_priv;
158 struct cnic_eth_dev *ethdev = cp->ethdev;
159 struct drv_ctl_info info;
160 struct drv_ctl_io *io = &info.data.io;
161
162 info.cmd = DRV_CTL_IO_WR_CMD;
163 io->offset = off;
164 io->data = val;
165 ethdev->drv_ctl(dev->netdev, &info);
166}
167
168static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
169{
170 struct cnic_local *cp = dev->cnic_priv;
171 struct cnic_eth_dev *ethdev = cp->ethdev;
172 struct drv_ctl_info info;
173 struct drv_ctl_io *io = &info.data.io;
174
175 info.cmd = DRV_CTL_IO_RD_CMD;
176 io->offset = off;
177 ethdev->drv_ctl(dev->netdev, &info);
178 return io->data;
179}
180
181static int cnic_in_use(struct cnic_sock *csk)
182{
183 return test_bit(SK_F_INUSE, &csk->flags);
184}
185
186static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
187{
188 struct cnic_local *cp = dev->cnic_priv;
189 struct cnic_eth_dev *ethdev = cp->ethdev;
190 struct drv_ctl_info info;
191
192 info.cmd = DRV_CTL_COMPLETION_CMD;
193 info.data.comp.comp_count = count;
194 ethdev->drv_ctl(dev->netdev, &info);
195}
196
197static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
198 struct cnic_sock *csk)
199{
200 struct iscsi_path path_req;
201 char *buf = NULL;
202 u16 len = 0;
203 u32 msg_type = ISCSI_KEVENT_IF_DOWN;
204 struct cnic_ulp_ops *ulp_ops;
205
206 if (cp->uio_dev == -1)
207 return -ENODEV;
208
209 if (csk) {
210 len = sizeof(path_req);
211 buf = (char *) &path_req;
212 memset(&path_req, 0, len);
213
214 msg_type = ISCSI_KEVENT_PATH_REQ;
215 path_req.handle = (u64) csk->l5_cid;
216 if (test_bit(SK_F_IPV6, &csk->flags)) {
217 memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
218 sizeof(struct in6_addr));
219 path_req.ip_addr_len = 16;
220 } else {
221 memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
222 sizeof(struct in_addr));
223 path_req.ip_addr_len = 4;
224 }
225 path_req.vlan_id = csk->vlan_id;
226 path_req.pmtu = csk->mtu;
227 }
228
229 rcu_read_lock();
230 ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
231 if (ulp_ops)
232 ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
233 rcu_read_unlock();
234 return 0;
235}
236
237static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
238 char *buf, u16 len)
239{
240 int rc = -EINVAL;
241
242 switch (msg_type) {
243 case ISCSI_UEVENT_PATH_UPDATE: {
244 struct cnic_local *cp;
245 u32 l5_cid;
246 struct cnic_sock *csk;
247 struct iscsi_path *path_resp;
248
249 if (len < sizeof(*path_resp))
250 break;
251
252 path_resp = (struct iscsi_path *) buf;
253 cp = dev->cnic_priv;
254 l5_cid = (u32) path_resp->handle;
255 if (l5_cid >= MAX_CM_SK_TBL_SZ)
256 break;
257
258 csk = &cp->csk_tbl[l5_cid];
259 csk_hold(csk);
260 if (cnic_in_use(csk)) {
261 memcpy(csk->ha, path_resp->mac_addr, 6);
262 if (test_bit(SK_F_IPV6, &csk->flags))
263 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
264 sizeof(struct in6_addr));
265 else
266 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
267 sizeof(struct in_addr));
268 if (is_valid_ether_addr(csk->ha))
269 cnic_cm_set_pg(csk);
270 }
271 csk_put(csk);
272 rc = 0;
273 }
274 }
275
276 return rc;
277}
278
279static int cnic_offld_prep(struct cnic_sock *csk)
280{
281 if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
282 return 0;
283
284 if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
285 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
286 return 0;
287 }
288
289 return 1;
290}
291
292static int cnic_close_prep(struct cnic_sock *csk)
293{
294 clear_bit(SK_F_CONNECT_START, &csk->flags);
295 smp_mb__after_clear_bit();
296
297 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
298 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
299 msleep(1);
300
301 return 1;
302 }
303 return 0;
304}
305
306static int cnic_abort_prep(struct cnic_sock *csk)
307{
308 clear_bit(SK_F_CONNECT_START, &csk->flags);
309 smp_mb__after_clear_bit();
310
311 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
312 msleep(1);
313
314 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
315 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
316 return 1;
317 }
318
319 return 0;
320}
321
322int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
323{
324 struct cnic_dev *dev;
325
326 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
327 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
328 ulp_type);
329 return -EINVAL;
330 }
331 mutex_lock(&cnic_lock);
332 if (cnic_ulp_tbl[ulp_type]) {
333 printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
334 "been registered\n", ulp_type);
335 mutex_unlock(&cnic_lock);
336 return -EBUSY;
337 }
338
339 read_lock(&cnic_dev_lock);
340 list_for_each_entry(dev, &cnic_dev_list, list) {
341 struct cnic_local *cp = dev->cnic_priv;
342
343 clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
344 }
345 read_unlock(&cnic_dev_lock);
346
347 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
348 mutex_unlock(&cnic_lock);
349
350 /* Prevent race conditions with netdev_event */
351 rtnl_lock();
352 read_lock(&cnic_dev_lock);
353 list_for_each_entry(dev, &cnic_dev_list, list) {
354 struct cnic_local *cp = dev->cnic_priv;
355
356 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
357 ulp_ops->cnic_init(dev);
358 }
359 read_unlock(&cnic_dev_lock);
360 rtnl_unlock();
361
362 return 0;
363}
364
365int cnic_unregister_driver(int ulp_type)
366{
367 struct cnic_dev *dev;
368
369 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
370 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
371 ulp_type);
372 return -EINVAL;
373 }
374 mutex_lock(&cnic_lock);
375 if (!cnic_ulp_tbl[ulp_type]) {
376 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
377 "been registered\n", ulp_type);
378 goto out_unlock;
379 }
380 read_lock(&cnic_dev_lock);
381 list_for_each_entry(dev, &cnic_dev_list, list) {
382 struct cnic_local *cp = dev->cnic_priv;
383
384 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
385 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
386 "still has devices registered\n", ulp_type);
387 read_unlock(&cnic_dev_lock);
388 goto out_unlock;
389 }
390 }
391 read_unlock(&cnic_dev_lock);
392
393 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
394
395 mutex_unlock(&cnic_lock);
396 synchronize_rcu();
397 return 0;
398
399out_unlock:
400 mutex_unlock(&cnic_lock);
401 return -EINVAL;
402}
403
404static int cnic_start_hw(struct cnic_dev *);
405static void cnic_stop_hw(struct cnic_dev *);
406
407static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
408 void *ulp_ctx)
409{
410 struct cnic_local *cp = dev->cnic_priv;
411 struct cnic_ulp_ops *ulp_ops;
412
413 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
414 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
415 ulp_type);
416 return -EINVAL;
417 }
418 mutex_lock(&cnic_lock);
419 if (cnic_ulp_tbl[ulp_type] == NULL) {
420 printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
421 "has not been registered\n", ulp_type);
422 mutex_unlock(&cnic_lock);
423 return -EAGAIN;
424 }
425 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
426 printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
427 "been registered to this device\n", ulp_type);
428 mutex_unlock(&cnic_lock);
429 return -EBUSY;
430 }
431
432 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
433 cp->ulp_handle[ulp_type] = ulp_ctx;
434 ulp_ops = cnic_ulp_tbl[ulp_type];
435 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
436 cnic_hold(dev);
437
438 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
439 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
440 ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
441
442 mutex_unlock(&cnic_lock);
443
444 return 0;
445
446}
447EXPORT_SYMBOL(cnic_register_driver);
448
449static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
450{
451 struct cnic_local *cp = dev->cnic_priv;
452
453 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
454 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
455 ulp_type);
456 return -EINVAL;
457 }
458 mutex_lock(&cnic_lock);
459 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
460 rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
461 cnic_put(dev);
462 } else {
463 printk(KERN_ERR PFX "cnic_unregister_device: device not "
464 "registered to this ulp type %d\n", ulp_type);
465 mutex_unlock(&cnic_lock);
466 return -EINVAL;
467 }
468 mutex_unlock(&cnic_lock);
469
470 synchronize_rcu();
471
472 return 0;
473}
474EXPORT_SYMBOL(cnic_unregister_driver);
475
476static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
477{
478 id_tbl->start = start_id;
479 id_tbl->max = size;
480 id_tbl->next = 0;
481 spin_lock_init(&id_tbl->lock);
482 id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
483 if (!id_tbl->table)
484 return -ENOMEM;
485
486 return 0;
487}
488
489static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
490{
491 kfree(id_tbl->table);
492 id_tbl->table = NULL;
493}
494
495static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
496{
497 int ret = -1;
498
499 id -= id_tbl->start;
500 if (id >= id_tbl->max)
501 return ret;
502
503 spin_lock(&id_tbl->lock);
504 if (!test_bit(id, id_tbl->table)) {
505 set_bit(id, id_tbl->table);
506 ret = 0;
507 }
508 spin_unlock(&id_tbl->lock);
509 return ret;
510}
511
512/* Returns -1 if not successful */
513static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
514{
515 u32 id;
516
517 spin_lock(&id_tbl->lock);
518 id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
519 if (id >= id_tbl->max) {
520 id = -1;
521 if (id_tbl->next != 0) {
522 id = find_first_zero_bit(id_tbl->table, id_tbl->next);
523 if (id >= id_tbl->next)
524 id = -1;
525 }
526 }
527
528 if (id < id_tbl->max) {
529 set_bit(id, id_tbl->table);
530 id_tbl->next = (id + 1) & (id_tbl->max - 1);
531 id += id_tbl->start;
532 }
533
534 spin_unlock(&id_tbl->lock);
535
536 return id;
537}
538
539static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
540{
541 if (id == -1)
542 return;
543
544 id -= id_tbl->start;
545 if (id >= id_tbl->max)
546 return;
547
548 clear_bit(id, id_tbl->table);
549}
550
551static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
552{
553 int i;
554
555 if (!dma->pg_arr)
556 return;
557
558 for (i = 0; i < dma->num_pages; i++) {
559 if (dma->pg_arr[i]) {
560 pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
561 dma->pg_arr[i], dma->pg_map_arr[i]);
562 dma->pg_arr[i] = NULL;
563 }
564 }
565 if (dma->pgtbl) {
566 pci_free_consistent(dev->pcidev, dma->pgtbl_size,
567 dma->pgtbl, dma->pgtbl_map);
568 dma->pgtbl = NULL;
569 }
570 kfree(dma->pg_arr);
571 dma->pg_arr = NULL;
572 dma->num_pages = 0;
573}
574
575static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
576{
577 int i;
578 u32 *page_table = dma->pgtbl;
579
580 for (i = 0; i < dma->num_pages; i++) {
581 /* Each entry needs to be in big endian format. */
582 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
583 page_table++;
584 *page_table = (u32) dma->pg_map_arr[i];
585 page_table++;
586 }
587}
588
589static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
590 int pages, int use_pg_tbl)
591{
592 int i, size;
593 struct cnic_local *cp = dev->cnic_priv;
594
595 size = pages * (sizeof(void *) + sizeof(dma_addr_t));
596 dma->pg_arr = kzalloc(size, GFP_ATOMIC);
597 if (dma->pg_arr == NULL)
598 return -ENOMEM;
599
600 dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
601 dma->num_pages = pages;
602
603 for (i = 0; i < pages; i++) {
604 dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
605 BCM_PAGE_SIZE,
606 &dma->pg_map_arr[i]);
607 if (dma->pg_arr[i] == NULL)
608 goto error;
609 }
610 if (!use_pg_tbl)
611 return 0;
612
613 dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
614 ~(BCM_PAGE_SIZE - 1);
615 dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
616 &dma->pgtbl_map);
617 if (dma->pgtbl == NULL)
618 goto error;
619
620 cp->setup_pgtbl(dev, dma);
621
622 return 0;
623
624error:
625 cnic_free_dma(dev, dma);
626 return -ENOMEM;
627}
628
629static void cnic_free_resc(struct cnic_dev *dev)
630{
631 struct cnic_local *cp = dev->cnic_priv;
632 int i = 0;
633
634 if (cp->cnic_uinfo) {
635 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
636 while (cp->uio_dev != -1 && i < 15) {
637 msleep(100);
638 i++;
639 }
640 uio_unregister_device(cp->cnic_uinfo);
641 kfree(cp->cnic_uinfo);
642 cp->cnic_uinfo = NULL;
643 }
644
645 if (cp->l2_buf) {
646 pci_free_consistent(dev->pcidev, cp->l2_buf_size,
647 cp->l2_buf, cp->l2_buf_map);
648 cp->l2_buf = NULL;
649 }
650
651 if (cp->l2_ring) {
652 pci_free_consistent(dev->pcidev, cp->l2_ring_size,
653 cp->l2_ring, cp->l2_ring_map);
654 cp->l2_ring = NULL;
655 }
656
657 for (i = 0; i < cp->ctx_blks; i++) {
658 if (cp->ctx_arr[i].ctx) {
659 pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
660 cp->ctx_arr[i].ctx,
661 cp->ctx_arr[i].mapping);
662 cp->ctx_arr[i].ctx = NULL;
663 }
664 }
665 kfree(cp->ctx_arr);
666 cp->ctx_arr = NULL;
667 cp->ctx_blks = 0;
668
669 cnic_free_dma(dev, &cp->gbl_buf_info);
670 cnic_free_dma(dev, &cp->conn_buf_info);
671 cnic_free_dma(dev, &cp->kwq_info);
672 cnic_free_dma(dev, &cp->kcq_info);
673 kfree(cp->iscsi_tbl);
674 cp->iscsi_tbl = NULL;
675 kfree(cp->ctx_tbl);
676 cp->ctx_tbl = NULL;
677
678 cnic_free_id_tbl(&cp->cid_tbl);
679}
680
681static int cnic_alloc_context(struct cnic_dev *dev)
682{
683 struct cnic_local *cp = dev->cnic_priv;
684
685 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
686 int i, k, arr_size;
687
688 cp->ctx_blk_size = BCM_PAGE_SIZE;
689 cp->cids_per_blk = BCM_PAGE_SIZE / 128;
690 arr_size = BNX2_MAX_CID / cp->cids_per_blk *
691 sizeof(struct cnic_ctx);
692 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
693 if (cp->ctx_arr == NULL)
694 return -ENOMEM;
695
696 k = 0;
697 for (i = 0; i < 2; i++) {
698 u32 j, reg, off, lo, hi;
699
700 if (i == 0)
701 off = BNX2_PG_CTX_MAP;
702 else
703 off = BNX2_ISCSI_CTX_MAP;
704
705 reg = cnic_reg_rd_ind(dev, off);
706 lo = reg >> 16;
707 hi = reg & 0xffff;
708 for (j = lo; j < hi; j += cp->cids_per_blk, k++)
709 cp->ctx_arr[k].cid = j;
710 }
711
712 cp->ctx_blks = k;
713 if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
714 cp->ctx_blks = 0;
715 return -ENOMEM;
716 }
717
718 for (i = 0; i < cp->ctx_blks; i++) {
719 cp->ctx_arr[i].ctx =
720 pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
721 &cp->ctx_arr[i].mapping);
722 if (cp->ctx_arr[i].ctx == NULL)
723 return -ENOMEM;
724 }
725 }
726 return 0;
727}
728
729static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
730{
731 struct cnic_local *cp = dev->cnic_priv;
732 struct uio_info *uinfo;
733 int ret;
734
735 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
736 if (ret)
737 goto error;
738 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
739
740 ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
741 if (ret)
742 goto error;
743 cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
744
745 ret = cnic_alloc_context(dev);
746 if (ret)
747 goto error;
748
749 cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
750 cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
751 &cp->l2_ring_map);
752 if (!cp->l2_ring)
753 goto error;
754
755 cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
756 cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
757 cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
758 &cp->l2_buf_map);
759 if (!cp->l2_buf)
760 goto error;
761
762 uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
763 if (!uinfo)
764 goto error;
765
766 uinfo->mem[0].addr = dev->netdev->base_addr;
767 uinfo->mem[0].internal_addr = dev->regview;
768 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
769 uinfo->mem[0].memtype = UIO_MEM_PHYS;
770
771 uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
772 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
773 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
774 else
775 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
776 uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
777
778 uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
779 uinfo->mem[2].size = cp->l2_ring_size;
780 uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
781
782 uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
783 uinfo->mem[3].size = cp->l2_buf_size;
784 uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
785
786 uinfo->name = "bnx2_cnic";
787 uinfo->version = CNIC_MODULE_VERSION;
788 uinfo->irq = UIO_IRQ_CUSTOM;
789
790 uinfo->open = cnic_uio_open;
791 uinfo->release = cnic_uio_close;
792
793 uinfo->priv = dev;
794
795 ret = uio_register_device(&dev->pcidev->dev, uinfo);
796 if (ret) {
797 kfree(uinfo);
798 goto error;
799 }
800
801 cp->cnic_uinfo = uinfo;
802
803 return 0;
804
805error:
806 cnic_free_resc(dev);
807 return ret;
808}
809
810static inline u32 cnic_kwq_avail(struct cnic_local *cp)
811{
812 return cp->max_kwq_idx -
813 ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
814}
815
816static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
817 u32 num_wqes)
818{
819 struct cnic_local *cp = dev->cnic_priv;
820 struct kwqe *prod_qe;
821 u16 prod, sw_prod, i;
822
823 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
824 return -EAGAIN; /* bnx2 is down */
825
826 spin_lock_bh(&cp->cnic_ulp_lock);
827 if (num_wqes > cnic_kwq_avail(cp) &&
828 !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
829 spin_unlock_bh(&cp->cnic_ulp_lock);
830 return -EAGAIN;
831 }
832
833 cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
834
835 prod = cp->kwq_prod_idx;
836 sw_prod = prod & MAX_KWQ_IDX;
837 for (i = 0; i < num_wqes; i++) {
838 prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
839 memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
840 prod++;
841 sw_prod = prod & MAX_KWQ_IDX;
842 }
843 cp->kwq_prod_idx = prod;
844
845 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
846
847 spin_unlock_bh(&cp->cnic_ulp_lock);
848 return 0;
849}
850
851static void service_kcqes(struct cnic_dev *dev, int num_cqes)
852{
853 struct cnic_local *cp = dev->cnic_priv;
854 int i, j;
855
856 i = 0;
857 j = 1;
858 while (num_cqes) {
859 struct cnic_ulp_ops *ulp_ops;
860 int ulp_type;
861 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
862 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
863
864 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
865 cnic_kwq_completion(dev, 1);
866
867 while (j < num_cqes) {
868 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
869
870 if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
871 break;
872
873 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
874 cnic_kwq_completion(dev, 1);
875 j++;
876 }
877
878 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
879 ulp_type = CNIC_ULP_RDMA;
880 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
881 ulp_type = CNIC_ULP_ISCSI;
882 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
883 ulp_type = CNIC_ULP_L4;
884 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
885 goto end;
886 else {
887 printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
888 dev->netdev->name, kcqe_op_flag);
889 goto end;
890 }
891
892 rcu_read_lock();
893 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
894 if (likely(ulp_ops)) {
895 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
896 cp->completed_kcq + i, j);
897 }
898 rcu_read_unlock();
899end:
900 num_cqes -= j;
901 i += j;
902 j = 1;
903 }
904 return;
905}
906
907static u16 cnic_bnx2_next_idx(u16 idx)
908{
909 return idx + 1;
910}
911
912static u16 cnic_bnx2_hw_idx(u16 idx)
913{
914 return idx;
915}
916
917static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
918{
919 struct cnic_local *cp = dev->cnic_priv;
920 u16 i, ri, last;
921 struct kcqe *kcqe;
922 int kcqe_cnt = 0, last_cnt = 0;
923
924 i = ri = last = *sw_prod;
925 ri &= MAX_KCQ_IDX;
926
927 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
928 kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
929 cp->completed_kcq[kcqe_cnt++] = kcqe;
930 i = cp->next_idx(i);
931 ri = i & MAX_KCQ_IDX;
932 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
933 last_cnt = kcqe_cnt;
934 last = i;
935 }
936 }
937
938 *sw_prod = last;
939 return last_cnt;
940}
941
942static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
943{
944 u16 rx_cons = *cp->rx_cons_ptr;
945 u16 tx_cons = *cp->tx_cons_ptr;
946
947 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
948 cp->tx_cons = tx_cons;
949 cp->rx_cons = rx_cons;
950 uio_event_notify(cp->cnic_uinfo);
951 }
952}
953
954static int cnic_service_bnx2(void *data, void *status_blk)
955{
956 struct cnic_dev *dev = data;
957 struct status_block *sblk = status_blk;
958 struct cnic_local *cp = dev->cnic_priv;
959 u32 status_idx = sblk->status_idx;
960 u16 hw_prod, sw_prod;
961 int kcqe_cnt;
962
963 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
964 return status_idx;
965
966 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
967
968 hw_prod = sblk->status_completion_producer_index;
969 sw_prod = cp->kcq_prod_idx;
970 while (sw_prod != hw_prod) {
971 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
972 if (kcqe_cnt == 0)
973 goto done;
974
975 service_kcqes(dev, kcqe_cnt);
976
977 /* Tell compiler that status_blk fields can change. */
978 barrier();
979 if (status_idx != sblk->status_idx) {
980 status_idx = sblk->status_idx;
981 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
982 hw_prod = sblk->status_completion_producer_index;
983 } else
984 break;
985 }
986
987done:
988 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
989
990 cp->kcq_prod_idx = sw_prod;
991
992 cnic_chk_bnx2_pkt_rings(cp);
993 return status_idx;
994}
995
996static void cnic_service_bnx2_msix(unsigned long data)
997{
998 struct cnic_dev *dev = (struct cnic_dev *) data;
999 struct cnic_local *cp = dev->cnic_priv;
1000 struct status_block_msix *status_blk = cp->bnx2_status_blk;
1001 u32 status_idx = status_blk->status_idx;
1002 u16 hw_prod, sw_prod;
1003 int kcqe_cnt;
1004
1005 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
1006
1007 hw_prod = status_blk->status_completion_producer_index;
1008 sw_prod = cp->kcq_prod_idx;
1009 while (sw_prod != hw_prod) {
1010 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
1011 if (kcqe_cnt == 0)
1012 goto done;
1013
1014 service_kcqes(dev, kcqe_cnt);
1015
1016 /* Tell compiler that status_blk fields can change. */
1017 barrier();
1018 if (status_idx != status_blk->status_idx) {
1019 status_idx = status_blk->status_idx;
1020 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
1021 hw_prod = status_blk->status_completion_producer_index;
1022 } else
1023 break;
1024 }
1025
1026done:
1027 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
1028 cp->kcq_prod_idx = sw_prod;
1029
1030 cnic_chk_bnx2_pkt_rings(cp);
1031
1032 cp->last_status_idx = status_idx;
1033 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
1034 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
1035}
1036
1037static irqreturn_t cnic_irq(int irq, void *dev_instance)
1038{
1039 struct cnic_dev *dev = dev_instance;
1040 struct cnic_local *cp = dev->cnic_priv;
1041 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
1042
1043 if (cp->ack_int)
1044 cp->ack_int(dev);
1045
1046 prefetch(cp->status_blk);
1047 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
1048
1049 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
1050 tasklet_schedule(&cp->cnic_irq_task);
1051
1052 return IRQ_HANDLED;
1053}
1054
1055static void cnic_ulp_stop(struct cnic_dev *dev)
1056{
1057 struct cnic_local *cp = dev->cnic_priv;
1058 int if_type;
1059
1060 rcu_read_lock();
1061 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1062 struct cnic_ulp_ops *ulp_ops;
1063
1064 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
1065 if (!ulp_ops)
1066 continue;
1067
1068 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1069 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
1070 }
1071 rcu_read_unlock();
1072}
1073
1074static void cnic_ulp_start(struct cnic_dev *dev)
1075{
1076 struct cnic_local *cp = dev->cnic_priv;
1077 int if_type;
1078
1079 rcu_read_lock();
1080 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1081 struct cnic_ulp_ops *ulp_ops;
1082
1083 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
1084 if (!ulp_ops || !ulp_ops->cnic_start)
1085 continue;
1086
1087 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1088 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
1089 }
1090 rcu_read_unlock();
1091}
1092
1093static int cnic_ctl(void *data, struct cnic_ctl_info *info)
1094{
1095 struct cnic_dev *dev = data;
1096
1097 switch (info->cmd) {
1098 case CNIC_CTL_STOP_CMD:
1099 cnic_hold(dev);
1100 mutex_lock(&cnic_lock);
1101
1102 cnic_ulp_stop(dev);
1103 cnic_stop_hw(dev);
1104
1105 mutex_unlock(&cnic_lock);
1106 cnic_put(dev);
1107 break;
1108 case CNIC_CTL_START_CMD:
1109 cnic_hold(dev);
1110 mutex_lock(&cnic_lock);
1111
1112 if (!cnic_start_hw(dev))
1113 cnic_ulp_start(dev);
1114
1115 mutex_unlock(&cnic_lock);
1116 cnic_put(dev);
1117 break;
1118 default:
1119 return -EINVAL;
1120 }
1121 return 0;
1122}
1123
1124static void cnic_ulp_init(struct cnic_dev *dev)
1125{
1126 int i;
1127 struct cnic_local *cp = dev->cnic_priv;
1128
1129 rcu_read_lock();
1130 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1131 struct cnic_ulp_ops *ulp_ops;
1132
1133 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
1134 if (!ulp_ops || !ulp_ops->cnic_init)
1135 continue;
1136
1137 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1138 ulp_ops->cnic_init(dev);
1139
1140 }
1141 rcu_read_unlock();
1142}
1143
1144static void cnic_ulp_exit(struct cnic_dev *dev)
1145{
1146 int i;
1147 struct cnic_local *cp = dev->cnic_priv;
1148
1149 rcu_read_lock();
1150 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1151 struct cnic_ulp_ops *ulp_ops;
1152
1153 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
1154 if (!ulp_ops || !ulp_ops->cnic_exit)
1155 continue;
1156
1157 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1158 ulp_ops->cnic_exit(dev);
1159
1160 }
1161 rcu_read_unlock();
1162}
1163
1164static int cnic_cm_offload_pg(struct cnic_sock *csk)
1165{
1166 struct cnic_dev *dev = csk->dev;
1167 struct l4_kwq_offload_pg *l4kwqe;
1168 struct kwqe *wqes[1];
1169
1170 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
1171 memset(l4kwqe, 0, sizeof(*l4kwqe));
1172 wqes[0] = (struct kwqe *) l4kwqe;
1173
1174 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
1175 l4kwqe->flags =
1176 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
1177 l4kwqe->l2hdr_nbytes = ETH_HLEN;
1178
1179 l4kwqe->da0 = csk->ha[0];
1180 l4kwqe->da1 = csk->ha[1];
1181 l4kwqe->da2 = csk->ha[2];
1182 l4kwqe->da3 = csk->ha[3];
1183 l4kwqe->da4 = csk->ha[4];
1184 l4kwqe->da5 = csk->ha[5];
1185
1186 l4kwqe->sa0 = dev->mac_addr[0];
1187 l4kwqe->sa1 = dev->mac_addr[1];
1188 l4kwqe->sa2 = dev->mac_addr[2];
1189 l4kwqe->sa3 = dev->mac_addr[3];
1190 l4kwqe->sa4 = dev->mac_addr[4];
1191 l4kwqe->sa5 = dev->mac_addr[5];
1192
1193 l4kwqe->etype = ETH_P_IP;
1194 l4kwqe->ipid_count = DEF_IPID_COUNT;
1195 l4kwqe->host_opaque = csk->l5_cid;
1196
1197 if (csk->vlan_id) {
1198 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
1199 l4kwqe->vlan_tag = csk->vlan_id;
1200 l4kwqe->l2hdr_nbytes += 4;
1201 }
1202
1203 return dev->submit_kwqes(dev, wqes, 1);
1204}
1205
1206static int cnic_cm_update_pg(struct cnic_sock *csk)
1207{
1208 struct cnic_dev *dev = csk->dev;
1209 struct l4_kwq_update_pg *l4kwqe;
1210 struct kwqe *wqes[1];
1211
1212 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
1213 memset(l4kwqe, 0, sizeof(*l4kwqe));
1214 wqes[0] = (struct kwqe *) l4kwqe;
1215
1216 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
1217 l4kwqe->flags =
1218 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
1219 l4kwqe->pg_cid = csk->pg_cid;
1220
1221 l4kwqe->da0 = csk->ha[0];
1222 l4kwqe->da1 = csk->ha[1];
1223 l4kwqe->da2 = csk->ha[2];
1224 l4kwqe->da3 = csk->ha[3];
1225 l4kwqe->da4 = csk->ha[4];
1226 l4kwqe->da5 = csk->ha[5];
1227
1228 l4kwqe->pg_host_opaque = csk->l5_cid;
1229 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
1230
1231 return dev->submit_kwqes(dev, wqes, 1);
1232}
1233
1234static int cnic_cm_upload_pg(struct cnic_sock *csk)
1235{
1236 struct cnic_dev *dev = csk->dev;
1237 struct l4_kwq_upload *l4kwqe;
1238 struct kwqe *wqes[1];
1239
1240 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
1241 memset(l4kwqe, 0, sizeof(*l4kwqe));
1242 wqes[0] = (struct kwqe *) l4kwqe;
1243
1244 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
1245 l4kwqe->flags =
1246 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
1247 l4kwqe->cid = csk->pg_cid;
1248
1249 return dev->submit_kwqes(dev, wqes, 1);
1250}
1251
1252static int cnic_cm_conn_req(struct cnic_sock *csk)
1253{
1254 struct cnic_dev *dev = csk->dev;
1255 struct l4_kwq_connect_req1 *l4kwqe1;
1256 struct l4_kwq_connect_req2 *l4kwqe2;
1257 struct l4_kwq_connect_req3 *l4kwqe3;
1258 struct kwqe *wqes[3];
1259 u8 tcp_flags = 0;
1260 int num_wqes = 2;
1261
1262 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
1263 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
1264 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
1265 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
1266 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
1267 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
1268
1269 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
1270 l4kwqe3->flags =
1271 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
1272 l4kwqe3->ka_timeout = csk->ka_timeout;
1273 l4kwqe3->ka_interval = csk->ka_interval;
1274 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
1275 l4kwqe3->tos = csk->tos;
1276 l4kwqe3->ttl = csk->ttl;
1277 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
1278 l4kwqe3->pmtu = csk->mtu;
1279 l4kwqe3->rcv_buf = csk->rcv_buf;
1280 l4kwqe3->snd_buf = csk->snd_buf;
1281 l4kwqe3->seed = csk->seed;
1282
1283 wqes[0] = (struct kwqe *) l4kwqe1;
1284 if (test_bit(SK_F_IPV6, &csk->flags)) {
1285 wqes[1] = (struct kwqe *) l4kwqe2;
1286 wqes[2] = (struct kwqe *) l4kwqe3;
1287 num_wqes = 3;
1288
1289 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
1290 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
1291 l4kwqe2->flags =
1292 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
1293 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
1294 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
1295 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
1296 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
1297 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
1298 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
1299 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
1300 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
1301 sizeof(struct tcphdr);
1302 } else {
1303 wqes[1] = (struct kwqe *) l4kwqe3;
1304 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
1305 sizeof(struct tcphdr);
1306 }
1307
1308 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
1309 l4kwqe1->flags =
1310 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
1311 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
1312 l4kwqe1->cid = csk->cid;
1313 l4kwqe1->pg_cid = csk->pg_cid;
1314 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
1315 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
1316 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
1317 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
1318 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
1319 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
1320 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
1321 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
1322 if (csk->tcp_flags & SK_TCP_NAGLE)
1323 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
1324 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
1325 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
1326 if (csk->tcp_flags & SK_TCP_SACK)
1327 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
1328 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
1329 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
1330
1331 l4kwqe1->tcp_flags = tcp_flags;
1332
1333 return dev->submit_kwqes(dev, wqes, num_wqes);
1334}
1335
1336static int cnic_cm_close_req(struct cnic_sock *csk)
1337{
1338 struct cnic_dev *dev = csk->dev;
1339 struct l4_kwq_close_req *l4kwqe;
1340 struct kwqe *wqes[1];
1341
1342 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
1343 memset(l4kwqe, 0, sizeof(*l4kwqe));
1344 wqes[0] = (struct kwqe *) l4kwqe;
1345
1346 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
1347 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
1348 l4kwqe->cid = csk->cid;
1349
1350 return dev->submit_kwqes(dev, wqes, 1);
1351}
1352
1353static int cnic_cm_abort_req(struct cnic_sock *csk)
1354{
1355 struct cnic_dev *dev = csk->dev;
1356 struct l4_kwq_reset_req *l4kwqe;
1357 struct kwqe *wqes[1];
1358
1359 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
1360 memset(l4kwqe, 0, sizeof(*l4kwqe));
1361 wqes[0] = (struct kwqe *) l4kwqe;
1362
1363 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
1364 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
1365 l4kwqe->cid = csk->cid;
1366
1367 return dev->submit_kwqes(dev, wqes, 1);
1368}
1369
1370static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
1371 u32 l5_cid, struct cnic_sock **csk, void *context)
1372{
1373 struct cnic_local *cp = dev->cnic_priv;
1374 struct cnic_sock *csk1;
1375
1376 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1377 return -EINVAL;
1378
1379 csk1 = &cp->csk_tbl[l5_cid];
1380 if (atomic_read(&csk1->ref_count))
1381 return -EAGAIN;
1382
1383 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
1384 return -EBUSY;
1385
1386 csk1->dev = dev;
1387 csk1->cid = cid;
1388 csk1->l5_cid = l5_cid;
1389 csk1->ulp_type = ulp_type;
1390 csk1->context = context;
1391
1392 csk1->ka_timeout = DEF_KA_TIMEOUT;
1393 csk1->ka_interval = DEF_KA_INTERVAL;
1394 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
1395 csk1->tos = DEF_TOS;
1396 csk1->ttl = DEF_TTL;
1397 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
1398 csk1->rcv_buf = DEF_RCV_BUF;
1399 csk1->snd_buf = DEF_SND_BUF;
1400 csk1->seed = DEF_SEED;
1401
1402 *csk = csk1;
1403 return 0;
1404}
1405
1406static void cnic_cm_cleanup(struct cnic_sock *csk)
1407{
1408 if (csk->src_port) {
1409 struct cnic_dev *dev = csk->dev;
1410 struct cnic_local *cp = dev->cnic_priv;
1411
1412 cnic_free_id(&cp->csk_port_tbl, csk->src_port);
1413 csk->src_port = 0;
1414 }
1415}
1416
1417static void cnic_close_conn(struct cnic_sock *csk)
1418{
1419 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
1420 cnic_cm_upload_pg(csk);
1421 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
1422 }
1423 cnic_cm_cleanup(csk);
1424}
1425
1426static int cnic_cm_destroy(struct cnic_sock *csk)
1427{
1428 if (!cnic_in_use(csk))
1429 return -EINVAL;
1430
1431 csk_hold(csk);
1432 clear_bit(SK_F_INUSE, &csk->flags);
1433 smp_mb__after_clear_bit();
1434 while (atomic_read(&csk->ref_count) != 1)
1435 msleep(1);
1436 cnic_cm_cleanup(csk);
1437
1438 csk->flags = 0;
1439 csk_put(csk);
1440 return 0;
1441}
1442
1443static inline u16 cnic_get_vlan(struct net_device *dev,
1444 struct net_device **vlan_dev)
1445{
1446 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1447 *vlan_dev = vlan_dev_real_dev(dev);
1448 return vlan_dev_vlan_id(dev);
1449 }
1450 *vlan_dev = dev;
1451 return 0;
1452}
1453
1454static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
1455 struct dst_entry **dst)
1456{
1457 struct flowi fl;
1458 int err;
1459 struct rtable *rt;
1460
1461 memset(&fl, 0, sizeof(fl));
1462 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
1463
1464 err = ip_route_output_key(&init_net, &rt, &fl);
1465 if (!err)
1466 *dst = &rt->u.dst;
1467 return err;
1468}
1469
1470static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
1471 struct dst_entry **dst)
1472{
1473#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1474 struct flowi fl;
1475
1476 memset(&fl, 0, sizeof(fl));
1477 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
1478 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
1479 fl.oif = dst_addr->sin6_scope_id;
1480
1481 *dst = ip6_route_output(&init_net, NULL, &fl);
1482 if (*dst)
1483 return 0;
1484#endif
1485
1486 return -ENETUNREACH;
1487}
1488
1489static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
1490 int ulp_type)
1491{
1492 struct cnic_dev *dev = NULL;
1493 struct dst_entry *dst;
1494 struct net_device *netdev = NULL;
1495 int err = -ENETUNREACH;
1496
1497 if (dst_addr->sin_family == AF_INET)
1498 err = cnic_get_v4_route(dst_addr, &dst);
1499 else if (dst_addr->sin_family == AF_INET6) {
1500 struct sockaddr_in6 *dst_addr6 =
1501 (struct sockaddr_in6 *) dst_addr;
1502
1503 err = cnic_get_v6_route(dst_addr6, &dst);
1504 } else
1505 return NULL;
1506
1507 if (err)
1508 return NULL;
1509
1510 if (!dst->dev)
1511 goto done;
1512
1513 cnic_get_vlan(dst->dev, &netdev);
1514
1515 dev = cnic_from_netdev(netdev);
1516
1517done:
1518 dst_release(dst);
1519 if (dev)
1520 cnic_put(dev);
1521 return dev;
1522}
1523
1524static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1525{
1526 struct cnic_dev *dev = csk->dev;
1527 struct cnic_local *cp = dev->cnic_priv;
1528
1529 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
1530}
1531
1532static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1533{
1534 struct cnic_dev *dev = csk->dev;
1535 struct cnic_local *cp = dev->cnic_priv;
1536 int is_v6, err, rc = -ENETUNREACH;
1537 struct dst_entry *dst;
1538 struct net_device *realdev;
1539 u32 local_port;
1540
1541 if (saddr->local.v6.sin6_family == AF_INET6 &&
1542 saddr->remote.v6.sin6_family == AF_INET6)
1543 is_v6 = 1;
1544 else if (saddr->local.v4.sin_family == AF_INET &&
1545 saddr->remote.v4.sin_family == AF_INET)
1546 is_v6 = 0;
1547 else
1548 return -EINVAL;
1549
1550 clear_bit(SK_F_IPV6, &csk->flags);
1551
1552 if (is_v6) {
1553#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1554 set_bit(SK_F_IPV6, &csk->flags);
1555 err = cnic_get_v6_route(&saddr->remote.v6, &dst);
1556 if (err)
1557 return err;
1558
1559 if (!dst || dst->error || !dst->dev)
1560 goto err_out;
1561
1562 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
1563 sizeof(struct in6_addr));
1564 csk->dst_port = saddr->remote.v6.sin6_port;
1565 local_port = saddr->local.v6.sin6_port;
1566#else
1567 return rc;
1568#endif
1569
1570 } else {
1571 err = cnic_get_v4_route(&saddr->remote.v4, &dst);
1572 if (err)
1573 return err;
1574
1575 if (!dst || dst->error || !dst->dev)
1576 goto err_out;
1577
1578 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
1579 csk->dst_port = saddr->remote.v4.sin_port;
1580 local_port = saddr->local.v4.sin_port;
1581 }
1582
1583 csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
1584 if (realdev != dev->netdev)
1585 goto err_out;
1586
1587 if (local_port >= CNIC_LOCAL_PORT_MIN &&
1588 local_port < CNIC_LOCAL_PORT_MAX) {
1589 if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
1590 local_port = 0;
1591 } else
1592 local_port = 0;
1593
1594 if (!local_port) {
1595 local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
1596 if (local_port == -1) {
1597 rc = -ENOMEM;
1598 goto err_out;
1599 }
1600 }
1601 csk->src_port = local_port;
1602
1603 csk->mtu = dst_mtu(dst);
1604 rc = 0;
1605
1606err_out:
1607 dst_release(dst);
1608 return rc;
1609}
1610
1611static void cnic_init_csk_state(struct cnic_sock *csk)
1612{
1613 csk->state = 0;
1614 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1615 clear_bit(SK_F_CLOSING, &csk->flags);
1616}
1617
1618static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1619{
1620 int err = 0;
1621
1622 if (!cnic_in_use(csk))
1623 return -EINVAL;
1624
1625 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
1626 return -EINVAL;
1627
1628 cnic_init_csk_state(csk);
1629
1630 err = cnic_get_route(csk, saddr);
1631 if (err)
1632 goto err_out;
1633
1634 err = cnic_resolve_addr(csk, saddr);
1635 if (!err)
1636 return 0;
1637
1638err_out:
1639 clear_bit(SK_F_CONNECT_START, &csk->flags);
1640 return err;
1641}
1642
1643static int cnic_cm_abort(struct cnic_sock *csk)
1644{
1645 struct cnic_local *cp = csk->dev->cnic_priv;
1646 u32 opcode;
1647
1648 if (!cnic_in_use(csk))
1649 return -EINVAL;
1650
1651 if (cnic_abort_prep(csk))
1652 return cnic_cm_abort_req(csk);
1653
1654 /* Getting here means that we haven't started connect, or
1655 * connect was not successful.
1656 */
1657
1658 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
1659 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1660 opcode = csk->state;
1661 else
1662 opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
1663 cp->close_conn(csk, opcode);
1664
1665 return 0;
1666}
1667
1668static int cnic_cm_close(struct cnic_sock *csk)
1669{
1670 if (!cnic_in_use(csk))
1671 return -EINVAL;
1672
1673 if (cnic_close_prep(csk)) {
1674 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
1675 return cnic_cm_close_req(csk);
1676 }
1677 return 0;
1678}
1679
1680static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
1681 u8 opcode)
1682{
1683 struct cnic_ulp_ops *ulp_ops;
1684 int ulp_type = csk->ulp_type;
1685
1686 rcu_read_lock();
1687 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1688 if (ulp_ops) {
1689 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
1690 ulp_ops->cm_connect_complete(csk);
1691 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
1692 ulp_ops->cm_close_complete(csk);
1693 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
1694 ulp_ops->cm_remote_abort(csk);
1695 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
1696 ulp_ops->cm_abort_complete(csk);
1697 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
1698 ulp_ops->cm_remote_close(csk);
1699 }
1700 rcu_read_unlock();
1701}
1702
1703static int cnic_cm_set_pg(struct cnic_sock *csk)
1704{
1705 if (cnic_offld_prep(csk)) {
1706 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1707 cnic_cm_update_pg(csk);
1708 else
1709 cnic_cm_offload_pg(csk);
1710 }
1711 return 0;
1712}
1713
1714static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
1715{
1716 struct cnic_local *cp = dev->cnic_priv;
1717 u32 l5_cid = kcqe->pg_host_opaque;
1718 u8 opcode = kcqe->op_code;
1719 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
1720
1721 csk_hold(csk);
1722 if (!cnic_in_use(csk))
1723 goto done;
1724
1725 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
1726 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1727 goto done;
1728 }
1729 csk->pg_cid = kcqe->pg_cid;
1730 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
1731 cnic_cm_conn_req(csk);
1732
1733done:
1734 csk_put(csk);
1735}
1736
1737static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
1738{
1739 struct cnic_local *cp = dev->cnic_priv;
1740 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
1741 u8 opcode = l4kcqe->op_code;
1742 u32 l5_cid;
1743 struct cnic_sock *csk;
1744
1745 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
1746 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
1747 cnic_cm_process_offld_pg(dev, l4kcqe);
1748 return;
1749 }
1750
1751 l5_cid = l4kcqe->conn_id;
1752 if (opcode & 0x80)
1753 l5_cid = l4kcqe->cid;
1754 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1755 return;
1756
1757 csk = &cp->csk_tbl[l5_cid];
1758 csk_hold(csk);
1759
1760 if (!cnic_in_use(csk)) {
1761 csk_put(csk);
1762 return;
1763 }
1764
1765 switch (opcode) {
1766 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
1767 if (l4kcqe->status == 0)
1768 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
1769
1770 smp_mb__before_clear_bit();
1771 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1772 cnic_cm_upcall(cp, csk, opcode);
1773 break;
1774
1775 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
1776 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
1777 csk->state = opcode;
1778 /* fall through */
1779 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
1780 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
1781 cp->close_conn(csk, opcode);
1782 break;
1783
1784 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
1785 cnic_cm_upcall(cp, csk, opcode);
1786 break;
1787 }
1788 csk_put(csk);
1789}
1790
1791static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
1792{
1793 struct cnic_dev *dev = data;
1794 int i;
1795
1796 for (i = 0; i < num; i++)
1797 cnic_cm_process_kcqe(dev, kcqe[i]);
1798}
1799
1800static struct cnic_ulp_ops cm_ulp_ops = {
1801 .indicate_kcqes = cnic_cm_indicate_kcqe,
1802};
1803
1804static void cnic_cm_free_mem(struct cnic_dev *dev)
1805{
1806 struct cnic_local *cp = dev->cnic_priv;
1807
1808 kfree(cp->csk_tbl);
1809 cp->csk_tbl = NULL;
1810 cnic_free_id_tbl(&cp->csk_port_tbl);
1811}
1812
1813static int cnic_cm_alloc_mem(struct cnic_dev *dev)
1814{
1815 struct cnic_local *cp = dev->cnic_priv;
1816
1817 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
1818 GFP_KERNEL);
1819 if (!cp->csk_tbl)
1820 return -ENOMEM;
1821
1822 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
1823 CNIC_LOCAL_PORT_MIN)) {
1824 cnic_cm_free_mem(dev);
1825 return -ENOMEM;
1826 }
1827 return 0;
1828}
1829
1830static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
1831{
1832 if ((opcode == csk->state) ||
1833 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
1834 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
1835 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
1836 return 1;
1837 }
1838 return 0;
1839}
1840
1841static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
1842{
1843 struct cnic_dev *dev = csk->dev;
1844 struct cnic_local *cp = dev->cnic_priv;
1845
1846 clear_bit(SK_F_CONNECT_START, &csk->flags);
1847 if (cnic_ready_to_close(csk, opcode)) {
1848 cnic_close_conn(csk);
1849 cnic_cm_upcall(cp, csk, opcode);
1850 }
1851}
1852
1853static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
1854{
1855}
1856
1857static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
1858{
1859 u32 seed;
1860
1861 get_random_bytes(&seed, 4);
1862 cnic_ctx_wr(dev, 45, 0, seed);
1863 return 0;
1864}
1865
1866static int cnic_cm_open(struct cnic_dev *dev)
1867{
1868 struct cnic_local *cp = dev->cnic_priv;
1869 int err;
1870
1871 err = cnic_cm_alloc_mem(dev);
1872 if (err)
1873 return err;
1874
1875 err = cp->start_cm(dev);
1876
1877 if (err)
1878 goto err_out;
1879
1880 dev->cm_create = cnic_cm_create;
1881 dev->cm_destroy = cnic_cm_destroy;
1882 dev->cm_connect = cnic_cm_connect;
1883 dev->cm_abort = cnic_cm_abort;
1884 dev->cm_close = cnic_cm_close;
1885 dev->cm_select_dev = cnic_cm_select_dev;
1886
1887 cp->ulp_handle[CNIC_ULP_L4] = dev;
1888 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
1889 return 0;
1890
1891err_out:
1892 cnic_cm_free_mem(dev);
1893 return err;
1894}
1895
1896static int cnic_cm_shutdown(struct cnic_dev *dev)
1897{
1898 struct cnic_local *cp = dev->cnic_priv;
1899 int i;
1900
1901 cp->stop_cm(dev);
1902
1903 if (!cp->csk_tbl)
1904 return 0;
1905
1906 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
1907 struct cnic_sock *csk = &cp->csk_tbl[i];
1908
1909 clear_bit(SK_F_INUSE, &csk->flags);
1910 cnic_cm_cleanup(csk);
1911 }
1912 cnic_cm_free_mem(dev);
1913
1914 return 0;
1915}
1916
1917static void cnic_init_context(struct cnic_dev *dev, u32 cid)
1918{
1919 struct cnic_local *cp = dev->cnic_priv;
1920 u32 cid_addr;
1921 int i;
1922
1923 if (CHIP_NUM(cp) == CHIP_NUM_5709)
1924 return;
1925
1926 cid_addr = GET_CID_ADDR(cid);
1927
1928 for (i = 0; i < CTX_SIZE; i += 4)
1929 cnic_ctx_wr(dev, cid_addr, i, 0);
1930}
1931
1932static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
1933{
1934 struct cnic_local *cp = dev->cnic_priv;
1935 int ret = 0, i;
1936 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
1937
1938 if (CHIP_NUM(cp) != CHIP_NUM_5709)
1939 return 0;
1940
1941 for (i = 0; i < cp->ctx_blks; i++) {
1942 int j;
1943 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
1944 u32 val;
1945
1946 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
1947
1948 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1949 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
1950 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1951 (u64) cp->ctx_arr[i].mapping >> 32);
1952 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
1953 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1954 for (j = 0; j < 10; j++) {
1955
1956 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1957 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1958 break;
1959 udelay(5);
1960 }
1961 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1962 ret = -EBUSY;
1963 break;
1964 }
1965 }
1966 return ret;
1967}
1968
1969static void cnic_free_irq(struct cnic_dev *dev)
1970{
1971 struct cnic_local *cp = dev->cnic_priv;
1972 struct cnic_eth_dev *ethdev = cp->ethdev;
1973
1974 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1975 cp->disable_int_sync(dev);
1976 tasklet_disable(&cp->cnic_irq_task);
1977 free_irq(ethdev->irq_arr[0].vector, dev);
1978 }
1979}
1980
1981static int cnic_init_bnx2_irq(struct cnic_dev *dev)
1982{
1983 struct cnic_local *cp = dev->cnic_priv;
1984 struct cnic_eth_dev *ethdev = cp->ethdev;
1985
1986 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1987 int err, i = 0;
1988 int sblk_num = cp->status_blk_num;
1989 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
1990 BNX2_HC_SB_CONFIG_1;
1991
1992 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
1993
1994 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
1995 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
1996 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
1997
1998 cp->bnx2_status_blk = cp->status_blk;
1999 cp->last_status_idx = cp->bnx2_status_blk->status_idx;
2000 tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
2001 (unsigned long) dev);
2002 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
2003 "cnic", dev);
2004 if (err) {
2005 tasklet_disable(&cp->cnic_irq_task);
2006 return err;
2007 }
2008 while (cp->bnx2_status_blk->status_completion_producer_index &&
2009 i < 10) {
2010 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
2011 1 << (11 + sblk_num));
2012 udelay(10);
2013 i++;
2014 barrier();
2015 }
2016 if (cp->bnx2_status_blk->status_completion_producer_index) {
2017 cnic_free_irq(dev);
2018 goto failed;
2019 }
2020
2021 } else {
2022 struct status_block *sblk = cp->status_blk;
2023 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
2024 int i = 0;
2025
2026 while (sblk->status_completion_producer_index && i < 10) {
2027 CNIC_WR(dev, BNX2_HC_COMMAND,
2028 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2029 udelay(10);
2030 i++;
2031 barrier();
2032 }
2033 if (sblk->status_completion_producer_index)
2034 goto failed;
2035
2036 }
2037 return 0;
2038
2039failed:
2040	printk(KERN_ERR PFX "%s: KCQ index not resetting to 0.\n",
2041 dev->netdev->name);
2042 return -EBUSY;
2043}
2044
2045static void cnic_enable_bnx2_int(struct cnic_dev *dev)
2046{
2047 struct cnic_local *cp = dev->cnic_priv;
2048 struct cnic_eth_dev *ethdev = cp->ethdev;
2049
2050 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2051 return;
2052
2053 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2054 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2055}
2056
2057static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
2058{
2059 struct cnic_local *cp = dev->cnic_priv;
2060 struct cnic_eth_dev *ethdev = cp->ethdev;
2061
2062 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2063 return;
2064
2065 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2066 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2067 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
2068 synchronize_irq(ethdev->irq_arr[0].vector);
2069}
2070
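/* Program the chip context for the driver's private L2 TX ring: pick the
 * TX CID (per-status-block when using MSI-X), set the L2 context type,
 * point every BD at the single host buffer, and write the ring's base
 * address into the context.
 */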
2071static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
2072{
2073 struct cnic_local *cp = dev->cnic_priv;
2074 struct cnic_eth_dev *ethdev = cp->ethdev;
2075 u32 cid_addr, tx_cid, sb_id;
2076 u32 val, offset0, offset1, offset2, offset3;
2077 int i;
2078 struct tx_bd *txbd;
2079 dma_addr_t buf_map;
2080 struct status_block *s_blk = cp->status_blk;
2081
2082 sb_id = cp->status_blk_num;
2083 tx_cid = 20;
2084 cnic_init_context(dev, tx_cid);
2085 cnic_init_context(dev, tx_cid + 1);
2086 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
2087 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2088 struct status_block_msix *sblk = cp->status_blk;
2089
2090 tx_cid = TX_TSS_CID + sb_id - 1;
2091 cnic_init_context(dev, tx_cid);
2092 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
2093 (TX_TSS_CID << 7));
2094 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
2095 }
2096 cp->tx_cons = *cp->tx_cons_ptr;
2097
2098 cid_addr = GET_CID_ADDR(tx_cid);
2099 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
2100 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
2101
2102 for (i = 0; i < PHY_CTX_SIZE; i += 4)
2103 cnic_ctx_wr(dev, cid_addr2, i, 0);
2104
2105 offset0 = BNX2_L2CTX_TYPE_XI;
2106 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
2107 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
2108 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
2109 } else {
2110 offset0 = BNX2_L2CTX_TYPE;
2111 offset1 = BNX2_L2CTX_CMD_TYPE;
2112 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
2113 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
2114 }
2115 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
2116 cnic_ctx_wr(dev, cid_addr, offset0, val);
2117
2118 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
2119 cnic_ctx_wr(dev, cid_addr, offset1, val);
2120
2121 txbd = (struct tx_bd *) cp->l2_ring;
2122
2123 buf_map = cp->l2_buf_map;
2124 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
2125 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
2126 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2127 }
2128 val = (u64) cp->l2_ring_map >> 32;
2129 cnic_ctx_wr(dev, cid_addr, offset2, val);
2130 txbd->tx_bd_haddr_hi = val;
2131
2132 val = (u64) cp->l2_ring_map & 0xffffffff;
2133 cnic_ctx_wr(dev, cid_addr, offset3, val);
2134 txbd->tx_bd_haddr_lo = val;
2135}
2136
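/* Program the chip context for the driver's private L2 RX ring on CID 2:
 * force a status block update to read the initial RX consumer index, set
 * the context type and status block number, fill the BDs with the host
 * receive buffers, and write the ring's base address into the context.
 */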
2137static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
2138{
2139 struct cnic_local *cp = dev->cnic_priv;
2140 struct cnic_eth_dev *ethdev = cp->ethdev;
2141 u32 cid_addr, sb_id, val, coal_reg, coal_val;
2142 int i;
2143 struct rx_bd *rxbd;
2144 struct status_block *s_blk = cp->status_blk;
2145
2146 sb_id = cp->status_blk_num;
2147 cnic_init_context(dev, 2);
2148 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
2149 coal_reg = BNX2_HC_COMMAND;
2150 coal_val = CNIC_RD(dev, coal_reg);
2151 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2152 struct status_block_msix *sblk = cp->status_blk;
2153
2154 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
2155 coal_reg = BNX2_HC_COALESCE_NOW;
2156 coal_val = 1 << (11 + sb_id);
2157 }
2158 i = 0;
2159	while (*cp->rx_cons_ptr == 0 && i < 10) {
2160 CNIC_WR(dev, coal_reg, coal_val);
2161 udelay(10);
2162 i++;
2163 barrier();
2164 }
2165 cp->rx_cons = *cp->rx_cons_ptr;
2166
2167 cid_addr = GET_CID_ADDR(2);
2168 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
2169 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
2170 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
2171
2172 if (sb_id == 0)
2173 val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
2174 else
2175 val = BNX2_L2CTX_STATUSB_NUM(sb_id);
2176 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
2177
2178 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
2179 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
2180 dma_addr_t buf_map;
2181 int n = (i % cp->l2_rx_ring_size) + 1;
2182
2183 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
2184 rxbd->rx_bd_len = cp->l2_single_buf_size;
2185 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
2186 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
2187 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2188 }
2189 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
2190 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
2191 rxbd->rx_bd_haddr_hi = val;
2192
2193 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
2194 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
2195 rxbd->rx_bd_haddr_lo = val;
2196
2197 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
2198 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
2199}
2200
2201static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
2202{
2203 struct kwqe *wqes[1], l2kwqe;
2204
2205 memset(&l2kwqe, 0, sizeof(l2kwqe));
2206 wqes[0] = &l2kwqe;
2207 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
2208 (L2_KWQE_OPCODE_VALUE_FLUSH <<
2209 KWQE_OPCODE_SHIFT) | 2;
2210 dev->submit_kwqes(dev, wqes, 1);
2211}
2212
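/* Read the iSCSI MAC address for this PCI function from shared memory,
 * store it in dev->mac_addr and program it into the EMAC MAC-match and
 * RPM sort registers.
 */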
2213static void cnic_set_bnx2_mac(struct cnic_dev *dev)
2214{
2215 struct cnic_local *cp = dev->cnic_priv;
2216 u32 val;
2217
2218 val = cp->func << 2;
2219
2220 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
2221
2222 val = cnic_reg_rd_ind(dev, cp->shmem_base +
2223 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
2224 dev->mac_addr[0] = (u8) (val >> 8);
2225 dev->mac_addr[1] = (u8) val;
2226
2227 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
2228
2229 val = cnic_reg_rd_ind(dev, cp->shmem_base +
2230 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
2231 dev->mac_addr[2] = (u8) (val >> 24);
2232 dev->mac_addr[3] = (u8) (val >> 16);
2233 dev->mac_addr[4] = (u8) (val >> 8);
2234 dev->mac_addr[5] = (u8) val;
2235
2236 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
2237
2238 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
2239 if (CHIP_NUM(cp) != CHIP_NUM_5709)
2240 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
2241
2242 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
2243 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
2244 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
2245}
2246
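/* Bring up the BNX2 kernel queues: program the iSCSI MAC, size the
 * kernel-bypass blocks, initialize the KWQ/KCQ contexts and their page
 * tables, set up the L2 rings and hook the IRQ.  The CP and COM doorbells
 * are written only after the contexts are ready.
 */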
2247static int cnic_start_bnx2_hw(struct cnic_dev *dev)
2248{
2249 struct cnic_local *cp = dev->cnic_priv;
2250 struct cnic_eth_dev *ethdev = cp->ethdev;
2251 struct status_block *sblk = cp->status_blk;
2252 u32 val;
2253 int err;
2254
2255 cnic_set_bnx2_mac(dev);
2256
2257 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
2258 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
2259 if (BCM_PAGE_BITS > 12)
2260 val |= (12 - 8) << 4;
2261 else
2262 val |= (BCM_PAGE_BITS - 8) << 4;
2263
2264 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
2265
2266 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
2267 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
2268 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
2269
2270 err = cnic_setup_5709_context(dev, 1);
2271 if (err)
2272 return err;
2273
2274 cnic_init_context(dev, KWQ_CID);
2275 cnic_init_context(dev, KCQ_CID);
2276
2277 cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
2278 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
2279
2280 cp->max_kwq_idx = MAX_KWQ_IDX;
2281 cp->kwq_prod_idx = 0;
2282 cp->kwq_con_idx = 0;
2283 cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
2284
2285 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
2286 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
2287 else
2288 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
2289
2290 /* Initialize the kernel work queue context. */
2291 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
2292 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
2293 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
2294
2295 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
2296 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
2297
2298 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
2299 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
2300
2301 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
2302 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
2303
2304 val = (u32) cp->kwq_info.pgtbl_map;
2305 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
2306
2307 cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
2308 cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
2309
2310 cp->kcq_prod_idx = 0;
2311
2312 /* Initialize the kernel complete queue context. */
2313 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
2314 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
2315 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
2316
2317 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
2318 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
2319
2320 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
2321 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
2322
2323 val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
2324 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
2325
2326 val = (u32) cp->kcq_info.pgtbl_map;
2327 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
2328
2329 cp->int_num = 0;
2330 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2331 u32 sb_id = cp->status_blk_num;
2332 u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
2333
2334 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
2335 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
2336 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
2337 }
2338
2339	/* Enable Command Scheduler notification when we write to the
2340 * host producer index of the kernel contexts. */
2341 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
2342
2343 /* Enable Command Scheduler notification when we write to either
2344 * the Send Queue or Receive Queue producer indexes of the kernel
2345 * bypass contexts. */
2346 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
2347 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
2348
2349	/* Notify COM when the driver posts an application buffer. */
2350 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
2351
2352	/* Set the CP and COM doorbells.  These two processors poll the
2353	 * doorbell for a non-zero value before running.  This must be done
2354 * after setting up the kernel queue contexts. */
2355 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
2356 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
2357
2358 cnic_init_bnx2_tx_ring(dev);
2359 cnic_init_bnx2_rx_ring(dev);
2360
2361 err = cnic_init_bnx2_irq(dev);
2362 if (err) {
2363 printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
2364 dev->netdev->name);
2365 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
2366 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
2367 return err;
2368 }
2369
2370 return 0;
2371}
2372
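/* Chip-independent start path: register with the ethernet driver,
 * allocate resources, start the hardware and connection manager, then
 * mark the device up and enable interrupts.  Failures unwind the
 * registration and resources.
 */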
2373static int cnic_start_hw(struct cnic_dev *dev)
2374{
2375 struct cnic_local *cp = dev->cnic_priv;
2376 struct cnic_eth_dev *ethdev = cp->ethdev;
2377 int err;
2378
2379 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
2380 return -EALREADY;
2381
2382 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
2383 if (err) {
2384 printk(KERN_ERR PFX "%s: register_cnic failed\n",
2385 dev->netdev->name);
2386 goto err2;
2387 }
2388
2389 dev->regview = ethdev->io_base;
2390 cp->chip_id = ethdev->chip_id;
2391 pci_dev_get(dev->pcidev);
2392 cp->func = PCI_FUNC(dev->pcidev->devfn);
2393 cp->status_blk = ethdev->irq_arr[0].status_blk;
2394 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
2395
2396 err = cp->alloc_resc(dev);
2397 if (err) {
2398 printk(KERN_ERR PFX "%s: allocate resource failure\n",
2399 dev->netdev->name);
2400 goto err1;
2401 }
2402
2403 err = cp->start_hw(dev);
2404 if (err)
2405 goto err1;
2406
2407 err = cnic_cm_open(dev);
2408 if (err)
2409 goto err1;
2410
2411 set_bit(CNIC_F_CNIC_UP, &dev->flags);
2412
2413 cp->enable_int(dev);
2414
2415 return 0;
2416
2417err1:
2418 ethdev->drv_unregister_cnic(dev->netdev);
2419 cp->free_resc(dev);
2420 pci_dev_put(dev->pcidev);
2421err2:
2422 return err;
2423}
2424
2425static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
2426{
2427 struct cnic_local *cp = dev->cnic_priv;
2428 struct cnic_eth_dev *ethdev = cp->ethdev;
2429
2430 cnic_disable_bnx2_int_sync(dev);
2431
2432 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
2433 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
2434
2435 cnic_init_context(dev, KWQ_CID);
2436 cnic_init_context(dev, KCQ_CID);
2437
2438 cnic_setup_5709_context(dev, 0);
2439 cnic_free_irq(dev);
2440
2441 ethdev->drv_unregister_cnic(dev->netdev);
2442
2443 cnic_free_resc(dev);
2444}
2445
2446static void cnic_stop_hw(struct cnic_dev *dev)
2447{
2448 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2449 struct cnic_local *cp = dev->cnic_priv;
2450
2451 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
2452 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
2453 synchronize_rcu();
2454 cnic_cm_shutdown(dev);
2455 cp->stop_hw(dev);
2456 pci_dev_put(dev->pcidev);
2457 }
2458}
2459
2460static void cnic_free_dev(struct cnic_dev *dev)
2461{
2462 int i = 0;
2463
2464 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
2465 msleep(100);
2466 i++;
2467 }
2468 if (atomic_read(&dev->ref_count) != 0)
2469 printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
2470 " to zero.\n", dev->netdev->name);
2471
2472 printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
2473 dev_put(dev->netdev);
2474 kfree(dev);
2475}
2476
2477static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
2478 struct pci_dev *pdev)
2479{
2480 struct cnic_dev *cdev;
2481 struct cnic_local *cp;
2482 int alloc_size;
2483
2484 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
2485
2486	cdev = kzalloc(alloc_size, GFP_KERNEL);
2487 if (cdev == NULL) {
2488 printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
2489 dev->name);
2490 return NULL;
2491 }
2492
2493 cdev->netdev = dev;
2494 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
2495 cdev->register_device = cnic_register_device;
2496 cdev->unregister_device = cnic_unregister_device;
2497 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
2498
2499 cp = cdev->cnic_priv;
2500 cp->dev = cdev;
2501 cp->uio_dev = -1;
2502 cp->l2_single_buf_size = 0x400;
2503 cp->l2_rx_ring_size = 3;
2504
2505 spin_lock_init(&cp->cnic_ulp_lock);
2506
2507 printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
2508
2509 return cdev;
2510}
2511
2512static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
2513{
2514 struct pci_dev *pdev;
2515 struct cnic_dev *cdev;
2516 struct cnic_local *cp;
2517 struct cnic_eth_dev *ethdev = NULL;
2518 struct cnic_eth_dev *(*probe)(void *) = NULL;
2519
2520 probe = __symbol_get("bnx2_cnic_probe");
2521 if (probe) {
2522 ethdev = (*probe)(dev);
2523 symbol_put_addr(probe);
2524 }
2525 if (!ethdev)
2526 return NULL;
2527
2528 pdev = ethdev->pdev;
2529 if (!pdev)
2530 return NULL;
2531
2532 dev_hold(dev);
2533 pci_dev_get(pdev);
2534 if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
2535 pdev->device == PCI_DEVICE_ID_NX2_5709S) {
2536 u8 rev;
2537
2538 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
2539 if (rev < 0x10) {
2540 pci_dev_put(pdev);
2541 goto cnic_err;
2542 }
2543 }
2544 pci_dev_put(pdev);
2545
2546 cdev = cnic_alloc_dev(dev, pdev);
2547 if (cdev == NULL)
2548 goto cnic_err;
2549
2550 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
2551 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
2552
2553 cp = cdev->cnic_priv;
2554 cp->ethdev = ethdev;
2555 cdev->pcidev = pdev;
2556
2557 cp->cnic_ops = &cnic_bnx2_ops;
2558 cp->start_hw = cnic_start_bnx2_hw;
2559 cp->stop_hw = cnic_stop_bnx2_hw;
2560 cp->setup_pgtbl = cnic_setup_page_tbl;
2561 cp->alloc_resc = cnic_alloc_bnx2_resc;
2562 cp->free_resc = cnic_free_resc;
2563 cp->start_cm = cnic_cm_init_bnx2_hw;
2564 cp->stop_cm = cnic_cm_stop_bnx2_hw;
2565 cp->enable_int = cnic_enable_bnx2_int;
2566 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
2567 cp->close_conn = cnic_close_bnx2_conn;
2568 cp->next_idx = cnic_bnx2_next_idx;
2569 cp->hw_idx = cnic_bnx2_hw_idx;
2570 return cdev;
2571
2572cnic_err:
2573 dev_put(dev);
2574 return NULL;
2575}
2576
2577static struct cnic_dev *is_cnic_dev(struct net_device *dev)
2578{
2579 struct ethtool_drvinfo drvinfo;
2580 struct cnic_dev *cdev = NULL;
2581
2582 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
2583 memset(&drvinfo, 0, sizeof(drvinfo));
2584 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
2585
2586 if (!strcmp(drvinfo.driver, "bnx2"))
2587 cdev = init_bnx2_cnic(dev);
2588 if (cdev) {
2589 write_lock(&cnic_dev_lock);
2590 list_add(&cdev->list, &cnic_dev_list);
2591 write_unlock(&cnic_dev_lock);
2592 }
2593 }
2594 return cdev;
2595}
2596
2597/**
2598 * netdev event handler
2599 */
2600static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2601 void *ptr)
2602{
2603 struct net_device *netdev = ptr;
2604 struct cnic_dev *dev;
2605 int if_type;
2606 int new_dev = 0;
2607
2608 dev = cnic_from_netdev(netdev);
2609
2610 if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
2611 /* Check for the hot-plug device */
2612 dev = is_cnic_dev(netdev);
2613 if (dev) {
2614 new_dev = 1;
2615 cnic_hold(dev);
2616 }
2617 }
2618 if (dev) {
2619 struct cnic_local *cp = dev->cnic_priv;
2620
2621 if (new_dev)
2622 cnic_ulp_init(dev);
2623 else if (event == NETDEV_UNREGISTER)
2624 cnic_ulp_exit(dev);
2625 else if (event == NETDEV_UP) {
2626 mutex_lock(&cnic_lock);
2627 if (!cnic_start_hw(dev))
2628 cnic_ulp_start(dev);
2629 mutex_unlock(&cnic_lock);
2630 }
2631
2632 rcu_read_lock();
2633 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2634 struct cnic_ulp_ops *ulp_ops;
2635 void *ctx;
2636
2637 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
2638 if (!ulp_ops || !ulp_ops->indicate_netevent)
2639 continue;
2640
2641 ctx = cp->ulp_handle[if_type];
2642
2643 ulp_ops->indicate_netevent(ctx, event);
2644 }
2645 rcu_read_unlock();
2646
2647 if (event == NETDEV_GOING_DOWN) {
2648 mutex_lock(&cnic_lock);
2649 cnic_ulp_stop(dev);
2650 cnic_stop_hw(dev);
2651 mutex_unlock(&cnic_lock);
2652 } else if (event == NETDEV_UNREGISTER) {
2653 write_lock(&cnic_dev_lock);
2654 list_del_init(&dev->list);
2655 write_unlock(&cnic_dev_lock);
2656
2657 cnic_put(dev);
2658 cnic_free_dev(dev);
2659 goto done;
2660 }
2661 cnic_put(dev);
2662 }
2663done:
2664 return NOTIFY_DONE;
2665}
2666
2667static struct notifier_block cnic_netdev_notifier = {
2668 .notifier_call = cnic_netdev_event
2669};
2670
2671static void cnic_release(void)
2672{
2673 struct cnic_dev *dev;
2674
2675 while (!list_empty(&cnic_dev_list)) {
2676 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
2677 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2678 cnic_ulp_stop(dev);
2679 cnic_stop_hw(dev);
2680 }
2681
2682 cnic_ulp_exit(dev);
2683 list_del_init(&dev->list);
2684 cnic_free_dev(dev);
2685 }
2686}
2687
2688static int __init cnic_init(void)
2689{
2690 int rc = 0;
2691
2692 printk(KERN_INFO "%s", version);
2693
2694 rc = register_netdevice_notifier(&cnic_netdev_notifier);
2695 if (rc) {
2696 cnic_release();
2697 return rc;
2698 }
2699
2700 return 0;
2701}
2702
2703static void __exit cnic_exit(void)
2704{
2705 unregister_netdevice_notifier(&cnic_netdev_notifier);
2706 cnic_release();
2707 return;
2708}
2709
2710module_init(cnic_init);
2711module_exit(cnic_exit);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
new file mode 100644
index 000000000000..5192d4a9df5a
--- /dev/null
+++ b/drivers/net/cnic.h
@@ -0,0 +1,299 @@
1/* cnic.h: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 */
10
11
12#ifndef CNIC_H
13#define CNIC_H
14
15#define KWQ_PAGE_CNT 4
16#define KCQ_PAGE_CNT 16
17
18#define KWQ_CID 24
19#define KCQ_CID 25
20
21/*
22 * krnlq_context definition
23 */
24#define L5_KRNLQ_FLAGS 0x00000000
25#define L5_KRNLQ_SIZE 0x00000000
26#define L5_KRNLQ_TYPE 0x00000000
27#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
28#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
29#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
30#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
31#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
32#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
33#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
34#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
35#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
36#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
37#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
38#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
39#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
40#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
41#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
42#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
43#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
44#define KRNLQ_TYPE_TYPE (0xf<<28)
45#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
46#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
47
48#define L5_KRNLQ_HOST_QIDX 0x00000004
49#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
50#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
51#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
52#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
53#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
54#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
55#define L5_KRNLQ_NX_PG_QIDX 0x00000018
56#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
57#define L5_KRNLQ_QIDX_INCR 0x0000001c
58#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
59#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
60
61#define BNX2_PG_CTX_MAP 0x1a0034
62#define BNX2_ISCSI_CTX_MAP 0x1a0074
63
64struct cnic_redirect_entry {
65 struct dst_entry *old_dst;
66 struct dst_entry *new_dst;
67};
68
69#define MAX_COMPLETED_KCQE 64
70
71#define MAX_CNIC_L5_CONTEXT 256
72
73#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
74
75#define MAX_ISCSI_TBL_SZ 256
76
77#define CNIC_LOCAL_PORT_MIN 60000
78#define CNIC_LOCAL_PORT_MAX 61000
79#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
80
81#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
82#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
83#define MAX_KWQE_CNT (KWQE_CNT - 1)
84#define MAX_KCQE_CNT (KCQE_CNT - 1)
85
86#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
87#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
88
89#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
90#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
91
92#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
93#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
94
95#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
96 (MAX_KCQE_CNT - 1)) ? \
97 (x) + 2 : (x) + 1
98
99#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
100#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
101#define BNX2X_KWQ_DATA(cp, x) \
102 &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
103
104#define DEF_IPID_COUNT 0xc001
105
106#define DEF_KA_TIMEOUT 10000
107#define DEF_KA_INTERVAL 300000
108#define DEF_KA_MAX_PROBE_COUNT 3
109#define DEF_TOS 0
110#define DEF_TTL 0xfe
111#define DEF_SND_SEQ_SCALE 0
112#define DEF_RCV_BUF 0xffff
113#define DEF_SND_BUF 0xffff
114#define DEF_SEED 0
115#define DEF_MAX_RT_TIME 500
116#define DEF_MAX_DA_COUNT 2
117#define DEF_SWS_TIMER 1000
118#define DEF_MAX_CWND 0xffff
119
120struct cnic_ctx {
121 u32 cid;
122 void *ctx;
123 dma_addr_t mapping;
124};
125
126#define BNX2_MAX_CID 0x2000
127
128struct cnic_dma {
129 int num_pages;
130 void **pg_arr;
131 dma_addr_t *pg_map_arr;
132 int pgtbl_size;
133 u32 *pgtbl;
134 dma_addr_t pgtbl_map;
135};
136
137struct cnic_id_tbl {
138 spinlock_t lock;
139 u32 start;
140 u32 max;
141 u32 next;
142 unsigned long *table;
143};
144
145#define CNIC_KWQ16_DATA_SIZE 128
146
147struct kwqe_16_data {
148 u8 data[CNIC_KWQ16_DATA_SIZE];
149};
150
151struct cnic_iscsi {
152 struct cnic_dma task_array_info;
153 struct cnic_dma r2tq_info;
154 struct cnic_dma hq_info;
155};
156
157struct cnic_context {
158 u32 cid;
159 struct kwqe_16_data *kwqe_data;
160 dma_addr_t kwqe_data_mapping;
161 wait_queue_head_t waitq;
162 int wait_cond;
163 unsigned long timestamp;
164 u32 ctx_flags;
165#define CTX_FL_OFFLD_START 0x00000001
166 u8 ulp_proto_id;
167 union {
168 struct cnic_iscsi *iscsi;
169 } proto;
170};
171
172struct cnic_local {
173
174 spinlock_t cnic_ulp_lock;
175 void *ulp_handle[MAX_CNIC_ULP_TYPE];
176 unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
177#define ULP_F_INIT 0
178#define ULP_F_START 1
179 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
180
181 /* protected by ulp_lock */
182 u32 cnic_local_flags;
183#define CNIC_LCL_FL_KWQ_INIT 0x00000001
184
185 struct cnic_dev *dev;
186
187 struct cnic_eth_dev *ethdev;
188
189 void *l2_ring;
190 dma_addr_t l2_ring_map;
191 int l2_ring_size;
192 int l2_rx_ring_size;
193
194 void *l2_buf;
195 dma_addr_t l2_buf_map;
196 int l2_buf_size;
197 int l2_single_buf_size;
198
199 u16 *rx_cons_ptr;
200 u16 *tx_cons_ptr;
201 u16 rx_cons;
202 u16 tx_cons;
203
204 u32 kwq_cid_addr;
205 u32 kcq_cid_addr;
206
207 struct cnic_dma kwq_info;
208 struct kwqe **kwq;
209
210 struct cnic_dma kwq_16_data_info;
211
212 u16 max_kwq_idx;
213
214 u16 kwq_prod_idx;
215 u32 kwq_io_addr;
216
217 u16 *kwq_con_idx_ptr;
218 u16 kwq_con_idx;
219
220 struct cnic_dma kcq_info;
221 struct kcqe **kcq;
222
223 u16 kcq_prod_idx;
224 u32 kcq_io_addr;
225
226 void *status_blk;
227 struct status_block_msix *bnx2_status_blk;
228 struct host_status_block *bnx2x_status_blk;
229
230 u32 status_blk_num;
231 u32 int_num;
232 u32 last_status_idx;
233 struct tasklet_struct cnic_irq_task;
234
235 struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
236
237 struct cnic_sock *csk_tbl;
238 struct cnic_id_tbl csk_port_tbl;
239
240 struct cnic_dma conn_buf_info;
241 struct cnic_dma gbl_buf_info;
242
243 struct cnic_iscsi *iscsi_tbl;
244 struct cnic_context *ctx_tbl;
245 struct cnic_id_tbl cid_tbl;
246 int max_iscsi_conn;
247 atomic_t iscsi_conn;
248
249 /* per connection parameters */
250 int num_iscsi_tasks;
251 int num_ccells;
252 int task_array_size;
253 int r2tq_size;
254 int hq_size;
255 int num_cqs;
256
257 struct cnic_ctx *ctx_arr;
258 int ctx_blks;
259 int ctx_blk_size;
260 int cids_per_blk;
261
262 u32 chip_id;
263 int func;
264 u32 shmem_base;
265
266 u32 uio_dev;
267 struct uio_info *cnic_uinfo;
268
269 struct cnic_ops *cnic_ops;
270 int (*start_hw)(struct cnic_dev *);
271 void (*stop_hw)(struct cnic_dev *);
272 void (*setup_pgtbl)(struct cnic_dev *,
273 struct cnic_dma *);
274 int (*alloc_resc)(struct cnic_dev *);
275 void (*free_resc)(struct cnic_dev *);
276 int (*start_cm)(struct cnic_dev *);
277 void (*stop_cm)(struct cnic_dev *);
278 void (*enable_int)(struct cnic_dev *);
279 void (*disable_int_sync)(struct cnic_dev *);
280 void (*ack_int)(struct cnic_dev *);
281 void (*close_conn)(struct cnic_sock *, u32 opcode);
282 u16 (*next_idx)(u16);
283 u16 (*hw_idx)(u16);
284};
285
286struct bnx2x_bd_chain_next {
287 u32 addr_lo;
288 u32 addr_hi;
289 u8 reserved[8];
290};
291
292#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
293#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
294
295#define CDU_REGION_NUMBER_XCM_AG 2
296#define CDU_REGION_NUMBER_UCM_AG 4
297
298#endif
299
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 000000000000..cee80f694457
--- /dev/null
+++ b/drivers/net/cnic_defs.h
@@ -0,0 +1,580 @@
1
2/* cnic_defs.h: Broadcom CNIC core network driver.
3 *
4 * Copyright (c) 2006-2009 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 */
11
12#ifndef CNIC_DEFS_H
13#define CNIC_DEFS_H
14
15/* KWQ (kernel work queue) request op codes */
16#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
17
18#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
19#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
20#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
21#define L4_KWQE_OPCODE_VALUE_RESET (53)
22#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
23#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
24#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
25
26#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
27#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
28#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
29
30#define L5CM_RAMROD_CMD_ID_BASE (0x80)
31#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
32#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
33#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
34#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
35#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
36
37/* KCQ (kernel completion queue) response op codes */
38#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
39#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
40#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
41#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
42#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
43#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
44#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
45
46#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
47#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
48#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
49
50/* KCQ (kernel completion queue) completion status */
51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
53
54#define L4_LAYER_CODE (4)
55#define L2_LAYER_CODE (2)
56
57/*
58 * L4 KCQ CQE
59 */
60struct l4_kcq {
61 u32 cid;
62 u32 pg_cid;
63 u32 conn_id;
64 u32 pg_host_opaque;
65#if defined(__BIG_ENDIAN)
66 u16 status;
67 u16 reserved1;
68#elif defined(__LITTLE_ENDIAN)
69 u16 reserved1;
70 u16 status;
71#endif
72 u32 reserved2[2];
73#if defined(__BIG_ENDIAN)
74 u8 flags;
75#define L4_KCQ_RESERVED3 (0x7<<0)
76#define L4_KCQ_RESERVED3_SHIFT 0
77#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
78#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
79#define L4_KCQ_LAYER_CODE (0x7<<4)
80#define L4_KCQ_LAYER_CODE_SHIFT 4
81#define L4_KCQ_RESERVED4 (0x1<<7)
82#define L4_KCQ_RESERVED4_SHIFT 7
83 u8 op_code;
84 u16 qe_self_seq;
85#elif defined(__LITTLE_ENDIAN)
86 u16 qe_self_seq;
87 u8 op_code;
88 u8 flags;
89#define L4_KCQ_RESERVED3 (0xF<<0)
90#define L4_KCQ_RESERVED3_SHIFT 0
91#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
92#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
93#define L4_KCQ_LAYER_CODE (0x7<<4)
94#define L4_KCQ_LAYER_CODE_SHIFT 4
95#define L4_KCQ_RESERVED4 (0x1<<7)
96#define L4_KCQ_RESERVED4_SHIFT 7
97#endif
98};
99
100
101/*
102 * L4 KCQ CQE PG upload
103 */
104struct l4_kcq_upload_pg {
105 u32 pg_cid;
106#if defined(__BIG_ENDIAN)
107 u16 pg_status;
108 u16 pg_ipid_count;
109#elif defined(__LITTLE_ENDIAN)
110 u16 pg_ipid_count;
111 u16 pg_status;
112#endif
113 u32 reserved1[5];
114#if defined(__BIG_ENDIAN)
115 u8 flags;
116#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
117#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
118#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
119#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
120#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
121#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
122 u8 op_code;
123 u16 qe_self_seq;
124#elif defined(__LITTLE_ENDIAN)
125 u16 qe_self_seq;
126 u8 op_code;
127 u8 flags;
128#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
129#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
130#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
131#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
132#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
133#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
134#endif
135};
136
137
138/*
139 * Gracefully close the connection request
140 */
141struct l4_kwq_close_req {
142#if defined(__BIG_ENDIAN)
143 u8 flags;
144#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
145#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
146#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
147#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
148#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
149#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
150 u8 op_code;
151 u16 reserved0;
152#elif defined(__LITTLE_ENDIAN)
153 u16 reserved0;
154 u8 op_code;
155 u8 flags;
156#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
157#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
158#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
159#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
160#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
161#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
162#endif
163 u32 cid;
164 u32 reserved2[6];
165};
166
167
168/*
169 * The first request to be passed in order to establish connection in option2
170 */
171struct l4_kwq_connect_req1 {
172#if defined(__BIG_ENDIAN)
173 u8 flags;
174#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
175#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
176#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
177#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
178#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
179#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
180 u8 op_code;
181 u8 reserved0;
182 u8 conn_flags;
183#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
184#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
185#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
186#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
187#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
188#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
189#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
190#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
191#elif defined(__LITTLE_ENDIAN)
192 u8 conn_flags;
193#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
194#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
195#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
196#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
197#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
198#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
199#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
200#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
201 u8 reserved0;
202 u8 op_code;
203 u8 flags;
204#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
205#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
206#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
207#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
208#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
209#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
210#endif
211 u32 cid;
212 u32 pg_cid;
213 u32 src_ip;
214 u32 dst_ip;
215#if defined(__BIG_ENDIAN)
216 u16 dst_port;
217 u16 src_port;
218#elif defined(__LITTLE_ENDIAN)
219 u16 src_port;
220 u16 dst_port;
221#endif
222#if defined(__BIG_ENDIAN)
223 u8 rsrv1[3];
224 u8 tcp_flags;
225#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
226#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
227#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
228#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
229#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
230#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
231#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
232#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
233#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
234#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
235#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
236#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
237#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
238#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
239#elif defined(__LITTLE_ENDIAN)
240 u8 tcp_flags;
241#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
242#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
243#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
244#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
245#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
246#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
247#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
248#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
249#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
250#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
251#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
252#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
253#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
254#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
255 u8 rsrv1[3];
256#endif
257 u32 rsrv2;
258};
259
260
261/*
262 * The second (optional) request to be passed in order to establish
263 * connection in option2 - for IPv6 only
264 */
265struct l4_kwq_connect_req2 {
266#if defined(__BIG_ENDIAN)
267 u8 flags;
268#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
269#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
270#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
271#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
272#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
273#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
274 u8 op_code;
275 u8 reserved0;
276 u8 rsrv;
277#elif defined(__LITTLE_ENDIAN)
278 u8 rsrv;
279 u8 reserved0;
280 u8 op_code;
281 u8 flags;
282#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
283#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
284#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
285#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
286#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
287#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
288#endif
289 u32 reserved2;
290 u32 src_ip_v6_2;
291 u32 src_ip_v6_3;
292 u32 src_ip_v6_4;
293 u32 dst_ip_v6_2;
294 u32 dst_ip_v6_3;
295 u32 dst_ip_v6_4;
296};
297
298
299/*
300 * The third (and last) request to be passed in order to establish
301 * connection in option2
302 */
303struct l4_kwq_connect_req3 {
304#if defined(__BIG_ENDIAN)
305 u8 flags;
306#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
307#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
308#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
309#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
310#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
311#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
312 u8 op_code;
313 u16 reserved0;
314#elif defined(__LITTLE_ENDIAN)
315 u16 reserved0;
316 u8 op_code;
317 u8 flags;
318#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
319#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
320#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
321#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
322#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
323#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
324#endif
325 u32 ka_timeout;
326	u32 ka_interval;
327#if defined(__BIG_ENDIAN)
328 u8 snd_seq_scale;
329 u8 ttl;
330 u8 tos;
331 u8 ka_max_probe_count;
332#elif defined(__LITTLE_ENDIAN)
333 u8 ka_max_probe_count;
334 u8 tos;
335 u8 ttl;
336 u8 snd_seq_scale;
337#endif
338#if defined(__BIG_ENDIAN)
339 u16 pmtu;
340 u16 mss;
341#elif defined(__LITTLE_ENDIAN)
342 u16 mss;
343 u16 pmtu;
344#endif
345 u32 rcv_buf;
346 u32 snd_buf;
347 u32 seed;
348};
349
350
351/*
352 * a KWQE request to offload a PG connection
353 */
354struct l4_kwq_offload_pg {
355#if defined(__BIG_ENDIAN)
356 u8 flags;
357#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
358#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
359#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
360#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
361#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
362#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
363 u8 op_code;
364 u16 reserved0;
365#elif defined(__LITTLE_ENDIAN)
366 u16 reserved0;
367 u8 op_code;
368 u8 flags;
369#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
370#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
371#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
372#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
373#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
374#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
375#endif
376#if defined(__BIG_ENDIAN)
377 u8 l2hdr_nbytes;
378 u8 pg_flags;
379#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
380#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
381#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
382#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
383#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
384#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
385 u8 da0;
386 u8 da1;
387#elif defined(__LITTLE_ENDIAN)
388 u8 da1;
389 u8 da0;
390 u8 pg_flags;
391#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
392#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
393#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
394#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
395#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
396#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
397 u8 l2hdr_nbytes;
398#endif
399#if defined(__BIG_ENDIAN)
400 u8 da2;
401 u8 da3;
402 u8 da4;
403 u8 da5;
404#elif defined(__LITTLE_ENDIAN)
405 u8 da5;
406 u8 da4;
407 u8 da3;
408 u8 da2;
409#endif
410#if defined(__BIG_ENDIAN)
411 u8 sa0;
412 u8 sa1;
413 u8 sa2;
414 u8 sa3;
415#elif defined(__LITTLE_ENDIAN)
416 u8 sa3;
417 u8 sa2;
418 u8 sa1;
419 u8 sa0;
420#endif
421#if defined(__BIG_ENDIAN)
422 u8 sa4;
423 u8 sa5;
424 u16 etype;
425#elif defined(__LITTLE_ENDIAN)
426 u16 etype;
427 u8 sa5;
428 u8 sa4;
429#endif
430#if defined(__BIG_ENDIAN)
431 u16 vlan_tag;
432 u16 ipid_start;
433#elif defined(__LITTLE_ENDIAN)
434 u16 ipid_start;
435 u16 vlan_tag;
436#endif
437#if defined(__BIG_ENDIAN)
438 u16 ipid_count;
439 u16 reserved3;
440#elif defined(__LITTLE_ENDIAN)
441 u16 reserved3;
442 u16 ipid_count;
443#endif
444 u32 host_opaque;
445};
446
447
448/*
449 * Abortively close the connection request
450 */
451struct l4_kwq_reset_req {
452#if defined(__BIG_ENDIAN)
453 u8 flags;
454#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
455#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
456#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
457#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
458#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
459#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
460 u8 op_code;
461 u16 reserved0;
462#elif defined(__LITTLE_ENDIAN)
463 u16 reserved0;
464 u8 op_code;
465 u8 flags;
466#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
467#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
468#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
469#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
470#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
471#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
472#endif
473 u32 cid;
474 u32 reserved2[6];
475};
476
477
478/*
479 * a KWQE request to update a PG connection
480 */
481struct l4_kwq_update_pg {
482#if defined(__BIG_ENDIAN)
483 u8 flags;
484#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
485#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
486#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
487#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
488#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
489#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
490 u8 opcode;
491 u16 oper16;
492#elif defined(__LITTLE_ENDIAN)
493 u16 oper16;
494 u8 opcode;
495 u8 flags;
496#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
497#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
498#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
499#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
500#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
501#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
502#endif
503 u32 pg_cid;
504 u32 pg_host_opaque;
505#if defined(__BIG_ENDIAN)
506 u8 pg_valids;
507#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
508#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
509#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
510#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
511#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
512#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
513 u8 pg_unused_a;
514 u16 pg_ipid_count;
515#elif defined(__LITTLE_ENDIAN)
516 u16 pg_ipid_count;
517 u8 pg_unused_a;
518 u8 pg_valids;
519#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
520#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
521#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
522#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
523#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
524#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
525#endif
526#if defined(__BIG_ENDIAN)
527 u16 reserverd3;
528 u8 da0;
529 u8 da1;
530#elif defined(__LITTLE_ENDIAN)
531 u8 da1;
532 u8 da0;
533 u16 reserverd3;
534#endif
535#if defined(__BIG_ENDIAN)
536 u8 da2;
537 u8 da3;
538 u8 da4;
539 u8 da5;
540#elif defined(__LITTLE_ENDIAN)
541 u8 da5;
542 u8 da4;
543 u8 da3;
544 u8 da2;
545#endif
546 u32 reserved4;
547 u32 reserved5;
548};
549
550
551/*
552 * a KWQE request to upload a PG or L4 context
553 */
554struct l4_kwq_upload {
555#if defined(__BIG_ENDIAN)
556 u8 flags;
557#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
558#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
559#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
560#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
561#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
562#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
563 u8 opcode;
564 u16 oper16;
565#elif defined(__LITTLE_ENDIAN)
566 u16 oper16;
567 u8 opcode;
568 u8 flags;
569#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
570#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
571#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
572#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
573#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
574#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
575#endif
576 u32 cid;
577 u32 reserved2[6];
578};
579
580#endif /* CNIC_DEFS_H */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
new file mode 100644
index 000000000000..06380963a34e
--- /dev/null
+++ b/drivers/net/cnic_if.h
@@ -0,0 +1,299 @@
1/* cnic_if.h: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 */
10
11
12#ifndef CNIC_IF_H
13#define CNIC_IF_H
14
15#define CNIC_MODULE_VERSION "2.0.0"
16#define CNIC_MODULE_RELDATE "May 21, 2009"
17
18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1
20#define CNIC_ULP_L4 2
21#define MAX_CNIC_ULP_TYPE_EXT 2
22#define MAX_CNIC_ULP_TYPE 3
23
24struct kwqe {
25 u32 kwqe_op_flag;
26
27#define KWQE_OPCODE_MASK 0x00ff0000
28#define KWQE_OPCODE_SHIFT 16
29#define KWQE_FLAGS_LAYER_SHIFT 28
30#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
31
32 u32 kwqe_info0;
33 u32 kwqe_info1;
34 u32 kwqe_info2;
35 u32 kwqe_info3;
36 u32 kwqe_info4;
37 u32 kwqe_info5;
38 u32 kwqe_info6;
39};
40
41struct kwqe_16 {
42 u32 kwqe_info0;
43 u32 kwqe_info1;
44 u32 kwqe_info2;
45 u32 kwqe_info3;
46};
47
48struct kcqe {
49 u32 kcqe_info0;
50 u32 kcqe_info1;
51 u32 kcqe_info2;
52 u32 kcqe_info3;
53 u32 kcqe_info4;
54 u32 kcqe_info5;
55 u32 kcqe_info6;
56 u32 kcqe_op_flag;
57 #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
58 #define KCQE_FLAGS_LAYER_MASK (0x7<<28)
59 #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
60 #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
61 #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
62 #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
63 #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
64 #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
65 #define KCQE_FLAGS_NEXT (1<<31)
66 #define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
67 #define KCQE_FLAGS_OPCODE_SHIFT (16)
68 #define KCQE_OPCODE(op) \
69 (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
70};
71
72#define MAX_CNIC_CTL_DATA 64
73#define MAX_DRV_CTL_DATA 64
74
75#define CNIC_CTL_STOP_CMD 1
76#define CNIC_CTL_START_CMD 2
77#define CNIC_CTL_COMPLETION_CMD 3
78
79#define DRV_CTL_IO_WR_CMD 0x101
80#define DRV_CTL_IO_RD_CMD 0x102
81#define DRV_CTL_CTX_WR_CMD 0x103
82#define DRV_CTL_CTXTBL_WR_CMD 0x104
83#define DRV_CTL_COMPLETION_CMD 0x105
84
85struct cnic_ctl_completion {
86 u32 cid;
87};
88
89struct drv_ctl_completion {
90 u32 comp_count;
91};
92
93struct cnic_ctl_info {
94 int cmd;
95 union {
96 struct cnic_ctl_completion comp;
97 char bytes[MAX_CNIC_CTL_DATA];
98 } data;
99};
100
101struct drv_ctl_io {
102 u32 cid_addr;
103 u32 offset;
104 u32 data;
105 dma_addr_t dma_addr;
106};
107
108struct drv_ctl_info {
109 int cmd;
110 union {
111 struct drv_ctl_completion comp;
112 struct drv_ctl_io io;
113 char bytes[MAX_DRV_CTL_DATA];
114 } data;
115};
116
117struct cnic_ops {
118 struct module *cnic_owner;
119 /* Calls to these functions are protected by RCU. When
120 * unregistering, we wait for any calls to complete before
121 * continuing.
122 */
123 int (*cnic_handler)(void *, void *);
124 int (*cnic_ctl)(void *, struct cnic_ctl_info *);
125};
126
127#define MAX_CNIC_VEC 8
128
129struct cnic_irq {
130 unsigned int vector;
131 void *status_blk;
132 u32 status_blk_num;
133 u32 irq_flags;
134#define CNIC_IRQ_FL_MSIX 0x00000001
135};
136
137struct cnic_eth_dev {
138 struct module *drv_owner;
139 u32 drv_state;
140#define CNIC_DRV_STATE_REGD 0x00000001
141#define CNIC_DRV_STATE_USING_MSIX 0x00000002
142 u32 chip_id;
143 u32 max_kwqe_pending;
144 struct pci_dev *pdev;
145 void __iomem *io_base;
146
147 u32 ctx_tbl_offset;
148 u32 ctx_tbl_len;
149 int ctx_blk_size;
150 u32 starting_cid;
151 u32 max_iscsi_conn;
152 u32 max_fcoe_conn;
153 u32 max_rdma_conn;
154 u32 reserved0[2];
155
156 int num_irq;
157 struct cnic_irq irq_arr[MAX_CNIC_VEC];
158 int (*drv_register_cnic)(struct net_device *,
159 struct cnic_ops *, void *);
160 int (*drv_unregister_cnic)(struct net_device *);
161 int (*drv_submit_kwqes_32)(struct net_device *,
162 struct kwqe *[], u32);
163 int (*drv_submit_kwqes_16)(struct net_device *,
164 struct kwqe_16 *[], u32);
165 int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
166 unsigned long reserved1[2];
167};
168
169struct cnic_sockaddr {
170 union {
171 struct sockaddr_in v4;
172 struct sockaddr_in6 v6;
173 } local;
174 union {
175 struct sockaddr_in v4;
176 struct sockaddr_in6 v6;
177 } remote;
178};
179
180struct cnic_sock {
181 struct cnic_dev *dev;
182 void *context;
183 u32 src_ip[4];
184 u32 dst_ip[4];
185 u16 src_port;
186 u16 dst_port;
187 u16 vlan_id;
188 unsigned char old_ha[6];
189 unsigned char ha[6];
190 u32 mtu;
191 u32 cid;
192 u32 l5_cid;
193 u32 pg_cid;
194 int ulp_type;
195
196 u32 ka_timeout;
197 u32 ka_interval;
198 u8 ka_max_probe_count;
199 u8 tos;
200 u8 ttl;
201 u8 snd_seq_scale;
202 u32 rcv_buf;
203 u32 snd_buf;
204 u32 seed;
205
206 unsigned long tcp_flags;
207#define SK_TCP_NO_DELAY_ACK 0x1
208#define SK_TCP_KEEP_ALIVE 0x2
209#define SK_TCP_NAGLE 0x4
210#define SK_TCP_TIMESTAMP 0x8
211#define SK_TCP_SACK 0x10
212#define SK_TCP_SEG_SCALING 0x20
213 unsigned long flags;
214#define SK_F_INUSE 0
215#define SK_F_OFFLD_COMPLETE 1
216#define SK_F_OFFLD_SCHED 2
217#define SK_F_PG_OFFLD_COMPLETE 3
218#define SK_F_CONNECT_START 4
219#define SK_F_IPV6 5
220#define SK_F_CLOSING 7
221
222 atomic_t ref_count;
223 u32 state;
224 struct kwqe kwqe1;
225 struct kwqe kwqe2;
226 struct kwqe kwqe3;
227};
228
229struct cnic_dev {
230 struct net_device *netdev;
231 struct pci_dev *pcidev;
232 void __iomem *regview;
233 struct list_head list;
234
235 int (*register_device)(struct cnic_dev *dev, int ulp_type,
236 void *ulp_ctx);
237 int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
238 int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
239 u32 num_wqes);
240 int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
241 u32 num_wqes);
242
243 int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
244 void *);
245 int (*cm_destroy)(struct cnic_sock *);
246 int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
247 int (*cm_abort)(struct cnic_sock *);
248 int (*cm_close)(struct cnic_sock *);
249 struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
250 int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
251 char *data, u16 data_size);
252 unsigned long flags;
253#define CNIC_F_CNIC_UP 1
254#define CNIC_F_BNX2_CLASS 3
255#define CNIC_F_BNX2X_CLASS 4
256 atomic_t ref_count;
257 u8 mac_addr[6];
258
259 int max_iscsi_conn;
260 int max_fcoe_conn;
261 int max_rdma_conn;
262
263 void *cnic_priv;
264};
265
266#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
267#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
268#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
269#define CNIC_RD(dev, off) readl(dev->regview + off)
270#define CNIC_RD16(dev, off) readw(dev->regview + off)
271
272struct cnic_ulp_ops {
273 /* Calls to these functions are protected by RCU. When
274 * unregistering, we wait for any calls to complete before
275 * continuing.
276 */
277
278 void (*cnic_init)(struct cnic_dev *dev);
279 void (*cnic_exit)(struct cnic_dev *dev);
280 void (*cnic_start)(void *ulp_ctx);
281 void (*cnic_stop)(void *ulp_ctx);
282 void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
283 u32 num_cqes);
284 void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
285 void (*cm_connect_complete)(struct cnic_sock *);
286 void (*cm_close_complete)(struct cnic_sock *);
287 void (*cm_abort_complete)(struct cnic_sock *);
288 void (*cm_remote_close)(struct cnic_sock *);
289 void (*cm_remote_abort)(struct cnic_sock *);
290 void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
291 char *data, u16 data_size);
292 struct module *owner;
293};
294
295extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
296
297extern int cnic_unregister_driver(int ulp_type);
298
299#endif
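
For reference, a minimal sketch (not part of the patch) of how an upper-layer protocol module might register with the interface declared in cnic_if.h.  The "my_ulp" names are placeholders; only the per-device init/exit callbacks are stubbed, and a real ULP such as an iSCSI initiator would also fill in the start/stop and indication callbacks.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/module.h>
#include "cnic_if.h"

static void my_ulp_init(struct cnic_dev *dev)
{
	/* called once per CNIC device after the driver is registered */
}

static void my_ulp_exit(struct cnic_dev *dev)
{
	/* called once per CNIC device before the driver is unregistered */
}

static struct cnic_ulp_ops my_ulp_ops = {
	.cnic_init = my_ulp_init,
	.cnic_exit = my_ulp_exit,
	.owner     = THIS_MODULE,
};

static int __init my_ulp_module_init(void)
{
	return cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
}

static void __exit my_ulp_module_exit(void)
{
	cnic_unregister_driver(CNIC_ULP_ISCSI);
}

module_init(my_ulp_module_init);
module_exit(my_ulp_module_exit);
MODULE_LICENSE("GPL");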