author     Michael Chan <mchan@broadcom.com>    2009-06-08 21:14:43 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>    2009-06-09 11:22:42 -0400
commit     a463696039f7097ce87c21db3cf5c16cdcb3850d (patch)
tree       3308681e117008282fd73a224215e0aab173262e /drivers/net/cnic.c
parent     4edd473f208cff77ce1f7ef26d5a41f31fa198e0 (diff)
[SCSI] cnic: Add new Broadcom CNIC driver.
The CNIC driver controls BNX2 hardware rings and resources used by iSCSI.
Most hardware resources for iSCSI are separate from those used for ethernet
networking.  iSCSI uses a separate MAC address and IP address.  The CNIC
driver creates a UIO interface to handle non-offloaded packets, such as ARP,
in userspace.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
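As an illustration of the interface this patch exports, here is a minimal,
hypothetical sketch of how an upper-layer iSCSI driver might register with
cnic.  Only cnic_register_driver(), cnic_unregister_driver(), CNIC_ULP_ISCSI,
and the cnic_init/cnic_exit callbacks of struct cnic_ulp_ops come from this
patch; every "my_ulp" name below is made up for the example and is not part
of the patch.

    /* Hypothetical ULP module sketch -- not part of this patch. */
    #include <linux/module.h>
    #include "cnic_if.h"

    static void my_ulp_init(struct cnic_dev *dev)
    {
            /* Called once for each cnic device; claim per-device state here. */
    }

    static void my_ulp_exit(struct cnic_dev *dev)
    {
            /* Release whatever my_ulp_init() set up for this device. */
    }

    static struct cnic_ulp_ops my_ulp_ops = {
            .cnic_init = my_ulp_init,
            .cnic_exit = my_ulp_exit,
    };

    static int __init my_ulp_mod_init(void)
    {
            /* cnic invokes ->cnic_init() for every device already in its list. */
            return cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
    }

    static void __exit my_ulp_mod_exit(void)
    {
            /* Fails with -EINVAL while devices are still registered to this type. */
            cnic_unregister_driver(CNIC_ULP_ISCSI);
    }

    module_init(my_ulp_mod_init);
    module_exit(my_ulp_mod_exit);
    MODULE_LICENSE("GPL");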
Diffstat (limited to 'drivers/net/cnic.c')
-rw-r--r--   drivers/net/cnic.c   2711
1 file changed, 2711 insertions, 0 deletions
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
new file mode 100644
index 000000000000..8d740376bbd2
--- /dev/null
+++ b/drivers/net/cnic.c
@@ -0,0 +1,2711 @@
1/* cnic.c: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
10 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
11 */
12
13#include <linux/module.h>
14
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/pci.h>
20#include <linux/init.h>
21#include <linux/netdevice.h>
22#include <linux/uio_driver.h>
23#include <linux/in.h>
24#include <linux/dma-mapping.h>
25#include <linux/delay.h>
26#include <linux/ethtool.h>
27#include <linux/if_vlan.h>
28#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
29#define BCM_VLAN 1
30#endif
31#include <net/ip.h>
32#include <net/tcp.h>
33#include <net/route.h>
34#include <net/ipv6.h>
35#include <net/ip6_route.h>
36#include <scsi/iscsi_if.h>
37
38#include "cnic_if.h"
39#include "bnx2.h"
40#include "cnic.h"
41#include "cnic_defs.h"
42
43#define DRV_MODULE_NAME "cnic"
44#define PFX DRV_MODULE_NAME ": "
45
46static char version[] __devinitdata =
47 "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
48
49MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
50 "Chen (zongxi@broadcom.com");
51MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
52MODULE_LICENSE("GPL");
53MODULE_VERSION(CNIC_MODULE_VERSION);
54
55static LIST_HEAD(cnic_dev_list);
56static DEFINE_RWLOCK(cnic_dev_lock);
57static DEFINE_MUTEX(cnic_lock);
58
59static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
60
61static int cnic_service_bnx2(void *, void *);
62static int cnic_ctl(void *, struct cnic_ctl_info *);
63
64static struct cnic_ops cnic_bnx2_ops = {
65 .cnic_owner = THIS_MODULE,
66 .cnic_handler = cnic_service_bnx2,
67 .cnic_ctl = cnic_ctl,
68};
69
70static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
71static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
72static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
73static int cnic_cm_set_pg(struct cnic_sock *);
74
75static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
76{
77 struct cnic_dev *dev = uinfo->priv;
78 struct cnic_local *cp = dev->cnic_priv;
79
80 if (!capable(CAP_NET_ADMIN))
81 return -EPERM;
82
83 if (cp->uio_dev != -1)
84 return -EBUSY;
85
86 cp->uio_dev = iminor(inode);
87
88 cnic_shutdown_bnx2_rx_ring(dev);
89
90 cnic_init_bnx2_tx_ring(dev);
91 cnic_init_bnx2_rx_ring(dev);
92
93 return 0;
94}
95
96static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
97{
98 struct cnic_dev *dev = uinfo->priv;
99 struct cnic_local *cp = dev->cnic_priv;
100
101 cp->uio_dev = -1;
102 return 0;
103}
104
105static inline void cnic_hold(struct cnic_dev *dev)
106{
107 atomic_inc(&dev->ref_count);
108}
109
110static inline void cnic_put(struct cnic_dev *dev)
111{
112 atomic_dec(&dev->ref_count);
113}
114
115static inline void csk_hold(struct cnic_sock *csk)
116{
117 atomic_inc(&csk->ref_count);
118}
119
120static inline void csk_put(struct cnic_sock *csk)
121{
122 atomic_dec(&csk->ref_count);
123}
124
125static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
126{
127 struct cnic_dev *cdev;
128
129 read_lock(&cnic_dev_lock);
130 list_for_each_entry(cdev, &cnic_dev_list, list) {
131 if (netdev == cdev->netdev) {
132 cnic_hold(cdev);
133 read_unlock(&cnic_dev_lock);
134 return cdev;
135 }
136 }
137 read_unlock(&cnic_dev_lock);
138 return NULL;
139}
140
141static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
142{
143 struct cnic_local *cp = dev->cnic_priv;
144 struct cnic_eth_dev *ethdev = cp->ethdev;
145 struct drv_ctl_info info;
146 struct drv_ctl_io *io = &info.data.io;
147
148 info.cmd = DRV_CTL_CTX_WR_CMD;
149 io->cid_addr = cid_addr;
150 io->offset = off;
151 io->data = val;
152 ethdev->drv_ctl(dev->netdev, &info);
153}
154
155static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
156{
157 struct cnic_local *cp = dev->cnic_priv;
158 struct cnic_eth_dev *ethdev = cp->ethdev;
159 struct drv_ctl_info info;
160 struct drv_ctl_io *io = &info.data.io;
161
162 info.cmd = DRV_CTL_IO_WR_CMD;
163 io->offset = off;
164 io->data = val;
165 ethdev->drv_ctl(dev->netdev, &info);
166}
167
168static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
169{
170 struct cnic_local *cp = dev->cnic_priv;
171 struct cnic_eth_dev *ethdev = cp->ethdev;
172 struct drv_ctl_info info;
173 struct drv_ctl_io *io = &info.data.io;
174
175 info.cmd = DRV_CTL_IO_RD_CMD;
176 io->offset = off;
177 ethdev->drv_ctl(dev->netdev, &info);
178 return io->data;
179}
180
181static int cnic_in_use(struct cnic_sock *csk)
182{
183 return test_bit(SK_F_INUSE, &csk->flags);
184}
185
186static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
187{
188 struct cnic_local *cp = dev->cnic_priv;
189 struct cnic_eth_dev *ethdev = cp->ethdev;
190 struct drv_ctl_info info;
191
192 info.cmd = DRV_CTL_COMPLETION_CMD;
193 info.data.comp.comp_count = count;
194 ethdev->drv_ctl(dev->netdev, &info);
195}
196
197static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
198 struct cnic_sock *csk)
199{
200 struct iscsi_path path_req;
201 char *buf = NULL;
202 u16 len = 0;
203 u32 msg_type = ISCSI_KEVENT_IF_DOWN;
204 struct cnic_ulp_ops *ulp_ops;
205
206 if (cp->uio_dev == -1)
207 return -ENODEV;
208
209 if (csk) {
210 len = sizeof(path_req);
211 buf = (char *) &path_req;
212 memset(&path_req, 0, len);
213
214 msg_type = ISCSI_KEVENT_PATH_REQ;
215 path_req.handle = (u64) csk->l5_cid;
216 if (test_bit(SK_F_IPV6, &csk->flags)) {
217 memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
218 sizeof(struct in6_addr));
219 path_req.ip_addr_len = 16;
220 } else {
221 memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
222 sizeof(struct in_addr));
223 path_req.ip_addr_len = 4;
224 }
225 path_req.vlan_id = csk->vlan_id;
226 path_req.pmtu = csk->mtu;
227 }
228
229 rcu_read_lock();
230 ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
231 if (ulp_ops)
232 ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
233 rcu_read_unlock();
234 return 0;
235}
236
237static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
238 char *buf, u16 len)
239{
240 int rc = -EINVAL;
241
242 switch (msg_type) {
243 case ISCSI_UEVENT_PATH_UPDATE: {
244 struct cnic_local *cp;
245 u32 l5_cid;
246 struct cnic_sock *csk;
247 struct iscsi_path *path_resp;
248
249 if (len < sizeof(*path_resp))
250 break;
251
252 path_resp = (struct iscsi_path *) buf;
253 cp = dev->cnic_priv;
254 l5_cid = (u32) path_resp->handle;
255 if (l5_cid >= MAX_CM_SK_TBL_SZ)
256 break;
257
258 csk = &cp->csk_tbl[l5_cid];
259 csk_hold(csk);
260 if (cnic_in_use(csk)) {
261 memcpy(csk->ha, path_resp->mac_addr, 6);
262 if (test_bit(SK_F_IPV6, &csk->flags))
263 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
264 sizeof(struct in6_addr));
265 else
266 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
267 sizeof(struct in_addr));
268 if (is_valid_ether_addr(csk->ha))
269 cnic_cm_set_pg(csk);
270 }
271 csk_put(csk);
272 rc = 0;
273 }
274 }
275
276 return rc;
277}
278
279static int cnic_offld_prep(struct cnic_sock *csk)
280{
281 if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
282 return 0;
283
284 if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
285 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
286 return 0;
287 }
288
289 return 1;
290}
291
292static int cnic_close_prep(struct cnic_sock *csk)
293{
294 clear_bit(SK_F_CONNECT_START, &csk->flags);
295 smp_mb__after_clear_bit();
296
297 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
298 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
299 msleep(1);
300
301 return 1;
302 }
303 return 0;
304}
305
306static int cnic_abort_prep(struct cnic_sock *csk)
307{
308 clear_bit(SK_F_CONNECT_START, &csk->flags);
309 smp_mb__after_clear_bit();
310
311 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
312 msleep(1);
313
314 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
315 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
316 return 1;
317 }
318
319 return 0;
320}
321
322int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
323{
324 struct cnic_dev *dev;
325
326 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
327 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
328 ulp_type);
329 return -EINVAL;
330 }
331 mutex_lock(&cnic_lock);
332 if (cnic_ulp_tbl[ulp_type]) {
333 printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
334 "been registered\n", ulp_type);
335 mutex_unlock(&cnic_lock);
336 return -EBUSY;
337 }
338
339 read_lock(&cnic_dev_lock);
340 list_for_each_entry(dev, &cnic_dev_list, list) {
341 struct cnic_local *cp = dev->cnic_priv;
342
343 clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
344 }
345 read_unlock(&cnic_dev_lock);
346
347 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
348 mutex_unlock(&cnic_lock);
349
350 /* Prevent race conditions with netdev_event */
351 rtnl_lock();
352 read_lock(&cnic_dev_lock);
353 list_for_each_entry(dev, &cnic_dev_list, list) {
354 struct cnic_local *cp = dev->cnic_priv;
355
356 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
357 ulp_ops->cnic_init(dev);
358 }
359 read_unlock(&cnic_dev_lock);
360 rtnl_unlock();
361
362 return 0;
363}
364
365int cnic_unregister_driver(int ulp_type)
366{
367 struct cnic_dev *dev;
368
369 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
370 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
371 ulp_type);
372 return -EINVAL;
373 }
374 mutex_lock(&cnic_lock);
375 if (!cnic_ulp_tbl[ulp_type]) {
376 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
377 "been registered\n", ulp_type);
378 goto out_unlock;
379 }
380 read_lock(&cnic_dev_lock);
381 list_for_each_entry(dev, &cnic_dev_list, list) {
382 struct cnic_local *cp = dev->cnic_priv;
383
384 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
385 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
386 "still has devices registered\n", ulp_type);
387 read_unlock(&cnic_dev_lock);
388 goto out_unlock;
389 }
390 }
391 read_unlock(&cnic_dev_lock);
392
393 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
394
395 mutex_unlock(&cnic_lock);
396 synchronize_rcu();
397 return 0;
398
399out_unlock:
400 mutex_unlock(&cnic_lock);
401 return -EINVAL;
402}
403
404static int cnic_start_hw(struct cnic_dev *);
405static void cnic_stop_hw(struct cnic_dev *);
406
407static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
408 void *ulp_ctx)
409{
410 struct cnic_local *cp = dev->cnic_priv;
411 struct cnic_ulp_ops *ulp_ops;
412
413 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
414 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
415 ulp_type);
416 return -EINVAL;
417 }
418 mutex_lock(&cnic_lock);
419 if (cnic_ulp_tbl[ulp_type] == NULL) {
420 printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
421 "has not been registered\n", ulp_type);
422 mutex_unlock(&cnic_lock);
423 return -EAGAIN;
424 }
425 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
426 printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
427 "been registered to this device\n", ulp_type);
428 mutex_unlock(&cnic_lock);
429 return -EBUSY;
430 }
431
432 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
433 cp->ulp_handle[ulp_type] = ulp_ctx;
434 ulp_ops = cnic_ulp_tbl[ulp_type];
435 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
436 cnic_hold(dev);
437
438 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
439 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
440 ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
441
442 mutex_unlock(&cnic_lock);
443
444 return 0;
445
446}
447EXPORT_SYMBOL(cnic_register_driver);
448
449static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
450{
451 struct cnic_local *cp = dev->cnic_priv;
452
453 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
454 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
455 ulp_type);
456 return -EINVAL;
457 }
458 mutex_lock(&cnic_lock);
459 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
460 rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
461 cnic_put(dev);
462 } else {
463 printk(KERN_ERR PFX "cnic_unregister_device: device not "
464 "registered to this ulp type %d\n", ulp_type);
465 mutex_unlock(&cnic_lock);
466 return -EINVAL;
467 }
468 mutex_unlock(&cnic_lock);
469
470 synchronize_rcu();
471
472 return 0;
473}
474EXPORT_SYMBOL(cnic_unregister_driver);
475
476static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
477{
478 id_tbl->start = start_id;
479 id_tbl->max = size;
480 id_tbl->next = 0;
481 spin_lock_init(&id_tbl->lock);
482 id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
483 if (!id_tbl->table)
484 return -ENOMEM;
485
486 return 0;
487}
488
489static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
490{
491 kfree(id_tbl->table);
492 id_tbl->table = NULL;
493}
494
495static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
496{
497 int ret = -1;
498
499 id -= id_tbl->start;
500 if (id >= id_tbl->max)
501 return ret;
502
503 spin_lock(&id_tbl->lock);
504 if (!test_bit(id, id_tbl->table)) {
505 set_bit(id, id_tbl->table);
506 ret = 0;
507 }
508 spin_unlock(&id_tbl->lock);
509 return ret;
510}
511
512/* Returns -1 if not successful */
513static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
514{
515 u32 id;
516
517 spin_lock(&id_tbl->lock);
518 id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
519 if (id >= id_tbl->max) {
520 id = -1;
521 if (id_tbl->next != 0) {
522 id = find_first_zero_bit(id_tbl->table, id_tbl->next);
523 if (id >= id_tbl->next)
524 id = -1;
525 }
526 }
527
528 if (id < id_tbl->max) {
529 set_bit(id, id_tbl->table);
530 id_tbl->next = (id + 1) & (id_tbl->max - 1);
531 id += id_tbl->start;
532 }
533
534 spin_unlock(&id_tbl->lock);
535
536 return id;
537}
538
539static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
540{
541 if (id == -1)
542 return;
543
544 id -= id_tbl->start;
545 if (id >= id_tbl->max)
546 return;
547
548 clear_bit(id, id_tbl->table);
549}
550
551static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
552{
553 int i;
554
555 if (!dma->pg_arr)
556 return;
557
558 for (i = 0; i < dma->num_pages; i++) {
559 if (dma->pg_arr[i]) {
560 pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
561 dma->pg_arr[i], dma->pg_map_arr[i]);
562 dma->pg_arr[i] = NULL;
563 }
564 }
565 if (dma->pgtbl) {
566 pci_free_consistent(dev->pcidev, dma->pgtbl_size,
567 dma->pgtbl, dma->pgtbl_map);
568 dma->pgtbl = NULL;
569 }
570 kfree(dma->pg_arr);
571 dma->pg_arr = NULL;
572 dma->num_pages = 0;
573}
574
575static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
576{
577 int i;
578 u32 *page_table = dma->pgtbl;
579
580 for (i = 0; i < dma->num_pages; i++) {
581 /* Each entry needs to be in big endian format. */
582 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
583 page_table++;
584 *page_table = (u32) dma->pg_map_arr[i];
585 page_table++;
586 }
587}
588
589static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
590 int pages, int use_pg_tbl)
591{
592 int i, size;
593 struct cnic_local *cp = dev->cnic_priv;
594
595 size = pages * (sizeof(void *) + sizeof(dma_addr_t));
596 dma->pg_arr = kzalloc(size, GFP_ATOMIC);
597 if (dma->pg_arr == NULL)
598 return -ENOMEM;
599
600 dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
601 dma->num_pages = pages;
602
603 for (i = 0; i < pages; i++) {
604 dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
605 BCM_PAGE_SIZE,
606 &dma->pg_map_arr[i]);
607 if (dma->pg_arr[i] == NULL)
608 goto error;
609 }
610 if (!use_pg_tbl)
611 return 0;
612
613 dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
614 ~(BCM_PAGE_SIZE - 1);
615 dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
616 &dma->pgtbl_map);
617 if (dma->pgtbl == NULL)
618 goto error;
619
620 cp->setup_pgtbl(dev, dma);
621
622 return 0;
623
624error:
625 cnic_free_dma(dev, dma);
626 return -ENOMEM;
627}
628
629static void cnic_free_resc(struct cnic_dev *dev)
630{
631 struct cnic_local *cp = dev->cnic_priv;
632 int i = 0;
633
634 if (cp->cnic_uinfo) {
635 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
636 while (cp->uio_dev != -1 && i < 15) {
637 msleep(100);
638 i++;
639 }
640 uio_unregister_device(cp->cnic_uinfo);
641 kfree(cp->cnic_uinfo);
642 cp->cnic_uinfo = NULL;
643 }
644
645 if (cp->l2_buf) {
646 pci_free_consistent(dev->pcidev, cp->l2_buf_size,
647 cp->l2_buf, cp->l2_buf_map);
648 cp->l2_buf = NULL;
649 }
650
651 if (cp->l2_ring) {
652 pci_free_consistent(dev->pcidev, cp->l2_ring_size,
653 cp->l2_ring, cp->l2_ring_map);
654 cp->l2_ring = NULL;
655 }
656
657 for (i = 0; i < cp->ctx_blks; i++) {
658 if (cp->ctx_arr[i].ctx) {
659 pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
660 cp->ctx_arr[i].ctx,
661 cp->ctx_arr[i].mapping);
662 cp->ctx_arr[i].ctx = NULL;
663 }
664 }
665 kfree(cp->ctx_arr);
666 cp->ctx_arr = NULL;
667 cp->ctx_blks = 0;
668
669 cnic_free_dma(dev, &cp->gbl_buf_info);
670 cnic_free_dma(dev, &cp->conn_buf_info);
671 cnic_free_dma(dev, &cp->kwq_info);
672 cnic_free_dma(dev, &cp->kcq_info);
673 kfree(cp->iscsi_tbl);
674 cp->iscsi_tbl = NULL;
675 kfree(cp->ctx_tbl);
676 cp->ctx_tbl = NULL;
677
678 cnic_free_id_tbl(&cp->cid_tbl);
679}
680
681static int cnic_alloc_context(struct cnic_dev *dev)
682{
683 struct cnic_local *cp = dev->cnic_priv;
684
685 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
686 int i, k, arr_size;
687
688 cp->ctx_blk_size = BCM_PAGE_SIZE;
689 cp->cids_per_blk = BCM_PAGE_SIZE / 128;
690 arr_size = BNX2_MAX_CID / cp->cids_per_blk *
691 sizeof(struct cnic_ctx);
692 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
693 if (cp->ctx_arr == NULL)
694 return -ENOMEM;
695
696 k = 0;
697 for (i = 0; i < 2; i++) {
698 u32 j, reg, off, lo, hi;
699
700 if (i == 0)
701 off = BNX2_PG_CTX_MAP;
702 else
703 off = BNX2_ISCSI_CTX_MAP;
704
705 reg = cnic_reg_rd_ind(dev, off);
706 lo = reg >> 16;
707 hi = reg & 0xffff;
708 for (j = lo; j < hi; j += cp->cids_per_blk, k++)
709 cp->ctx_arr[k].cid = j;
710 }
711
712 cp->ctx_blks = k;
713 if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
714 cp->ctx_blks = 0;
715 return -ENOMEM;
716 }
717
718 for (i = 0; i < cp->ctx_blks; i++) {
719 cp->ctx_arr[i].ctx =
720 pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
721 &cp->ctx_arr[i].mapping);
722 if (cp->ctx_arr[i].ctx == NULL)
723 return -ENOMEM;
724 }
725 }
726 return 0;
727}
728
729static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
730{
731 struct cnic_local *cp = dev->cnic_priv;
732 struct uio_info *uinfo;
733 int ret;
734
735 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
736 if (ret)
737 goto error;
738 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
739
740 ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
741 if (ret)
742 goto error;
743 cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
744
745 ret = cnic_alloc_context(dev);
746 if (ret)
747 goto error;
748
749 cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
750 cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
751 &cp->l2_ring_map);
752 if (!cp->l2_ring)
753 goto error;
754
755 cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
756 cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
757 cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
758 &cp->l2_buf_map);
759 if (!cp->l2_buf)
760 goto error;
761
762 uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
763 if (!uinfo)
764 goto error;
765
766 uinfo->mem[0].addr = dev->netdev->base_addr;
767 uinfo->mem[0].internal_addr = dev->regview;
768 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
769 uinfo->mem[0].memtype = UIO_MEM_PHYS;
770
771 uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
772 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
773 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
774 else
775 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
776 uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
777
778 uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
779 uinfo->mem[2].size = cp->l2_ring_size;
780 uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
781
782 uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
783 uinfo->mem[3].size = cp->l2_buf_size;
784 uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
785
786 uinfo->name = "bnx2_cnic";
787 uinfo->version = CNIC_MODULE_VERSION;
788 uinfo->irq = UIO_IRQ_CUSTOM;
789
790 uinfo->open = cnic_uio_open;
791 uinfo->release = cnic_uio_close;
792
793 uinfo->priv = dev;
794
795 ret = uio_register_device(&dev->pcidev->dev, uinfo);
796 if (ret) {
797 kfree(uinfo);
798 goto error;
799 }
800
801 cp->cnic_uinfo = uinfo;
802
803 return 0;
804
805error:
806 cnic_free_resc(dev);
807 return ret;
808}
809
810static inline u32 cnic_kwq_avail(struct cnic_local *cp)
811{
812 return cp->max_kwq_idx -
813 ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
814}
815
816static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
817 u32 num_wqes)
818{
819 struct cnic_local *cp = dev->cnic_priv;
820 struct kwqe *prod_qe;
821 u16 prod, sw_prod, i;
822
823 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
824 return -EAGAIN; /* bnx2 is down */
825
826 spin_lock_bh(&cp->cnic_ulp_lock);
827 if (num_wqes > cnic_kwq_avail(cp) &&
828 !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
829 spin_unlock_bh(&cp->cnic_ulp_lock);
830 return -EAGAIN;
831 }
832
833 cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
834
835 prod = cp->kwq_prod_idx;
836 sw_prod = prod & MAX_KWQ_IDX;
837 for (i = 0; i < num_wqes; i++) {
838 prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
839 memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
840 prod++;
841 sw_prod = prod & MAX_KWQ_IDX;
842 }
843 cp->kwq_prod_idx = prod;
844
845 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
846
847 spin_unlock_bh(&cp->cnic_ulp_lock);
848 return 0;
849}
850
851static void service_kcqes(struct cnic_dev *dev, int num_cqes)
852{
853 struct cnic_local *cp = dev->cnic_priv;
854 int i, j;
855
856 i = 0;
857 j = 1;
858 while (num_cqes) {
859 struct cnic_ulp_ops *ulp_ops;
860 int ulp_type;
861 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
862 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
863
864 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
865 cnic_kwq_completion(dev, 1);
866
867 while (j < num_cqes) {
868 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
869
870 if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
871 break;
872
873 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
874 cnic_kwq_completion(dev, 1);
875 j++;
876 }
877
878 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
879 ulp_type = CNIC_ULP_RDMA;
880 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
881 ulp_type = CNIC_ULP_ISCSI;
882 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
883 ulp_type = CNIC_ULP_L4;
884 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
885 goto end;
886 else {
887 printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
888 dev->netdev->name, kcqe_op_flag);
889 goto end;
890 }
891
892 rcu_read_lock();
893 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
894 if (likely(ulp_ops)) {
895 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
896 cp->completed_kcq + i, j);
897 }
898 rcu_read_unlock();
899end:
900 num_cqes -= j;
901 i += j;
902 j = 1;
903 }
904 return;
905}
906
907static u16 cnic_bnx2_next_idx(u16 idx)
908{
909 return idx + 1;
910}
911
912static u16 cnic_bnx2_hw_idx(u16 idx)
913{
914 return idx;
915}
916
917static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
918{
919 struct cnic_local *cp = dev->cnic_priv;
920 u16 i, ri, last;
921 struct kcqe *kcqe;
922 int kcqe_cnt = 0, last_cnt = 0;
923
924 i = ri = last = *sw_prod;
925 ri &= MAX_KCQ_IDX;
926
927 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
928 kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
929 cp->completed_kcq[kcqe_cnt++] = kcqe;
930 i = cp->next_idx(i);
931 ri = i & MAX_KCQ_IDX;
932 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
933 last_cnt = kcqe_cnt;
934 last = i;
935 }
936 }
937
938 *sw_prod = last;
939 return last_cnt;
940}
941
942static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
943{
944 u16 rx_cons = *cp->rx_cons_ptr;
945 u16 tx_cons = *cp->tx_cons_ptr;
946
947 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
948 cp->tx_cons = tx_cons;
949 cp->rx_cons = rx_cons;
950 uio_event_notify(cp->cnic_uinfo);
951 }
952}
953
954static int cnic_service_bnx2(void *data, void *status_blk)
955{
956 struct cnic_dev *dev = data;
957 struct status_block *sblk = status_blk;
958 struct cnic_local *cp = dev->cnic_priv;
959 u32 status_idx = sblk->status_idx;
960 u16 hw_prod, sw_prod;
961 int kcqe_cnt;
962
963 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
964 return status_idx;
965
966 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
967
968 hw_prod = sblk->status_completion_producer_index;
969 sw_prod = cp->kcq_prod_idx;
970 while (sw_prod != hw_prod) {
971 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
972 if (kcqe_cnt == 0)
973 goto done;
974
975 service_kcqes(dev, kcqe_cnt);
976
977 /* Tell compiler that status_blk fields can change. */
978 barrier();
979 if (status_idx != sblk->status_idx) {
980 status_idx = sblk->status_idx;
981 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
982 hw_prod = sblk->status_completion_producer_index;
983 } else
984 break;
985 }
986
987done:
988 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
989
990 cp->kcq_prod_idx = sw_prod;
991
992 cnic_chk_bnx2_pkt_rings(cp);
993 return status_idx;
994}
995
996static void cnic_service_bnx2_msix(unsigned long data)
997{
998 struct cnic_dev *dev = (struct cnic_dev *) data;
999 struct cnic_local *cp = dev->cnic_priv;
1000 struct status_block_msix *status_blk = cp->bnx2_status_blk;
1001 u32 status_idx = status_blk->status_idx;
1002 u16 hw_prod, sw_prod;
1003 int kcqe_cnt;
1004
1005 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
1006
1007 hw_prod = status_blk->status_completion_producer_index;
1008 sw_prod = cp->kcq_prod_idx;
1009 while (sw_prod != hw_prod) {
1010 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
1011 if (kcqe_cnt == 0)
1012 goto done;
1013
1014 service_kcqes(dev, kcqe_cnt);
1015
1016 /* Tell compiler that status_blk fields can change. */
1017 barrier();
1018 if (status_idx != status_blk->status_idx) {
1019 status_idx = status_blk->status_idx;
1020 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
1021 hw_prod = status_blk->status_completion_producer_index;
1022 } else
1023 break;
1024 }
1025
1026done:
1027 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
1028 cp->kcq_prod_idx = sw_prod;
1029
1030 cnic_chk_bnx2_pkt_rings(cp);
1031
1032 cp->last_status_idx = status_idx;
1033 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
1034 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
1035}
1036
1037static irqreturn_t cnic_irq(int irq, void *dev_instance)
1038{
1039 struct cnic_dev *dev = dev_instance;
1040 struct cnic_local *cp = dev->cnic_priv;
1041 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
1042
1043 if (cp->ack_int)
1044 cp->ack_int(dev);
1045
1046 prefetch(cp->status_blk);
1047 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
1048
1049 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
1050 tasklet_schedule(&cp->cnic_irq_task);
1051
1052 return IRQ_HANDLED;
1053}
1054
1055static void cnic_ulp_stop(struct cnic_dev *dev)
1056{
1057 struct cnic_local *cp = dev->cnic_priv;
1058 int if_type;
1059
1060 rcu_read_lock();
1061 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1062 struct cnic_ulp_ops *ulp_ops;
1063
1064 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
1065 if (!ulp_ops)
1066 continue;
1067
1068 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1069 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
1070 }
1071 rcu_read_unlock();
1072}
1073
1074static void cnic_ulp_start(struct cnic_dev *dev)
1075{
1076 struct cnic_local *cp = dev->cnic_priv;
1077 int if_type;
1078
1079 rcu_read_lock();
1080 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1081 struct cnic_ulp_ops *ulp_ops;
1082
1083 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
1084 if (!ulp_ops || !ulp_ops->cnic_start)
1085 continue;
1086
1087 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1088 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
1089 }
1090 rcu_read_unlock();
1091}
1092
1093static int cnic_ctl(void *data, struct cnic_ctl_info *info)
1094{
1095 struct cnic_dev *dev = data;
1096
1097 switch (info->cmd) {
1098 case CNIC_CTL_STOP_CMD:
1099 cnic_hold(dev);
1100 mutex_lock(&cnic_lock);
1101
1102 cnic_ulp_stop(dev);
1103 cnic_stop_hw(dev);
1104
1105 mutex_unlock(&cnic_lock);
1106 cnic_put(dev);
1107 break;
1108 case CNIC_CTL_START_CMD:
1109 cnic_hold(dev);
1110 mutex_lock(&cnic_lock);
1111
1112 if (!cnic_start_hw(dev))
1113 cnic_ulp_start(dev);
1114
1115 mutex_unlock(&cnic_lock);
1116 cnic_put(dev);
1117 break;
1118 default:
1119 return -EINVAL;
1120 }
1121 return 0;
1122}
1123
1124static void cnic_ulp_init(struct cnic_dev *dev)
1125{
1126 int i;
1127 struct cnic_local *cp = dev->cnic_priv;
1128
1129 rcu_read_lock();
1130 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1131 struct cnic_ulp_ops *ulp_ops;
1132
1133 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
1134 if (!ulp_ops || !ulp_ops->cnic_init)
1135 continue;
1136
1137 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1138 ulp_ops->cnic_init(dev);
1139
1140 }
1141 rcu_read_unlock();
1142}
1143
1144static void cnic_ulp_exit(struct cnic_dev *dev)
1145{
1146 int i;
1147 struct cnic_local *cp = dev->cnic_priv;
1148
1149 rcu_read_lock();
1150 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1151 struct cnic_ulp_ops *ulp_ops;
1152
1153 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
1154 if (!ulp_ops || !ulp_ops->cnic_exit)
1155 continue;
1156
1157 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1158 ulp_ops->cnic_exit(dev);
1159
1160 }
1161 rcu_read_unlock();
1162}
1163
1164static int cnic_cm_offload_pg(struct cnic_sock *csk)
1165{
1166 struct cnic_dev *dev = csk->dev;
1167 struct l4_kwq_offload_pg *l4kwqe;
1168 struct kwqe *wqes[1];
1169
1170 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
1171 memset(l4kwqe, 0, sizeof(*l4kwqe));
1172 wqes[0] = (struct kwqe *) l4kwqe;
1173
1174 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
1175 l4kwqe->flags =
1176 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
1177 l4kwqe->l2hdr_nbytes = ETH_HLEN;
1178
1179 l4kwqe->da0 = csk->ha[0];
1180 l4kwqe->da1 = csk->ha[1];
1181 l4kwqe->da2 = csk->ha[2];
1182 l4kwqe->da3 = csk->ha[3];
1183 l4kwqe->da4 = csk->ha[4];
1184 l4kwqe->da5 = csk->ha[5];
1185
1186 l4kwqe->sa0 = dev->mac_addr[0];
1187 l4kwqe->sa1 = dev->mac_addr[1];
1188 l4kwqe->sa2 = dev->mac_addr[2];
1189 l4kwqe->sa3 = dev->mac_addr[3];
1190 l4kwqe->sa4 = dev->mac_addr[4];
1191 l4kwqe->sa5 = dev->mac_addr[5];
1192
1193 l4kwqe->etype = ETH_P_IP;
1194 l4kwqe->ipid_count = DEF_IPID_COUNT;
1195 l4kwqe->host_opaque = csk->l5_cid;
1196
1197 if (csk->vlan_id) {
1198 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
1199 l4kwqe->vlan_tag = csk->vlan_id;
1200 l4kwqe->l2hdr_nbytes += 4;
1201 }
1202
1203 return dev->submit_kwqes(dev, wqes, 1);
1204}
1205
1206static int cnic_cm_update_pg(struct cnic_sock *csk)
1207{
1208 struct cnic_dev *dev = csk->dev;
1209 struct l4_kwq_update_pg *l4kwqe;
1210 struct kwqe *wqes[1];
1211
1212 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
1213 memset(l4kwqe, 0, sizeof(*l4kwqe));
1214 wqes[0] = (struct kwqe *) l4kwqe;
1215
1216 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
1217 l4kwqe->flags =
1218 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
1219 l4kwqe->pg_cid = csk->pg_cid;
1220
1221 l4kwqe->da0 = csk->ha[0];
1222 l4kwqe->da1 = csk->ha[1];
1223 l4kwqe->da2 = csk->ha[2];
1224 l4kwqe->da3 = csk->ha[3];
1225 l4kwqe->da4 = csk->ha[4];
1226 l4kwqe->da5 = csk->ha[5];
1227
1228 l4kwqe->pg_host_opaque = csk->l5_cid;
1229 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
1230
1231 return dev->submit_kwqes(dev, wqes, 1);
1232}
1233
1234static int cnic_cm_upload_pg(struct cnic_sock *csk)
1235{
1236 struct cnic_dev *dev = csk->dev;
1237 struct l4_kwq_upload *l4kwqe;
1238 struct kwqe *wqes[1];
1239
1240 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
1241 memset(l4kwqe, 0, sizeof(*l4kwqe));
1242 wqes[0] = (struct kwqe *) l4kwqe;
1243
1244 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
1245 l4kwqe->flags =
1246 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
1247 l4kwqe->cid = csk->pg_cid;
1248
1249 return dev->submit_kwqes(dev, wqes, 1);
1250}
1251
1252static int cnic_cm_conn_req(struct cnic_sock *csk)
1253{
1254 struct cnic_dev *dev = csk->dev;
1255 struct l4_kwq_connect_req1 *l4kwqe1;
1256 struct l4_kwq_connect_req2 *l4kwqe2;
1257 struct l4_kwq_connect_req3 *l4kwqe3;
1258 struct kwqe *wqes[3];
1259 u8 tcp_flags = 0;
1260 int num_wqes = 2;
1261
1262 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
1263 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
1264 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
1265 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
1266 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
1267 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
1268
1269 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
1270 l4kwqe3->flags =
1271 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
1272 l4kwqe3->ka_timeout = csk->ka_timeout;
1273 l4kwqe3->ka_interval = csk->ka_interval;
1274 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
1275 l4kwqe3->tos = csk->tos;
1276 l4kwqe3->ttl = csk->ttl;
1277 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
1278 l4kwqe3->pmtu = csk->mtu;
1279 l4kwqe3->rcv_buf = csk->rcv_buf;
1280 l4kwqe3->snd_buf = csk->snd_buf;
1281 l4kwqe3->seed = csk->seed;
1282
1283 wqes[0] = (struct kwqe *) l4kwqe1;
1284 if (test_bit(SK_F_IPV6, &csk->flags)) {
1285 wqes[1] = (struct kwqe *) l4kwqe2;
1286 wqes[2] = (struct kwqe *) l4kwqe3;
1287 num_wqes = 3;
1288
1289 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
1290 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
1291 l4kwqe2->flags =
1292 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
1293 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
1294 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
1295 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
1296 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
1297 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
1298 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
1299 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
1300 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
1301 sizeof(struct tcphdr);
1302 } else {
1303 wqes[1] = (struct kwqe *) l4kwqe3;
1304 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
1305 sizeof(struct tcphdr);
1306 }
1307
1308 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
1309 l4kwqe1->flags =
1310 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
1311 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
1312 l4kwqe1->cid = csk->cid;
1313 l4kwqe1->pg_cid = csk->pg_cid;
1314 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
1315 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
1316 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
1317 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
1318 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
1319 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
1320 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
1321 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
1322 if (csk->tcp_flags & SK_TCP_NAGLE)
1323 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
1324 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
1325 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
1326 if (csk->tcp_flags & SK_TCP_SACK)
1327 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
1328 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
1329 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
1330
1331 l4kwqe1->tcp_flags = tcp_flags;
1332
1333 return dev->submit_kwqes(dev, wqes, num_wqes);
1334}
1335
1336static int cnic_cm_close_req(struct cnic_sock *csk)
1337{
1338 struct cnic_dev *dev = csk->dev;
1339 struct l4_kwq_close_req *l4kwqe;
1340 struct kwqe *wqes[1];
1341
1342 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
1343 memset(l4kwqe, 0, sizeof(*l4kwqe));
1344 wqes[0] = (struct kwqe *) l4kwqe;
1345
1346 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
1347 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
1348 l4kwqe->cid = csk->cid;
1349
1350 return dev->submit_kwqes(dev, wqes, 1);
1351}
1352
1353static int cnic_cm_abort_req(struct cnic_sock *csk)
1354{
1355 struct cnic_dev *dev = csk->dev;
1356 struct l4_kwq_reset_req *l4kwqe;
1357 struct kwqe *wqes[1];
1358
1359 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
1360 memset(l4kwqe, 0, sizeof(*l4kwqe));
1361 wqes[0] = (struct kwqe *) l4kwqe;
1362
1363 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
1364 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
1365 l4kwqe->cid = csk->cid;
1366
1367 return dev->submit_kwqes(dev, wqes, 1);
1368}
1369
1370static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
1371 u32 l5_cid, struct cnic_sock **csk, void *context)
1372{
1373 struct cnic_local *cp = dev->cnic_priv;
1374 struct cnic_sock *csk1;
1375
1376 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1377 return -EINVAL;
1378
1379 csk1 = &cp->csk_tbl[l5_cid];
1380 if (atomic_read(&csk1->ref_count))
1381 return -EAGAIN;
1382
1383 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
1384 return -EBUSY;
1385
1386 csk1->dev = dev;
1387 csk1->cid = cid;
1388 csk1->l5_cid = l5_cid;
1389 csk1->ulp_type = ulp_type;
1390 csk1->context = context;
1391
1392 csk1->ka_timeout = DEF_KA_TIMEOUT;
1393 csk1->ka_interval = DEF_KA_INTERVAL;
1394 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
1395 csk1->tos = DEF_TOS;
1396 csk1->ttl = DEF_TTL;
1397 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
1398 csk1->rcv_buf = DEF_RCV_BUF;
1399 csk1->snd_buf = DEF_SND_BUF;
1400 csk1->seed = DEF_SEED;
1401
1402 *csk = csk1;
1403 return 0;
1404}
1405
1406static void cnic_cm_cleanup(struct cnic_sock *csk)
1407{
1408 if (csk->src_port) {
1409 struct cnic_dev *dev = csk->dev;
1410 struct cnic_local *cp = dev->cnic_priv;
1411
1412 cnic_free_id(&cp->csk_port_tbl, csk->src_port);
1413 csk->src_port = 0;
1414 }
1415}
1416
1417static void cnic_close_conn(struct cnic_sock *csk)
1418{
1419 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
1420 cnic_cm_upload_pg(csk);
1421 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
1422 }
1423 cnic_cm_cleanup(csk);
1424}
1425
1426static int cnic_cm_destroy(struct cnic_sock *csk)
1427{
1428 if (!cnic_in_use(csk))
1429 return -EINVAL;
1430
1431 csk_hold(csk);
1432 clear_bit(SK_F_INUSE, &csk->flags);
1433 smp_mb__after_clear_bit();
1434 while (atomic_read(&csk->ref_count) != 1)
1435 msleep(1);
1436 cnic_cm_cleanup(csk);
1437
1438 csk->flags = 0;
1439 csk_put(csk);
1440 return 0;
1441}
1442
1443static inline u16 cnic_get_vlan(struct net_device *dev,
1444 struct net_device **vlan_dev)
1445{
1446 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1447 *vlan_dev = vlan_dev_real_dev(dev);
1448 return vlan_dev_vlan_id(dev);
1449 }
1450 *vlan_dev = dev;
1451 return 0;
1452}
1453
1454static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
1455 struct dst_entry **dst)
1456{
1457 struct flowi fl;
1458 int err;
1459 struct rtable *rt;
1460
1461 memset(&fl, 0, sizeof(fl));
1462 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
1463
1464 err = ip_route_output_key(&init_net, &rt, &fl);
1465 if (!err)
1466 *dst = &rt->u.dst;
1467 return err;
1468}
1469
1470static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
1471 struct dst_entry **dst)
1472{
1473#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1474 struct flowi fl;
1475
1476 memset(&fl, 0, sizeof(fl));
1477 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
1478 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
1479 fl.oif = dst_addr->sin6_scope_id;
1480
1481 *dst = ip6_route_output(&init_net, NULL, &fl);
1482 if (*dst)
1483 return 0;
1484#endif
1485
1486 return -ENETUNREACH;
1487}
1488
1489static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
1490 int ulp_type)
1491{
1492 struct cnic_dev *dev = NULL;
1493 struct dst_entry *dst;
1494 struct net_device *netdev = NULL;
1495 int err = -ENETUNREACH;
1496
1497 if (dst_addr->sin_family == AF_INET)
1498 err = cnic_get_v4_route(dst_addr, &dst);
1499 else if (dst_addr->sin_family == AF_INET6) {
1500 struct sockaddr_in6 *dst_addr6 =
1501 (struct sockaddr_in6 *) dst_addr;
1502
1503 err = cnic_get_v6_route(dst_addr6, &dst);
1504 } else
1505 return NULL;
1506
1507 if (err)
1508 return NULL;
1509
1510 if (!dst->dev)
1511 goto done;
1512
1513 cnic_get_vlan(dst->dev, &netdev);
1514
1515 dev = cnic_from_netdev(netdev);
1516
1517done:
1518 dst_release(dst);
1519 if (dev)
1520 cnic_put(dev);
1521 return dev;
1522}
1523
1524static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1525{
1526 struct cnic_dev *dev = csk->dev;
1527 struct cnic_local *cp = dev->cnic_priv;
1528
1529 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
1530}
1531
1532static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1533{
1534 struct cnic_dev *dev = csk->dev;
1535 struct cnic_local *cp = dev->cnic_priv;
1536 int is_v6, err, rc = -ENETUNREACH;
1537 struct dst_entry *dst;
1538 struct net_device *realdev;
1539 u32 local_port;
1540
1541 if (saddr->local.v6.sin6_family == AF_INET6 &&
1542 saddr->remote.v6.sin6_family == AF_INET6)
1543 is_v6 = 1;
1544 else if (saddr->local.v4.sin_family == AF_INET &&
1545 saddr->remote.v4.sin_family == AF_INET)
1546 is_v6 = 0;
1547 else
1548 return -EINVAL;
1549
1550 clear_bit(SK_F_IPV6, &csk->flags);
1551
1552 if (is_v6) {
1553#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1554 set_bit(SK_F_IPV6, &csk->flags);
1555 err = cnic_get_v6_route(&saddr->remote.v6, &dst);
1556 if (err)
1557 return err;
1558
1559 if (!dst || dst->error || !dst->dev)
1560 goto err_out;
1561
1562 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
1563 sizeof(struct in6_addr));
1564 csk->dst_port = saddr->remote.v6.sin6_port;
1565 local_port = saddr->local.v6.sin6_port;
1566#else
1567 return rc;
1568#endif
1569
1570 } else {
1571 err = cnic_get_v4_route(&saddr->remote.v4, &dst);
1572 if (err)
1573 return err;
1574
1575 if (!dst || dst->error || !dst->dev)
1576 goto err_out;
1577
1578 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
1579 csk->dst_port = saddr->remote.v4.sin_port;
1580 local_port = saddr->local.v4.sin_port;
1581 }
1582
1583 csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
1584 if (realdev != dev->netdev)
1585 goto err_out;
1586
1587 if (local_port >= CNIC_LOCAL_PORT_MIN &&
1588 local_port < CNIC_LOCAL_PORT_MAX) {
1589 if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
1590 local_port = 0;
1591 } else
1592 local_port = 0;
1593
1594 if (!local_port) {
1595 local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
1596 if (local_port == -1) {
1597 rc = -ENOMEM;
1598 goto err_out;
1599 }
1600 }
1601 csk->src_port = local_port;
1602
1603 csk->mtu = dst_mtu(dst);
1604 rc = 0;
1605
1606err_out:
1607 dst_release(dst);
1608 return rc;
1609}
1610
1611static void cnic_init_csk_state(struct cnic_sock *csk)
1612{
1613 csk->state = 0;
1614 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1615 clear_bit(SK_F_CLOSING, &csk->flags);
1616}
1617
1618static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1619{
1620 int err = 0;
1621
1622 if (!cnic_in_use(csk))
1623 return -EINVAL;
1624
1625 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
1626 return -EINVAL;
1627
1628 cnic_init_csk_state(csk);
1629
1630 err = cnic_get_route(csk, saddr);
1631 if (err)
1632 goto err_out;
1633
1634 err = cnic_resolve_addr(csk, saddr);
1635 if (!err)
1636 return 0;
1637
1638err_out:
1639 clear_bit(SK_F_CONNECT_START, &csk->flags);
1640 return err;
1641}
1642
1643static int cnic_cm_abort(struct cnic_sock *csk)
1644{
1645 struct cnic_local *cp = csk->dev->cnic_priv;
1646 u32 opcode;
1647
1648 if (!cnic_in_use(csk))
1649 return -EINVAL;
1650
1651 if (cnic_abort_prep(csk))
1652 return cnic_cm_abort_req(csk);
1653
1654 /* Getting here means that we haven't started connect, or
1655 * connect was not successful.
1656 */
1657
1658 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
1659 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1660 opcode = csk->state;
1661 else
1662 opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
1663 cp->close_conn(csk, opcode);
1664
1665 return 0;
1666}
1667
1668static int cnic_cm_close(struct cnic_sock *csk)
1669{
1670 if (!cnic_in_use(csk))
1671 return -EINVAL;
1672
1673 if (cnic_close_prep(csk)) {
1674 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
1675 return cnic_cm_close_req(csk);
1676 }
1677 return 0;
1678}
1679
1680static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
1681 u8 opcode)
1682{
1683 struct cnic_ulp_ops *ulp_ops;
1684 int ulp_type = csk->ulp_type;
1685
1686 rcu_read_lock();
1687 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1688 if (ulp_ops) {
1689 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
1690 ulp_ops->cm_connect_complete(csk);
1691 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
1692 ulp_ops->cm_close_complete(csk);
1693 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
1694 ulp_ops->cm_remote_abort(csk);
1695 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
1696 ulp_ops->cm_abort_complete(csk);
1697 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
1698 ulp_ops->cm_remote_close(csk);
1699 }
1700 rcu_read_unlock();
1701}
1702
1703static int cnic_cm_set_pg(struct cnic_sock *csk)
1704{
1705 if (cnic_offld_prep(csk)) {
1706 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1707 cnic_cm_update_pg(csk);
1708 else
1709 cnic_cm_offload_pg(csk);
1710 }
1711 return 0;
1712}
1713
1714static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
1715{
1716 struct cnic_local *cp = dev->cnic_priv;
1717 u32 l5_cid = kcqe->pg_host_opaque;
1718 u8 opcode = kcqe->op_code;
1719 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
1720
1721 csk_hold(csk);
1722 if (!cnic_in_use(csk))
1723 goto done;
1724
1725 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
1726 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1727 goto done;
1728 }
1729 csk->pg_cid = kcqe->pg_cid;
1730 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
1731 cnic_cm_conn_req(csk);
1732
1733done:
1734 csk_put(csk);
1735}
1736
1737static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
1738{
1739 struct cnic_local *cp = dev->cnic_priv;
1740 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
1741 u8 opcode = l4kcqe->op_code;
1742 u32 l5_cid;
1743 struct cnic_sock *csk;
1744
1745 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
1746 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
1747 cnic_cm_process_offld_pg(dev, l4kcqe);
1748 return;
1749 }
1750
1751 l5_cid = l4kcqe->conn_id;
1752 if (opcode & 0x80)
1753 l5_cid = l4kcqe->cid;
1754 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1755 return;
1756
1757 csk = &cp->csk_tbl[l5_cid];
1758 csk_hold(csk);
1759
1760 if (!cnic_in_use(csk)) {
1761 csk_put(csk);
1762 return;
1763 }
1764
1765 switch (opcode) {
1766 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
1767 if (l4kcqe->status == 0)
1768 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
1769
1770 smp_mb__before_clear_bit();
1771 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1772 cnic_cm_upcall(cp, csk, opcode);
1773 break;
1774
1775 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
1776 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
1777 csk->state = opcode;
1778 /* fall through */
1779 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
1780 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
1781 cp->close_conn(csk, opcode);
1782 break;
1783
1784 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
1785 cnic_cm_upcall(cp, csk, opcode);
1786 break;
1787 }
1788 csk_put(csk);
1789}
1790
1791static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
1792{
1793 struct cnic_dev *dev = data;
1794 int i;
1795
1796 for (i = 0; i < num; i++)
1797 cnic_cm_process_kcqe(dev, kcqe[i]);
1798}
1799
1800static struct cnic_ulp_ops cm_ulp_ops = {
1801 .indicate_kcqes = cnic_cm_indicate_kcqe,
1802};
1803
1804static void cnic_cm_free_mem(struct cnic_dev *dev)
1805{
1806 struct cnic_local *cp = dev->cnic_priv;
1807
1808 kfree(cp->csk_tbl);
1809 cp->csk_tbl = NULL;
1810 cnic_free_id_tbl(&cp->csk_port_tbl);
1811}
1812
1813static int cnic_cm_alloc_mem(struct cnic_dev *dev)
1814{
1815 struct cnic_local *cp = dev->cnic_priv;
1816
1817 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
1818 GFP_KERNEL);
1819 if (!cp->csk_tbl)
1820 return -ENOMEM;
1821
1822 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
1823 CNIC_LOCAL_PORT_MIN)) {
1824 cnic_cm_free_mem(dev);
1825 return -ENOMEM;
1826 }
1827 return 0;
1828}
1829
1830static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
1831{
1832 if ((opcode == csk->state) ||
1833 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
1834 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
1835 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
1836 return 1;
1837 }
1838 return 0;
1839}
1840
1841static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
1842{
1843 struct cnic_dev *dev = csk->dev;
1844 struct cnic_local *cp = dev->cnic_priv;
1845
1846 clear_bit(SK_F_CONNECT_START, &csk->flags);
1847 if (cnic_ready_to_close(csk, opcode)) {
1848 cnic_close_conn(csk);
1849 cnic_cm_upcall(cp, csk, opcode);
1850 }
1851}
1852
1853static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
1854{
1855}
1856
1857static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
1858{
1859 u32 seed;
1860
1861 get_random_bytes(&seed, 4);
1862 cnic_ctx_wr(dev, 45, 0, seed);
1863 return 0;
1864}
1865
1866static int cnic_cm_open(struct cnic_dev *dev)
1867{
1868 struct cnic_local *cp = dev->cnic_priv;
1869 int err;
1870
1871 err = cnic_cm_alloc_mem(dev);
1872 if (err)
1873 return err;
1874
1875 err = cp->start_cm(dev);
1876
1877 if (err)
1878 goto err_out;
1879
1880 dev->cm_create = cnic_cm_create;
1881 dev->cm_destroy = cnic_cm_destroy;
1882 dev->cm_connect = cnic_cm_connect;
1883 dev->cm_abort = cnic_cm_abort;
1884 dev->cm_close = cnic_cm_close;
1885 dev->cm_select_dev = cnic_cm_select_dev;
1886
1887 cp->ulp_handle[CNIC_ULP_L4] = dev;
1888 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
1889 return 0;
1890
1891err_out:
1892 cnic_cm_free_mem(dev);
1893 return err;
1894}
1895
1896static int cnic_cm_shutdown(struct cnic_dev *dev)
1897{
1898 struct cnic_local *cp = dev->cnic_priv;
1899 int i;
1900
1901 cp->stop_cm(dev);
1902
1903 if (!cp->csk_tbl)
1904 return 0;
1905
1906 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
1907 struct cnic_sock *csk = &cp->csk_tbl[i];
1908
1909 clear_bit(SK_F_INUSE, &csk->flags);
1910 cnic_cm_cleanup(csk);
1911 }
1912 cnic_cm_free_mem(dev);
1913
1914 return 0;
1915}
1916
1917static void cnic_init_context(struct cnic_dev *dev, u32 cid)
1918{
1919 struct cnic_local *cp = dev->cnic_priv;
1920 u32 cid_addr;
1921 int i;
1922
1923 if (CHIP_NUM(cp) == CHIP_NUM_5709)
1924 return;
1925
1926 cid_addr = GET_CID_ADDR(cid);
1927
1928 for (i = 0; i < CTX_SIZE; i += 4)
1929 cnic_ctx_wr(dev, cid_addr, i, 0);
1930}
1931
1932static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
1933{
1934 struct cnic_local *cp = dev->cnic_priv;
1935 int ret = 0, i;
1936 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
1937
1938 if (CHIP_NUM(cp) != CHIP_NUM_5709)
1939 return 0;
1940
1941 for (i = 0; i < cp->ctx_blks; i++) {
1942 int j;
1943 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
1944 u32 val;
1945
1946 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
1947
1948 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1949 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
1950 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1951 (u64) cp->ctx_arr[i].mapping >> 32);
1952 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
1953 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1954 for (j = 0; j < 10; j++) {
1955
1956 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1957 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1958 break;
1959 udelay(5);
1960 }
1961 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1962 ret = -EBUSY;
1963 break;
1964 }
1965 }
1966 return ret;
1967}
1968
1969static void cnic_free_irq(struct cnic_dev *dev)
1970{
1971 struct cnic_local *cp = dev->cnic_priv;
1972 struct cnic_eth_dev *ethdev = cp->ethdev;
1973
1974 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1975 cp->disable_int_sync(dev);
1976 tasklet_disable(&cp->cnic_irq_task);
1977 free_irq(ethdev->irq_arr[0].vector, dev);
1978 }
1979}
1980
1981static int cnic_init_bnx2_irq(struct cnic_dev *dev)
1982{
1983 struct cnic_local *cp = dev->cnic_priv;
1984 struct cnic_eth_dev *ethdev = cp->ethdev;
1985
1986 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1987 int err, i = 0;
1988 int sblk_num = cp->status_blk_num;
1989 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
1990 BNX2_HC_SB_CONFIG_1;
1991
1992 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
1993
1994 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
1995 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
1996 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
1997
1998 cp->bnx2_status_blk = cp->status_blk;
1999 cp->last_status_idx = cp->bnx2_status_blk->status_idx;
2000 tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
2001 (unsigned long) dev);
2002 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
2003 "cnic", dev);
2004 if (err) {
2005 tasklet_disable(&cp->cnic_irq_task);
2006 return err;
2007 }
2008 while (cp->bnx2_status_blk->status_completion_producer_index &&
2009 i < 10) {
2010 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
2011 1 << (11 + sblk_num));
2012 udelay(10);
2013 i++;
2014 barrier();
2015 }
2016 if (cp->bnx2_status_blk->status_completion_producer_index) {
2017 cnic_free_irq(dev);
2018 goto failed;
2019 }
2020
2021 } else {
2022 struct status_block *sblk = cp->status_blk;
2023 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
2024 int i = 0;
2025
2026 while (sblk->status_completion_producer_index && i < 10) {
2027 CNIC_WR(dev, BNX2_HC_COMMAND,
2028 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2029 udelay(10);
2030 i++;
2031 barrier();
2032 }
2033 if (sblk->status_completion_producer_index)
2034 goto failed;
2035
2036 }
2037 return 0;
2038
2039failed:
2040 printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n",
2041 dev->netdev->name);
2042 return -EBUSY;
2043}
2044
2045static void cnic_enable_bnx2_int(struct cnic_dev *dev)
2046{
2047 struct cnic_local *cp = dev->cnic_priv;
2048 struct cnic_eth_dev *ethdev = cp->ethdev;
2049
2050 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2051 return;
2052
2053 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2054 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2055}
2056
2057static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
2058{
2059 struct cnic_local *cp = dev->cnic_priv;
2060 struct cnic_eth_dev *ethdev = cp->ethdev;
2061
2062 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2063 return;
2064
2065 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2066 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2067 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
2068 synchronize_irq(ethdev->irq_arr[0].vector);
2069}
2070
2071static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
2072{
2073 struct cnic_local *cp = dev->cnic_priv;
2074 struct cnic_eth_dev *ethdev = cp->ethdev;
2075 u32 cid_addr, tx_cid, sb_id;
2076 u32 val, offset0, offset1, offset2, offset3;
2077 int i;
2078 struct tx_bd *txbd;
2079 dma_addr_t buf_map;
2080 struct status_block *s_blk = cp->status_blk;
2081
2082 sb_id = cp->status_blk_num;
2083 tx_cid = 20;
2084 cnic_init_context(dev, tx_cid);
2085 cnic_init_context(dev, tx_cid + 1);
2086 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
2087 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2088 struct status_block_msix *sblk = cp->status_blk;
2089
2090 tx_cid = TX_TSS_CID + sb_id - 1;
2091 cnic_init_context(dev, tx_cid);
2092 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
2093 (TX_TSS_CID << 7));
2094 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
2095 }
2096 cp->tx_cons = *cp->tx_cons_ptr;
2097
2098 cid_addr = GET_CID_ADDR(tx_cid);
2099 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
2100 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
2101
2102 for (i = 0; i < PHY_CTX_SIZE; i += 4)
2103 cnic_ctx_wr(dev, cid_addr2, i, 0);
2104
2105 offset0 = BNX2_L2CTX_TYPE_XI;
2106 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
2107 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
2108 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
2109 } else {
2110 offset0 = BNX2_L2CTX_TYPE;
2111 offset1 = BNX2_L2CTX_CMD_TYPE;
2112 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
2113 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
2114 }
2115 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
2116 cnic_ctx_wr(dev, cid_addr, offset0, val);
2117
2118 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
2119 cnic_ctx_wr(dev, cid_addr, offset1, val);
2120
2121 txbd = (struct tx_bd *) cp->l2_ring;
2122
2123 buf_map = cp->l2_buf_map;
2124 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
2125 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
2126 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2127 }
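	/* After the loop, txbd points at the last BD in the page; the
	 * ring's base address is written into the context and into that
	 * BD so the chain wraps back to the start of the ring. */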
2128 val = (u64) cp->l2_ring_map >> 32;
2129 cnic_ctx_wr(dev, cid_addr, offset2, val);
2130 txbd->tx_bd_haddr_hi = val;
2131
2132 val = (u64) cp->l2_ring_map & 0xffffffff;
2133 cnic_ctx_wr(dev, cid_addr, offset3, val);
2134 txbd->tx_bd_haddr_lo = val;
2135}
2136
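/* Set up the L2 RX context and buffer descriptor ring. Each RX BD
 * points at one of the preallocated receive buffers, and the last BD
 * chains back to the RX ring's base address. */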
2137static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
2138{
2139 struct cnic_local *cp = dev->cnic_priv;
2140 struct cnic_eth_dev *ethdev = cp->ethdev;
2141 u32 cid_addr, sb_id, val, coal_reg, coal_val;
2142 int i;
2143 struct rx_bd *rxbd;
2144 struct status_block *s_blk = cp->status_blk;
2145
2146 sb_id = cp->status_blk_num;
2147 cnic_init_context(dev, 2);
2148 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
2149 coal_reg = BNX2_HC_COMMAND;
2150 coal_val = CNIC_RD(dev, coal_reg);
2151 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2152 struct status_block_msix *sblk = cp->status_blk;
2153
2154 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
2155 coal_reg = BNX2_HC_COALESCE_NOW;
2156 coal_val = 1 << (11 + sb_id);
2157 }
2158 i = 0;
2159	while (*cp->rx_cons_ptr == 0 && i < 10) {
2160 CNIC_WR(dev, coal_reg, coal_val);
2161 udelay(10);
2162 i++;
2163 barrier();
2164 }
2165 cp->rx_cons = *cp->rx_cons_ptr;
2166
2167 cid_addr = GET_CID_ADDR(2);
2168 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
2169 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
2170 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
2171
2172 if (sb_id == 0)
2173 val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
2174 else
2175 val = BNX2_L2CTX_STATUSB_NUM(sb_id);
2176 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
2177
2178 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
2179 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
2180 dma_addr_t buf_map;
2181 int n = (i % cp->l2_rx_ring_size) + 1;
2182
2183 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
2184 rxbd->rx_bd_len = cp->l2_single_buf_size;
2185 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
2186 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
2187 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2188 }
2189 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
2190 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
2191 rxbd->rx_bd_haddr_hi = val;
2192
2193 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
2194 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
2195 rxbd->rx_bd_haddr_lo = val;
2196
2197 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
2198 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
2199}
2200
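/* Shut down the L2 RX ring by submitting a single L2 FLUSH kernel
 * work queue entry. */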
2201static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
2202{
2203 struct kwqe *wqes[1], l2kwqe;
2204
2205 memset(&l2kwqe, 0, sizeof(l2kwqe));
2206 wqes[0] = &l2kwqe;
2207 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
2208 (L2_KWQE_OPCODE_VALUE_FLUSH <<
2209 KWQE_OPCODE_SHIFT) | 2;
2210 dev->submit_kwqes(dev, wqes, 1);
2211}
2212
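/* Read the iSCSI MAC address for this PCI function from shared memory
 * and program it into the EMAC match and RPM sort registers. */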
2213static void cnic_set_bnx2_mac(struct cnic_dev *dev)
2214{
2215 struct cnic_local *cp = dev->cnic_priv;
2216 u32 val;
2217
2218 val = cp->func << 2;
2219
2220 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
2221
2222 val = cnic_reg_rd_ind(dev, cp->shmem_base +
2223 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
2224 dev->mac_addr[0] = (u8) (val >> 8);
2225 dev->mac_addr[1] = (u8) val;
2226
2227 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
2228
2229 val = cnic_reg_rd_ind(dev, cp->shmem_base +
2230 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
2231 dev->mac_addr[2] = (u8) (val >> 24);
2232 dev->mac_addr[3] = (u8) (val >> 16);
2233 dev->mac_addr[4] = (u8) (val >> 8);
2234 dev->mac_addr[5] = (u8) val;
2235
2236 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
2237
2238 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
2239 if (CHIP_NUM(cp) != CHIP_NUM_5709)
2240 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
2241
2242 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
2243 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
2244 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
2245}
2246
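/* Bring up the BNX2 CNIC hardware: program the MQ and HC registers,
 * initialize the kernel work queue (KWQ) and kernel completion queue
 * (KCQ) contexts, ring the CP and COM doorbells, set up the L2 rings
 * and finally hook up the IRQ. */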
2247static int cnic_start_bnx2_hw(struct cnic_dev *dev)
2248{
2249 struct cnic_local *cp = dev->cnic_priv;
2250 struct cnic_eth_dev *ethdev = cp->ethdev;
2251 struct status_block *sblk = cp->status_blk;
2252 u32 val;
2253 int err;
2254
2255 cnic_set_bnx2_mac(dev);
2256
2257 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
2258 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
2259 if (BCM_PAGE_BITS > 12)
2260 val |= (12 - 8) << 4;
2261 else
2262 val |= (BCM_PAGE_BITS - 8) << 4;
2263
2264 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
2265
2266 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
2267 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
2268 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
2269
2270 err = cnic_setup_5709_context(dev, 1);
2271 if (err)
2272 return err;
2273
2274 cnic_init_context(dev, KWQ_CID);
2275 cnic_init_context(dev, KCQ_CID);
2276
2277 cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
2278 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
2279
2280 cp->max_kwq_idx = MAX_KWQ_IDX;
2281 cp->kwq_prod_idx = 0;
2282 cp->kwq_con_idx = 0;
2283 cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
2284
2285 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
2286 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
2287 else
2288 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
2289
2290 /* Initialize the kernel work queue context. */
2291 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
2292 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
2293 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
2294
2295 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
2296 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
2297
2298 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
2299 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
2300
2301 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
2302 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
2303
2304 val = (u32) cp->kwq_info.pgtbl_map;
2305 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
2306
2307 cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
2308 cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
2309
2310 cp->kcq_prod_idx = 0;
2311
2312 /* Initialize the kernel complete queue context. */
2313 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
2314 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
2315 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
2316
2317 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
2318 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
2319
2320 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
2321 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
2322
2323 val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
2324 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
2325
2326 val = (u32) cp->kcq_info.pgtbl_map;
2327 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
2328
2329 cp->int_num = 0;
2330 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2331 u32 sb_id = cp->status_blk_num;
2332 u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
2333
2334 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
2335 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
2336 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
2337 }
2338
2339	/* Enable Command Scheduler notification when we write to the
2340	 * host producer index of the kernel contexts. */
2341 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
2342
2343 /* Enable Command Scheduler notification when we write to either
2344 * the Send Queue or Receive Queue producer indexes of the kernel
2345 * bypass contexts. */
2346 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
2347 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
2348
2349	/* Notify COM when the driver posts an application buffer. */
2350 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
2351
2352	/* Set the CP and COM doorbells. These two processors poll the
2353	 * doorbell for a non-zero value before running. This must be done
2354	 * after setting up the kernel queue contexts. */
2355 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
2356 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
2357
2358 cnic_init_bnx2_tx_ring(dev);
2359 cnic_init_bnx2_rx_ring(dev);
2360
2361 err = cnic_init_bnx2_irq(dev);
2362 if (err) {
2363 printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
2364 dev->netdev->name);
2365 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
2366 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
2367 return err;
2368 }
2369
2370 return 0;
2371}
2372
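/* Common bring-up path: register with the ethernet driver, pick up the
 * status block and chip id, allocate resources, start the chip-specific
 * hardware, open the connection manager and enable interrupts. */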
2373static int cnic_start_hw(struct cnic_dev *dev)
2374{
2375 struct cnic_local *cp = dev->cnic_priv;
2376 struct cnic_eth_dev *ethdev = cp->ethdev;
2377 int err;
2378
2379 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
2380 return -EALREADY;
2381
2382 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
2383 if (err) {
2384 printk(KERN_ERR PFX "%s: register_cnic failed\n",
2385 dev->netdev->name);
2386 goto err2;
2387 }
2388
2389 dev->regview = ethdev->io_base;
2390 cp->chip_id = ethdev->chip_id;
2391 pci_dev_get(dev->pcidev);
2392 cp->func = PCI_FUNC(dev->pcidev->devfn);
2393 cp->status_blk = ethdev->irq_arr[0].status_blk;
2394 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
2395
2396 err = cp->alloc_resc(dev);
2397 if (err) {
2398 printk(KERN_ERR PFX "%s: allocate resource failure\n",
2399 dev->netdev->name);
2400 goto err1;
2401 }
2402
2403 err = cp->start_hw(dev);
2404 if (err)
2405 goto err1;
2406
2407 err = cnic_cm_open(dev);
2408 if (err)
2409 goto err1;
2410
2411 set_bit(CNIC_F_CNIC_UP, &dev->flags);
2412
2413 cp->enable_int(dev);
2414
2415 return 0;
2416
2417err1:
2418 ethdev->drv_unregister_cnic(dev->netdev);
2419 cp->free_resc(dev);
2420 pci_dev_put(dev->pcidev);
2421err2:
2422 return err;
2423}
2424
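/* Tear down the BNX2 CNIC hardware: quiesce the interrupt, clear the
 * CP and COM doorbells, reset the kernel queue contexts and release
 * the IRQ and resources. */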
2425static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
2426{
2427 struct cnic_local *cp = dev->cnic_priv;
2428 struct cnic_eth_dev *ethdev = cp->ethdev;
2429
2430 cnic_disable_bnx2_int_sync(dev);
2431
2432 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
2433 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
2434
2435 cnic_init_context(dev, KWQ_CID);
2436 cnic_init_context(dev, KCQ_CID);
2437
2438 cnic_setup_5709_context(dev, 0);
2439 cnic_free_irq(dev);
2440
2441 ethdev->drv_unregister_cnic(dev->netdev);
2442
2443 cnic_free_resc(dev);
2444}
2445
2446static void cnic_stop_hw(struct cnic_dev *dev)
2447{
2448 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2449 struct cnic_local *cp = dev->cnic_priv;
2450
2451 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
2452 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
2453 synchronize_rcu();
2454 cnic_cm_shutdown(dev);
2455 cp->stop_hw(dev);
2456 pci_dev_put(dev->pcidev);
2457 }
2458}
2459
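/* Wait up to about a second for all references to be dropped before
 * freeing the device structure. */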
2460static void cnic_free_dev(struct cnic_dev *dev)
2461{
2462 int i = 0;
2463
2464 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
2465 msleep(100);
2466 i++;
2467 }
2468 if (atomic_read(&dev->ref_count) != 0)
2469 printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
2470 " to zero.\n", dev->netdev->name);
2471
2472 printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
2473 dev_put(dev->netdev);
2474 kfree(dev);
2475}
2476
2477static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
2478 struct pci_dev *pdev)
2479{
2480 struct cnic_dev *cdev;
2481 struct cnic_local *cp;
2482 int alloc_size;
2483
2484 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
2485
2486	cdev = kzalloc(alloc_size, GFP_KERNEL);
2487 if (cdev == NULL) {
2488 printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
2489 dev->name);
2490 return NULL;
2491 }
2492
2493 cdev->netdev = dev;
2494 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
2495 cdev->register_device = cnic_register_device;
2496 cdev->unregister_device = cnic_unregister_device;
2497 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
2498
2499 cp = cdev->cnic_priv;
2500 cp->dev = cdev;
2501 cp->uio_dev = -1;
2502 cp->l2_single_buf_size = 0x400;
2503 cp->l2_rx_ring_size = 3;
2504
2505 spin_lock_init(&cp->cnic_ulp_lock);
2506
2507 printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
2508
2509 return cdev;
2510}
2511
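/* Probe a bnx2 netdev for CNIC support. bnx2_cnic_probe is resolved at
 * runtime with __symbol_get() so this module does not take a hard
 * symbol dependency on bnx2; 5709 parts with a PCI revision below 0x10
 * are not supported. */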
2512static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
2513{
2514 struct pci_dev *pdev;
2515 struct cnic_dev *cdev;
2516 struct cnic_local *cp;
2517 struct cnic_eth_dev *ethdev = NULL;
2518 struct cnic_eth_dev *(*probe)(void *) = NULL;
2519
2520 probe = __symbol_get("bnx2_cnic_probe");
2521 if (probe) {
2522 ethdev = (*probe)(dev);
2523 symbol_put_addr(probe);
2524 }
2525 if (!ethdev)
2526 return NULL;
2527
2528 pdev = ethdev->pdev;
2529 if (!pdev)
2530 return NULL;
2531
2532 dev_hold(dev);
2533 pci_dev_get(pdev);
2534 if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
2535 pdev->device == PCI_DEVICE_ID_NX2_5709S) {
2536 u8 rev;
2537
2538 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
2539 if (rev < 0x10) {
2540 pci_dev_put(pdev);
2541 goto cnic_err;
2542 }
2543 }
2544 pci_dev_put(pdev);
2545
2546 cdev = cnic_alloc_dev(dev, pdev);
2547 if (cdev == NULL)
2548 goto cnic_err;
2549
2550 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
2551 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
2552
2553 cp = cdev->cnic_priv;
2554 cp->ethdev = ethdev;
2555 cdev->pcidev = pdev;
2556
2557 cp->cnic_ops = &cnic_bnx2_ops;
2558 cp->start_hw = cnic_start_bnx2_hw;
2559 cp->stop_hw = cnic_stop_bnx2_hw;
2560 cp->setup_pgtbl = cnic_setup_page_tbl;
2561 cp->alloc_resc = cnic_alloc_bnx2_resc;
2562 cp->free_resc = cnic_free_resc;
2563 cp->start_cm = cnic_cm_init_bnx2_hw;
2564 cp->stop_cm = cnic_cm_stop_bnx2_hw;
2565 cp->enable_int = cnic_enable_bnx2_int;
2566 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
2567 cp->close_conn = cnic_close_bnx2_conn;
2568 cp->next_idx = cnic_bnx2_next_idx;
2569 cp->hw_idx = cnic_bnx2_hw_idx;
2570 return cdev;
2571
2572cnic_err:
2573 dev_put(dev);
2574 return NULL;
2575}
2576
2577static struct cnic_dev *is_cnic_dev(struct net_device *dev)
2578{
2579 struct ethtool_drvinfo drvinfo;
2580 struct cnic_dev *cdev = NULL;
2581
2582 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
2583 memset(&drvinfo, 0, sizeof(drvinfo));
2584 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
2585
2586 if (!strcmp(drvinfo.driver, "bnx2"))
2587 cdev = init_bnx2_cnic(dev);
2588 if (cdev) {
2589 write_lock(&cnic_dev_lock);
2590 list_add(&cdev->list, &cnic_dev_list);
2591 write_unlock(&cnic_dev_lock);
2592 }
2593 }
2594 return cdev;
2595}
2596
2597/*
2598 * netdev event handler
2599 */
2600static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2601 void *ptr)
2602{
2603 struct net_device *netdev = ptr;
2604 struct cnic_dev *dev;
2605 int if_type;
2606 int new_dev = 0;
2607
2608 dev = cnic_from_netdev(netdev);
2609
2610 if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
2611 /* Check for the hot-plug device */
2612 dev = is_cnic_dev(netdev);
2613 if (dev) {
2614 new_dev = 1;
2615 cnic_hold(dev);
2616 }
2617 }
2618 if (dev) {
2619 struct cnic_local *cp = dev->cnic_priv;
2620
2621 if (new_dev)
2622 cnic_ulp_init(dev);
2623 else if (event == NETDEV_UNREGISTER)
2624 cnic_ulp_exit(dev);
2625 else if (event == NETDEV_UP) {
2626 mutex_lock(&cnic_lock);
2627 if (!cnic_start_hw(dev))
2628 cnic_ulp_start(dev);
2629 mutex_unlock(&cnic_lock);
2630 }
2631
2632 rcu_read_lock();
2633 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2634 struct cnic_ulp_ops *ulp_ops;
2635 void *ctx;
2636
2637 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
2638 if (!ulp_ops || !ulp_ops->indicate_netevent)
2639 continue;
2640
2641 ctx = cp->ulp_handle[if_type];
2642
2643 ulp_ops->indicate_netevent(ctx, event);
2644 }
2645 rcu_read_unlock();
2646
2647 if (event == NETDEV_GOING_DOWN) {
2648 mutex_lock(&cnic_lock);
2649 cnic_ulp_stop(dev);
2650 cnic_stop_hw(dev);
2651 mutex_unlock(&cnic_lock);
2652 } else if (event == NETDEV_UNREGISTER) {
2653 write_lock(&cnic_dev_lock);
2654 list_del_init(&dev->list);
2655 write_unlock(&cnic_dev_lock);
2656
2657 cnic_put(dev);
2658 cnic_free_dev(dev);
2659 goto done;
2660 }
2661 cnic_put(dev);
2662 }
2663done:
2664 return NOTIFY_DONE;
2665}
2666
2667static struct notifier_block cnic_netdev_notifier = {
2668 .notifier_call = cnic_netdev_event
2669};
2670
2671static void cnic_release(void)
2672{
2673 struct cnic_dev *dev;
2674
2675 while (!list_empty(&cnic_dev_list)) {
2676 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
2677 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2678 cnic_ulp_stop(dev);
2679 cnic_stop_hw(dev);
2680 }
2681
2682 cnic_ulp_exit(dev);
2683 list_del_init(&dev->list);
2684 cnic_free_dev(dev);
2685 }
2686}
2687
2688static int __init cnic_init(void)
2689{
2690 int rc = 0;
2691
2692 printk(KERN_INFO "%s", version);
2693
2694 rc = register_netdevice_notifier(&cnic_netdev_notifier);
2695 if (rc) {
2696 cnic_release();
2697 return rc;
2698 }
2699
2700 return 0;
2701}
2702
2703static void __exit cnic_exit(void)
2704{
2705 unregister_netdevice_notifier(&cnic_netdev_notifier);
2706 cnic_release();
2707 return;
2708}
2709
2710module_init(cnic_init);
2711module_exit(cnic_exit);