author		David S. Miller <davem@davemloft.net>	2009-06-15 06:02:23 -0400
committer	David S. Miller <davem@davemloft.net>	2009-06-15 06:02:23 -0400
commit		9cbc1cb8cd46ce1f7645b9de249b2ce8460129bb (patch)
tree		8d104ec2a459346b99413b0b77421ca7b9936c1a /drivers/net
parent		ca44d6e60f9de26281fda203f58b570e1748c015 (diff)
parent		45e3e1935e2857c54783291107d33323b3ef33c8 (diff)

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/scsi/fcoe/fcoe.c
	net/core/drop_monitor.c
	net/core/net-traces.c
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/Kconfig	15
-rw-r--r--	drivers/net/Makefile	1
-rw-r--r--	drivers/net/appletalk/ltpc.c	2
-rw-r--r--	drivers/net/arm/ixp4xx_eth.c	26
-rw-r--r--	drivers/net/b44.h	2
-rw-r--r--	drivers/net/bnx2.c	193
-rw-r--r--	drivers/net/bnx2.h	18
-rw-r--r--	drivers/net/cnic.c	2717
-rw-r--r--	drivers/net/cnic.h	299
-rw-r--r--	drivers/net/cnic_defs.h	580
-rw-r--r--	drivers/net/cnic_if.h	299
-rw-r--r--	drivers/net/e100.c	2
-rw-r--r--	drivers/net/e1000e/e1000.h	2
-rw-r--r--	drivers/net/ehea/ehea.h	2
-rw-r--r--	drivers/net/igbvf/igbvf.h	2
-rw-r--r--	drivers/net/ipg.h	2
-rw-r--r--	drivers/net/mlx4/en_netdev.c	2
-rw-r--r--	drivers/net/mlx4/eq.c	4
-rw-r--r--	drivers/net/mlx4/main.c	14
-rw-r--r--	drivers/net/mlx4/mr.c	6
-rw-r--r--	drivers/net/mlx4/profile.c	2
-rw-r--r--	drivers/net/niu.h	4
-rw-r--r--	drivers/net/qlge/qlge_main.c	2
-rw-r--r--	drivers/net/qlge/qlge_mpi.c	6
-rw-r--r--	drivers/net/r8169.c	11
-rw-r--r--	drivers/net/skfp/h/smt.h	2
-rw-r--r--	drivers/net/smc91x.h	5
-rw-r--r--	drivers/net/tokenring/3c359.c	2
-rw-r--r--	drivers/net/tokenring/lanstreamer.c	2
-rw-r--r--	drivers/net/tokenring/olympic.c	2
-rw-r--r--	drivers/net/ucc_geth_ethtool.c	2
-rw-r--r--	drivers/net/usb/usbnet.c	2
-rw-r--r--	drivers/net/virtio_net.c	45
-rw-r--r--	drivers/net/wan/ixp4xx_hss.c	11
-rw-r--r--	drivers/net/wireless/Kconfig	2
-rw-r--r--	drivers/net/wireless/hostap/Kconfig	8
-rw-r--r--	drivers/net/wireless/iwlwifi/Kconfig	4
-rw-r--r--	drivers/net/wireless/rndis_wlan.c	2
-rw-r--r--	drivers/net/wireless/rt2x00/Kconfig	14
-rw-r--r--	drivers/net/wireless/rt2x00/rt2x00lib.h	2
-rw-r--r--	drivers/net/wireless/wavelan_cs.c	2
41 files changed, 4219 insertions(+), 101 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3f739cfd92fa..01f282cd0989 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1886,7 +1886,7 @@ config FEC_MPC52xx
 	---help---
 	  This option enables support for the MPC5200's on-chip
 	  Fast Ethernet Controller
-	  If compiled as module, it will be called 'fec_mpc52xx.ko'.
+	  If compiled as module, it will be called fec_mpc52xx.
 
 config FEC_MPC52xx_MDIO
 	bool "MPC52xx FEC MDIO bus driver"
@@ -1898,7 +1898,7 @@ config FEC_MPC52xx_MDIO
 	  (Motorola? industry standard).
 	  If your board uses an external PHY connected to FEC, enable this.
 	  If not sure, enable.
-	  If compiled as module, it will be called 'fec_mpc52xx_phy.ko'.
+	  If compiled as module, it will be called fec_mpc52xx_phy.
 
 config NE_H8300
 	tristate "NE2000 compatible support for H8/300"
@@ -2270,6 +2270,17 @@ config BNX2
 	  To compile this driver as a module, choose M here: the module
 	  will be called bnx2. This is recommended.
 
+config CNIC
+	tristate "Broadcom CNIC support"
+	depends on BNX2
+	depends on UIO
+	help
+	  This driver supports offload features of Broadcom NetXtremeII
+	  gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cnic. This is recommended.
+
 config SPIDER_NET
 	tristate "Spider Gigabit Ethernet driver"
 	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
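A note on how the new symbol is consumed: CNIC is a tristate, so a built-in (y) build defines CONFIG_CNIC and a modular (m) build defines CONFIG_CNIC_MODULE. The bnx2.c hunk further down keys its glue code off either one; a minimal sketch of that pattern, using the same names as the patch:

	/* Compile the cnic hooks into bnx2 for both y and m builds of CNIC. */
	#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
	#define BCM_CNIC 1		/* enables the cnic glue code in bnx2.c */
	#include "cnic_if.h"		/* bnx2 <-> cnic shared interface */
	#endif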
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1c378dd5933e..d366fb2b40e9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
 obj-$(CONFIG_BNX2X) += bnx2x.o
 bnx2x-objs := bnx2x_main.o bnx2x_link.o
 spidernet-y += spider_net.o spider_net_ethtool.o
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 78cc71469136..b642647170be 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1220,7 +1220,7 @@ static int __init ltpc_setup(char *str)
 		if (ints[0] > 2) {
 			dma = ints[3];
 		}
-		/* ignore any other paramters */
+		/* ignore any other parameters */
 	}
 	return 1;
 }
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 1fcf8388b1c8..6f42ad728915 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -456,7 +456,8 @@ static inline void queue_put_desc(unsigned int queue, u32 phys,
 	debug_desc(phys, desc);
 	BUG_ON(phys & 0x1F);
 	qmgr_put_entry(queue, phys);
-	BUG_ON(qmgr_stat_overflow(queue));
+	/* Don't check for queue overflow here, we've allocated sufficient
+	   length and queues >= 32 don't support this check anyway. */
 }
 
 
@@ -512,8 +513,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
 #endif
 		napi_complete(napi);
 		qmgr_enable_irq(rxq);
-		if (!qmgr_stat_empty(rxq) &&
-		    napi_reschedule(napi)) {
+		if (!qmgr_stat_below_low_watermark(rxq) &&
+		    napi_reschedule(napi)) { /* not empty again */
 #if DEBUG_RX
 			printk(KERN_DEBUG "%s: eth_poll"
 			       " napi_reschedule successed\n",
@@ -630,9 +631,9 @@ static void eth_txdone_irq(void *unused)
 		port->tx_buff_tab[n_desc] = NULL;
 	}
 
-	start = qmgr_stat_empty(port->plat->txreadyq);
+	start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
 	queue_put_desc(port->plat->txreadyq, phys, desc);
-	if (start) {
+	if (start) { /* TX-ready queue was empty */
 #if DEBUG_TX
 		printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
 		       port->netdev->name);
@@ -708,13 +709,14 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
 	dev->trans_start = jiffies;
 
-	if (qmgr_stat_empty(txreadyq)) {
+	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
 #if DEBUG_TX
 		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
 #endif
 		netif_stop_queue(dev);
 		/* we could miss TX ready interrupt */
-		if (!qmgr_stat_empty(txreadyq)) {
+		/* really empty in fact */
+		if (!qmgr_stat_below_low_watermark(txreadyq)) {
 #if DEBUG_TX
 			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
 			       dev->name);
@@ -814,29 +816,29 @@ static int request_queues(struct port *port)
 	int err;
 
 	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
 				 "%s:RX-free", port->netdev->name);
 	if (err)
 		return err;
 
 	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
 				 "%s:RX", port->netdev->name);
 	if (err)
 		goto rel_rxfree;
 
 	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
 				 "%s:TX", port->netdev->name);
 	if (err)
 		goto rel_rx;
 
 	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
 				 "%s:TX-ready", port->netdev->name);
 	if (err)
 		goto rel_tx;
 
 	/* TX-done queue handles skbs sent out by the NPEs */
 	if (!ports_open) {
 		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
 					 "%s:TX-done", DRV_NAME);
 		if (err)
 			goto rel_txready;
 	}
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 0443f6801f60..e1905a49279f 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -97,7 +97,7 @@
 #define B44_DMARX_STAT	0x021CUL /* DMA RX Current Active Desc. + Status */
 #define  DMARX_STAT_CDMASK	0x00000fff /* Current Descriptor Mask */
 #define  DMARX_STAT_SMASK	0x0000f000 /* State Mask */
-#define  DMARX_STAT_SDISABLED	0x00000000 /* State Disbaled */
+#define  DMARX_STAT_SDISABLED	0x00000000 /* State Disabled */
 #define  DMARX_STAT_SACTIVE	0x00001000 /* State Active */
 #define  DMARX_STAT_SIDLE	0x00002000 /* State Idle Wait */
 #define  DMARX_STAT_SSTOPPED	0x00003000 /* State Stopped */
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index f99e17e0a319..7e3738112c4e 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -50,6 +50,10 @@
 #include <linux/log2.h>
 #include <linux/list.h>
 
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
 #include "bnx2.h"
 #include "bnx2_fw.h"
 
@@ -316,6 +320,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 	spin_unlock_bh(&bp->indirect_lock);
 }
 
+#ifdef BCM_CNIC
+static int
+bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct drv_ctl_io *io = &info->data.io;
+
+	switch (info->cmd) {
+	case DRV_CTL_IO_WR_CMD:
+		bnx2_reg_wr_ind(bp, io->offset, io->data);
+		break;
+	case DRV_CTL_IO_RD_CMD:
+		io->data = bnx2_reg_rd_ind(bp, io->offset);
+		break;
+	case DRV_CTL_CTX_WR_CMD:
+		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	int sb_id;
+
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
+		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+		bnapi->cnic_present = 0;
+		sb_id = bp->irq_nvecs;
+		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+	} else {
+		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+		bnapi->cnic_tag = bnapi->last_status_idx;
+		bnapi->cnic_present = 1;
+		sb_id = 0;
+		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+	}
+
+	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
+	cp->irq_arr[0].status_blk = (void *)
+		((unsigned long) bnapi->status_blk.msi +
+		 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+	cp->irq_arr[0].status_blk_num = sb_id;
+	cp->num_irq = 1;
+}
+
+static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+			      void *data)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (ops == NULL)
+		return -EINVAL;
+
+	if (cp->drv_state & CNIC_DRV_STATE_REGD)
+		return -EBUSY;
+
+	bp->cnic_data = data;
+	rcu_assign_pointer(bp->cnic_ops, ops);
+
+	cp->num_irq = 0;
+	cp->drv_state = CNIC_DRV_STATE_REGD;
+
+	bnx2_setup_cnic_irq_info(bp);
+
+	return 0;
+}
+
+static int bnx2_unregister_cnic(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	cp->drv_state = 0;
+	bnapi->cnic_present = 0;
+	rcu_assign_pointer(bp->cnic_ops, NULL);
+	synchronize_rcu();
+	return 0;
+}
+
+struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	cp->drv_owner = THIS_MODULE;
+	cp->chip_id = bp->chip_id;
+	cp->pdev = bp->pdev;
+	cp->io_base = bp->regview;
+	cp->drv_ctl = bnx2_drv_ctl;
+	cp->drv_register_cnic = bnx2_register_cnic;
+	cp->drv_unregister_cnic = bnx2_unregister_cnic;
+
+	return cp;
+}
+EXPORT_SYMBOL(bnx2_cnic_probe);
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+	struct cnic_ops *c_ops;
+	struct cnic_ctl_info info;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops) {
+		info.cmd = CNIC_CTL_STOP_CMD;
+		c_ops->cnic_ctl(bp->cnic_data, &info);
+	}
+	rcu_read_unlock();
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+	struct cnic_ops *c_ops;
+	struct cnic_ctl_info info;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops) {
+		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
+			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+			bnapi->cnic_tag = bnapi->last_status_idx;
+		}
+		info.cmd = CNIC_CTL_START_CMD;
+		c_ops->cnic_ctl(bp->cnic_data, &info);
+	}
+	rcu_read_unlock();
+}
+
+#else
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+}
+
+#endif
+
 static int
 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 {
@@ -489,6 +645,7 @@ bnx2_napi_enable(struct bnx2 *bp)
 static void
 bnx2_netif_stop(struct bnx2 *bp)
 {
+	bnx2_cnic_stop(bp);
 	bnx2_disable_int_sync(bp);
 	if (netif_running(bp->dev)) {
 		bnx2_napi_disable(bp);
@@ -505,6 +662,7 @@ bnx2_netif_start(struct bnx2 *bp)
 			netif_tx_wake_all_queues(bp->dev);
 			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
+			bnx2_cnic_start(bp);
 		}
 	}
 }
@@ -3165,6 +3323,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
 	if (bnx2_has_fast_work(bnapi))
 		return 1;
 
+#ifdef BCM_CNIC
+	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
+		return 1;
+#endif
+
 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
 		return 1;
@@ -3194,6 +3357,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
 	bp->idle_chk_status_idx = bnapi->last_status_idx;
 }
 
+#ifdef BCM_CNIC
+static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+	struct cnic_ops *c_ops;
+
+	if (!bnapi->cnic_present)
+		return;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops)
+		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
+						      bnapi->status_blk.msi);
+	rcu_read_unlock();
+}
+#endif
+
 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
 	struct status_block *sblk = bnapi->status_blk.msi;
@@ -3268,6 +3448,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
+#ifdef BCM_CNIC
+		bnx2_poll_cnic(bp, bnapi);
+#endif
+
 		/* bnapi->last_status_idx is used below to tell the hw how
 		 * much work has been processed, so we must read it before
 		 * checking for more work.
@@ -4631,8 +4815,11 @@ bnx2_init_chip(struct bnx2 *bp)
 	val = REG_RD(bp, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
-	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
-		val |= BNX2_MQ_CONFIG_HALT_DIS;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			val |= BNX2_MQ_CONFIG_HALT_DIS;
+	}
 
 	REG_WR(bp, BNX2_MQ_CONFIG, val);
 
@@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
+	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
 	dev->mem_end = dev->mem_start + mem_len;
 	dev->irq = pdev->irq;
 
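Worth noting in the bnx2.c changes above: bp->cnic_ops is handled with the classic RCU ops-pointer pattern. bnx2_register_cnic() publishes the pointer with rcu_assign_pointer(), readers such as bnx2_cnic_stop() and bnx2_poll_cnic() dereference it under rcu_read_lock(), and bnx2_unregister_cnic() clears the pointer and calls synchronize_rcu() so the cnic module can safely disappear afterwards. A minimal standalone sketch of the same pattern (the my_* names are hypothetical, not part of the patch):

	struct my_ops {
		void (*handler)(void *data);
	};

	struct my_dev {
		struct my_ops *ops;
		void *data;
	};

	static void my_register(struct my_dev *d, struct my_ops *ops)
	{
		rcu_assign_pointer(d->ops, ops);	/* publish */
	}

	static void my_call(struct my_dev *d)
	{
		struct my_ops *ops;

		rcu_read_lock();
		ops = rcu_dereference(d->ops);		/* may be NULL */
		if (ops)
			ops->handler(d->data);
		rcu_read_unlock();
	}

	static void my_unregister(struct my_dev *d)
	{
		rcu_assign_pointer(d->ops, NULL);
		synchronize_rcu();	/* wait out readers before ops is freed */
	}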
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 026ed1c84698..f1edfaa9e56a 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -361,6 +361,9 @@ struct l2_fhdr {
 #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE	(1<<28)
 
 #define BNX2_L2CTX_HOST_BDIDX			0x00000004
+#define BNX2_L2CTX_STATUSB_NUM_SHIFT		16
+#define BNX2_L2CTX_STATUSB_NUM(sb_id)		\
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
 #define BNX2_L2CTX_HOST_BSEQ			0x00000008
 #define BNX2_L2CTX_NX_BSEQ			0x0000000c
 #define BNX2_L2CTX_NX_BDHADDR_HI		0x00000010
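A quick worked example of the new macro, straight from its definition:

	/*
	 * BNX2_L2CTX_STATUSB_NUM(0) == 0
	 * BNX2_L2CTX_STATUSB_NUM(1) == (1 + 7) << 16 == 0x00080000
	 * BNX2_L2CTX_STATUSB_NUM(2) == (2 + 7) << 16 == 0x00090000
	 *
	 * i.e. a non-zero status-block id is biased by 7 and placed at
	 * bit 16 of the L2 context word.
	 */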
@@ -5900,6 +5903,7 @@ struct l2_fhdr {
 #define BNX2_RXP_FTQ_CTL_CUR_DEPTH			(0x3ffL<<22)
 
 #define BNX2_RXP_SCRATCH				0x000e0000
+#define BNX2_RXP_SCRATCH_RXP_FLOOD			0x000e0024
 #define BNX2_RXP_SCRATCH_RSS_TBL_SZ			0x000e0038
 #define BNX2_RXP_SCRATCH_RSS_TBL			0x000e003c
 #define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES	128
@@ -6680,6 +6684,11 @@ struct bnx2_napi {
 	u32			last_status_idx;
 	u32			int_num;
 
+#ifdef BCM_CNIC
+	u32			cnic_tag;
+	int			cnic_present;
+#endif
+
 	struct bnx2_rx_ring_info	rx_ring;
 	struct bnx2_tx_ring_info	tx_ring;
 };
@@ -6729,6 +6738,11 @@ struct bnx2 {
 	int			tx_ring_size;
 	u32			tx_wake_thresh;
 
+#ifdef BCM_CNIC
+	struct cnic_ops		*cnic_ops;
+	void			*cnic_data;
+#endif
+
 	/* End of fields used in the performance code paths. */
 
 	unsigned int		current_interval;
@@ -6887,6 +6901,10 @@ struct bnx2 {
 
 	u32			idle_chk_status_idx;
 
+#ifdef BCM_CNIC
+	struct cnic_eth_dev	cnic_eth_dev;
+#endif
+
 	const struct firmware	*mips_firmware;
 	const struct firmware	*rv2p_firmware;
 };
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
new file mode 100644
index 000000000000..44f77eb1180f
--- /dev/null
+++ b/drivers/net/cnic.c
@@ -0,0 +1,2717 @@
/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/module.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"
#define PFX DRV_MODULE_NAME	": "

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	cp->uio_dev = iminor(inode);

	cnic_shutdown_bnx2_rx_ring(dev);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cp->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

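These three wrappers are the cnic side of the drv_ctl channel; on bnx2 hardware they land in bnx2_drv_ctl() from the bnx2.c hunk earlier in this patch. One indirect register write traced end to end:

/*
 * cnic_reg_wr_ind(dev, off, val)
 *   info.cmd   = DRV_CTL_IO_WR_CMD;
 *   io->offset = off;  io->data = val;
 *   ethdev->drv_ctl(dev->netdev, &info)   ->  bnx2_drv_ctl()
 *     case DRV_CTL_IO_WR_CMD:
 *       bnx2_reg_wr_ind(bp, io->offset, io->data);
 */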
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_COMPLETION_CMD;
	info.data.comp.comp_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
		       "been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (!cnic_ulp_tbl[ulp_type]) {
		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
		       "been registered\n", ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
			       "still has devices registered\n", ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
		       "has not been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
		       "been registered to this device\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		printk(KERN_ERR PFX "cnic_unregister_device: device not "
		       "registered to this ulp type %d\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

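The table is a plain bitmap, one bit per id; a sizing sketch for the kzalloc() above:

/*
 * DIV_ROUND_UP(size, 32) 32-bit words, 4 bytes each, gives exactly
 * one bit per id.  E.g. for size = 256 ids:
 *
 *   DIV_ROUND_UP(256, 32) * 4 = 8 * 4 = 32 bytes = 256 bits
 */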
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

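cnic_alloc_new_id() treats the bitmap as a circular space; a small worked example (assuming, as the mask arithmetic implies, that max is a power of two):

/*
 * With max = 8 and next = 5, find_next_zero_bit() scans bits 5..7
 * first; only if those are taken does find_first_zero_bit() scan
 * bits 0..4.  After a successful allocation of bit 7,
 *
 *   next = (7 + 1) & (8 - 1) = 0
 *
 * so the search wraps cleanly.  Note the advance relies on max
 * being a power of two.
 */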
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
					    dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
				    dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

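The chip wants each page-table entry big-endian, so the 64-bit DMA address is split high word first; for example:

/*
 * A DMA address of 0x0000000123458000 is written as:
 *
 *   page_table[2*i]     = 0x00000001   (bits 63..32)
 *   page_table[2*i + 1] = 0x23458000   (bits 31..0)
 */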
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
						      BCM_PAGE_SIZE,
						      &dma->pg_map_arr[i]);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
					  &dma->pgtbl_map);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (cp->cnic_uinfo) {
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
		while (cp->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		uio_unregister_device(cp->cnic_uinfo);
		kfree(cp->cnic_uinfo);
		cp->cnic_uinfo = NULL;
	}

	if (cp->l2_buf) {
		pci_free_consistent(dev->pcidev, cp->l2_buf_size,
				    cp->l2_buf, cp->l2_buf_map);
		cp->l2_buf = NULL;
	}

	if (cp->l2_ring) {
		pci_free_consistent(dev->pcidev, cp->l2_ring_size,
				    cp->l2_ring, cp->l2_ring_map);
		cp->l2_ring = NULL;
	}

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
					    cp->ctx_arr[i].ctx,
					    cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kcq_info);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
						     &cp->ctx_arr[i].mapping);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct uio_info *uinfo;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
	cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
					   &cp->l2_ring_map);
	if (!cp->l2_ring)
		goto error;

	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
	cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
					  &cp->l2_buf_map);
	if (!cp->l2_buf)
		goto error;

	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
	if (!uinfo)
		goto error;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
	if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
	else
		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
	uinfo->mem[2].size = cp->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
	uinfo->mem[3].size = cp->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->name = "bnx2_cnic";
	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	uinfo->priv = dev;

	ret = uio_register_device(&dev->pcidev->dev, uinfo);
	if (ret) {
		kfree(uinfo);
		goto error;
	}

	cp->cnic_uinfo = uinfo;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

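The four uinfo->mem[] slots registered above (device registers, status block, L2 ring, L2 buffers) are what the userspace iSCSI tooling maps. By the standard UIO convention the mmap offset selects the map index; a hedged userspace sketch, where the /dev/uio0 node name is an assumption:

/* Userspace (not part of this patch): map UIO region idx of bnx2_cnic. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_uio_region(int idx, size_t size)
{
	int fd = open("/dev/uio0", O_RDWR);
	void *p;

	if (fd < 0)
		return MAP_FAILED;
	/* UIO: offset = map index * page size selects uinfo->mem[idx] */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, (off_t) idx * sysconf(_SC_PAGESIZE));
	close(fd);	/* the mapping remains valid after close */
	return p;
}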
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

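A worked example of the ring arithmetic in cnic_kwq_avail(), which the submit path above uses as its admission check (max_kwq_idx is a power-of-two-minus-one mask):

/*
 * Assume max_kwq_idx = 0x7f, kwq_prod_idx = 0x85, kwq_con_idx = 0x80:
 *
 *   used  = (0x85 - 0x80) & 0x7f = 5
 *   avail = 0x7f - 5 = 0x7a slots
 *
 * The masked subtraction stays correct when the 16-bit producer
 * index wraps around past the consumer.
 */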
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			cnic_kwq_completion(dev, 1);

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				cnic_kwq_completion(dev, 1);
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
			       dev->netdev->name, kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	return;
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = *sw_prod;
	ri &= MAX_KCQ_IDX;

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	*sw_prod = last;
	return last_cnt;
}

static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons = *cp->rx_cons_ptr;
	u16 tx_cons = *cp->tx_cons_ptr;

	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;
		uio_event_notify(cp->cnic_uinfo);
	}
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct status_block *sblk = status_blk;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = sblk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return status_idx;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	hw_prod = sblk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != sblk->status_idx) {
			status_idx = sblk->status_idx;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
			hw_prod = sblk->status_completion_producer_index;
		} else
			break;
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);

	cp->kcq_prod_idx = sw_prod;

	cnic_chk_bnx2_pkt_rings(cp);
	return status_idx;
}

static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	struct status_block_msix *status_blk = cp->bnx2_status_blk;
	u32 status_idx = status_blk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;

	hw_prod = status_blk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != status_blk->status_idx) {
			status_idx = status_blk->status_idx;
			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
			hw_prod = status_blk->status_completion_producer_index;
		} else
			break;
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
	cp->kcq_prod_idx = sw_prod;

	cnic_chk_bnx2_pkt_rings(cp);

	cp->last_status_idx = status_idx;
	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;
	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;

	if (cp->ack_int)
		cp->ack_int(dev);

	prefetch(cp->status_blk);
	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		tasklet_schedule(&cp->cnic_irq_task);

	return IRQ_HANDLED;
}

static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	rcu_read_lock();
	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
		if (!ulp_ops)
			continue;

		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
	}
	rcu_read_unlock();
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	rcu_read_lock();
	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
		if (!ulp_ops || !ulp_ops->cnic_start)
			continue;

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
	}
	rcu_read_unlock();
}

static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);
		mutex_lock(&cnic_lock);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		mutex_unlock(&cnic_lock);
		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);
		mutex_lock(&cnic_lock);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		mutex_unlock(&cnic_lock);
		cnic_put(dev);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	rcu_read_lock();
	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
		if (!ulp_ops || !ulp_ops->cnic_init)
			continue;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

	}
	rcu_read_unlock();
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	rcu_read_lock();
	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
		if (!ulp_ops || !ulp_ops->cnic_exit)
			continue;

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

	}
	rcu_read_unlock();
}

static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_count = DEF_IPID_COUNT;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}

1338static int cnic_cm_close_req(struct cnic_sock *csk)
1339{
1340 struct cnic_dev *dev = csk->dev;
1341 struct l4_kwq_close_req *l4kwqe;
1342 struct kwqe *wqes[1];
1343
1344 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
1345 memset(l4kwqe, 0, sizeof(*l4kwqe));
1346 wqes[0] = (struct kwqe *) l4kwqe;
1347
1348 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
1349 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
1350 l4kwqe->cid = csk->cid;
1351
1352 return dev->submit_kwqes(dev, wqes, 1);
1353}
1354
1355static int cnic_cm_abort_req(struct cnic_sock *csk)
1356{
1357 struct cnic_dev *dev = csk->dev;
1358 struct l4_kwq_reset_req *l4kwqe;
1359 struct kwqe *wqes[1];
1360
1361 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
1362 memset(l4kwqe, 0, sizeof(*l4kwqe));
1363 wqes[0] = (struct kwqe *) l4kwqe;
1364
1365 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
1366 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
1367 l4kwqe->cid = csk->cid;
1368
1369 return dev->submit_kwqes(dev, wqes, 1);
1370}
1371
1372static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
1373 u32 l5_cid, struct cnic_sock **csk, void *context)
1374{
1375 struct cnic_local *cp = dev->cnic_priv;
1376 struct cnic_sock *csk1;
1377
1378 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1379 return -EINVAL;
1380
1381 csk1 = &cp->csk_tbl[l5_cid];
1382 if (atomic_read(&csk1->ref_count))
1383 return -EAGAIN;
1384
1385 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
1386 return -EBUSY;
1387
1388 csk1->dev = dev;
1389 csk1->cid = cid;
1390 csk1->l5_cid = l5_cid;
1391 csk1->ulp_type = ulp_type;
1392 csk1->context = context;
1393
1394 csk1->ka_timeout = DEF_KA_TIMEOUT;
1395 csk1->ka_interval = DEF_KA_INTERVAL;
1396 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
1397 csk1->tos = DEF_TOS;
1398 csk1->ttl = DEF_TTL;
1399 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
1400 csk1->rcv_buf = DEF_RCV_BUF;
1401 csk1->snd_buf = DEF_SND_BUF;
1402 csk1->seed = DEF_SEED;
1403
1404 *csk = csk1;
1405 return 0;
1406}
1407
1408static void cnic_cm_cleanup(struct cnic_sock *csk)
1409{
1410 if (csk->src_port) {
1411 struct cnic_dev *dev = csk->dev;
1412 struct cnic_local *cp = dev->cnic_priv;
1413
1414 cnic_free_id(&cp->csk_port_tbl, csk->src_port);
1415 csk->src_port = 0;
1416 }
1417}
1418
1419static void cnic_close_conn(struct cnic_sock *csk)
1420{
1421 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
1422 cnic_cm_upload_pg(csk);
1423 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
1424 }
1425 cnic_cm_cleanup(csk);
1426}
1427
1428static int cnic_cm_destroy(struct cnic_sock *csk)
1429{
1430 if (!cnic_in_use(csk))
1431 return -EINVAL;
1432
1433 csk_hold(csk);
1434 clear_bit(SK_F_INUSE, &csk->flags);
1435 smp_mb__after_clear_bit();
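	/* We hold one reference ourselves; wait for all other holders
	 * to drop theirs before cleaning up.
	 */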
1436 while (atomic_read(&csk->ref_count) != 1)
1437 msleep(1);
1438 cnic_cm_cleanup(csk);
1439
1440 csk->flags = 0;
1441 csk_put(csk);
1442 return 0;
1443}
1444
1445static inline u16 cnic_get_vlan(struct net_device *dev,
1446 struct net_device **vlan_dev)
1447{
1448 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1449 *vlan_dev = vlan_dev_real_dev(dev);
1450 return vlan_dev_vlan_id(dev);
1451 }
1452 *vlan_dev = dev;
1453 return 0;
1454}
1455
1456static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
1457 struct dst_entry **dst)
1458{
1459#if defined(CONFIG_INET)
1460 struct flowi fl;
1461 int err;
1462 struct rtable *rt;
1463
1464 memset(&fl, 0, sizeof(fl));
1465 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
1466
1467 err = ip_route_output_key(&init_net, &rt, &fl);
1468 if (!err)
1469 *dst = &rt->u.dst;
1470 return err;
1471#else
1472 return -ENETUNREACH;
1473#endif
1474}
1475
1476static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
1477 struct dst_entry **dst)
1478{
1479#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
1480 struct flowi fl;
1481
1482 memset(&fl, 0, sizeof(fl));
1483 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
1484 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
1485 fl.oif = dst_addr->sin6_scope_id;
1486
1487 *dst = ip6_route_output(&init_net, NULL, &fl);
1488 if (*dst)
1489 return 0;
1490#endif
1491
1492 return -ENETUNREACH;
1493}
1494
1495static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
1496 int ulp_type)
1497{
1498 struct cnic_dev *dev = NULL;
1499 struct dst_entry *dst;
1500 struct net_device *netdev = NULL;
1501 int err = -ENETUNREACH;
1502
1503 if (dst_addr->sin_family == AF_INET)
1504 err = cnic_get_v4_route(dst_addr, &dst);
1505 else if (dst_addr->sin_family == AF_INET6) {
1506 struct sockaddr_in6 *dst_addr6 =
1507 (struct sockaddr_in6 *) dst_addr;
1508
1509 err = cnic_get_v6_route(dst_addr6, &dst);
1510 } else
1511 return NULL;
1512
1513 if (err)
1514 return NULL;
1515
1516 if (!dst->dev)
1517 goto done;
1518
1519 cnic_get_vlan(dst->dev, &netdev);
1520
1521 dev = cnic_from_netdev(netdev);
1522
1523done:
1524 dst_release(dst);
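	/* cnic_from_netdev() takes a reference on the matching device;
	 * drop it here, as the caller only uses the pointer to select
	 * a device.
	 */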
1525 if (dev)
1526 cnic_put(dev);
1527 return dev;
1528}
1529
1530static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1531{
1532 struct cnic_dev *dev = csk->dev;
1533 struct cnic_local *cp = dev->cnic_priv;
1534
1535 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
1536}
1537
1538static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1539{
1540 struct cnic_dev *dev = csk->dev;
1541 struct cnic_local *cp = dev->cnic_priv;
1542 int is_v6, err, rc = -ENETUNREACH;
1543 struct dst_entry *dst;
1544 struct net_device *realdev;
1545 u32 local_port;
1546
1547 if (saddr->local.v6.sin6_family == AF_INET6 &&
1548 saddr->remote.v6.sin6_family == AF_INET6)
1549 is_v6 = 1;
1550 else if (saddr->local.v4.sin_family == AF_INET &&
1551 saddr->remote.v4.sin_family == AF_INET)
1552 is_v6 = 0;
1553 else
1554 return -EINVAL;
1555
1556 clear_bit(SK_F_IPV6, &csk->flags);
1557
1558 if (is_v6) {
1559#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
1560 set_bit(SK_F_IPV6, &csk->flags);
1561 err = cnic_get_v6_route(&saddr->remote.v6, &dst);
1562 if (err)
1563 return err;
1564
1565 if (!dst || dst->error || !dst->dev)
1566 goto err_out;
1567
1568 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
1569 sizeof(struct in6_addr));
1570 csk->dst_port = saddr->remote.v6.sin6_port;
1571 local_port = saddr->local.v6.sin6_port;
1572#else
1573 return rc;
1574#endif
1575
1576 } else {
1577 err = cnic_get_v4_route(&saddr->remote.v4, &dst);
1578 if (err)
1579 return err;
1580
1581 if (!dst || dst->error || !dst->dev)
1582 goto err_out;
1583
1584 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
1585 csk->dst_port = saddr->remote.v4.sin_port;
1586 local_port = saddr->local.v4.sin_port;
1587 }
1588
1589 csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
1590 if (realdev != dev->netdev)
1591 goto err_out;
1592
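	/* Reserve the requested local port if it lies in the CNIC range;
	 * otherwise, or if it is already taken, allocate a new one.
	 */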
1593 if (local_port >= CNIC_LOCAL_PORT_MIN &&
1594 local_port < CNIC_LOCAL_PORT_MAX) {
1595 if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
1596 local_port = 0;
1597 } else
1598 local_port = 0;
1599
1600 if (!local_port) {
1601 local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
1602 if (local_port == -1) {
1603 rc = -ENOMEM;
1604 goto err_out;
1605 }
1606 }
1607 csk->src_port = local_port;
1608
1609 csk->mtu = dst_mtu(dst);
1610 rc = 0;
1611
1612err_out:
1613 dst_release(dst);
1614 return rc;
1615}
1616
1617static void cnic_init_csk_state(struct cnic_sock *csk)
1618{
1619 csk->state = 0;
1620 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1621 clear_bit(SK_F_CLOSING, &csk->flags);
1622}
1623
1624static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1625{
1626 int err = 0;
1627
1628 if (!cnic_in_use(csk))
1629 return -EINVAL;
1630
1631 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
1632 return -EINVAL;
1633
1634 cnic_init_csk_state(csk);
1635
1636 err = cnic_get_route(csk, saddr);
1637 if (err)
1638 goto err_out;
1639
1640 err = cnic_resolve_addr(csk, saddr);
1641 if (!err)
1642 return 0;
1643
1644err_out:
1645 clear_bit(SK_F_CONNECT_START, &csk->flags);
1646 return err;
1647}
1648
1649static int cnic_cm_abort(struct cnic_sock *csk)
1650{
1651 struct cnic_local *cp = csk->dev->cnic_priv;
1652 u32 opcode;
1653
1654 if (!cnic_in_use(csk))
1655 return -EINVAL;
1656
1657 if (cnic_abort_prep(csk))
1658 return cnic_cm_abort_req(csk);
1659
1660	/* Getting here means that we haven't started connect, or the
1661	 * connect was not successful.
1662	 */
1663
1664 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
1665 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1666 opcode = csk->state;
1667 else
1668 opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
1669 cp->close_conn(csk, opcode);
1670
1671 return 0;
1672}
1673
1674static int cnic_cm_close(struct cnic_sock *csk)
1675{
1676 if (!cnic_in_use(csk))
1677 return -EINVAL;
1678
1679 if (cnic_close_prep(csk)) {
1680 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
1681 return cnic_cm_close_req(csk);
1682 }
1683 return 0;
1684}
1685
1686static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
1687 u8 opcode)
1688{
1689 struct cnic_ulp_ops *ulp_ops;
1690 int ulp_type = csk->ulp_type;
1691
1692 rcu_read_lock();
1693 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1694 if (ulp_ops) {
1695 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
1696 ulp_ops->cm_connect_complete(csk);
1697 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
1698 ulp_ops->cm_close_complete(csk);
1699 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
1700 ulp_ops->cm_remote_abort(csk);
1701 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
1702 ulp_ops->cm_abort_complete(csk);
1703 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
1704 ulp_ops->cm_remote_close(csk);
1705 }
1706 rcu_read_unlock();
1707}
1708
1709static int cnic_cm_set_pg(struct cnic_sock *csk)
1710{
1711 if (cnic_offld_prep(csk)) {
1712 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1713 cnic_cm_update_pg(csk);
1714 else
1715 cnic_cm_offload_pg(csk);
1716 }
1717 return 0;
1718}
1719
1720static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
1721{
1722 struct cnic_local *cp = dev->cnic_priv;
1723 u32 l5_cid = kcqe->pg_host_opaque;
1724 u8 opcode = kcqe->op_code;
1725 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
1726
1727 csk_hold(csk);
1728 if (!cnic_in_use(csk))
1729 goto done;
1730
1731 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
1732 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1733 goto done;
1734 }
1735 csk->pg_cid = kcqe->pg_cid;
1736 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
1737 cnic_cm_conn_req(csk);
1738
1739done:
1740 csk_put(csk);
1741}
1742
1743static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
1744{
1745 struct cnic_local *cp = dev->cnic_priv;
1746 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
1747 u8 opcode = l4kcqe->op_code;
1748 u32 l5_cid;
1749 struct cnic_sock *csk;
1750
1751 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
1752 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
1753 cnic_cm_process_offld_pg(dev, l4kcqe);
1754 return;
1755 }
1756
1757 l5_cid = l4kcqe->conn_id;
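	/* Opcodes with bit 7 set are L5CM ramrod command ids (base 0x80,
	 * see L5CM_RAMROD_CMD_ID_BASE); those completions carry the L5
	 * connection id in the cid field instead.
	 */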
1758 if (opcode & 0x80)
1759 l5_cid = l4kcqe->cid;
1760 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1761 return;
1762
1763 csk = &cp->csk_tbl[l5_cid];
1764 csk_hold(csk);
1765
1766 if (!cnic_in_use(csk)) {
1767 csk_put(csk);
1768 return;
1769 }
1770
1771 switch (opcode) {
1772 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
1773 if (l4kcqe->status == 0)
1774 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
1775
1776 smp_mb__before_clear_bit();
1777 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1778 cnic_cm_upcall(cp, csk, opcode);
1779 break;
1780
1781 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
1782 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
1783 csk->state = opcode;
1784 /* fall through */
1785 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
1786 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
1787 cp->close_conn(csk, opcode);
1788 break;
1789
1790 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
1791 cnic_cm_upcall(cp, csk, opcode);
1792 break;
1793 }
1794 csk_put(csk);
1795}
1796
1797static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
1798{
1799 struct cnic_dev *dev = data;
1800 int i;
1801
1802 for (i = 0; i < num; i++)
1803 cnic_cm_process_kcqe(dev, kcqe[i]);
1804}
1805
1806static struct cnic_ulp_ops cm_ulp_ops = {
1807 .indicate_kcqes = cnic_cm_indicate_kcqe,
1808};
1809
1810static void cnic_cm_free_mem(struct cnic_dev *dev)
1811{
1812 struct cnic_local *cp = dev->cnic_priv;
1813
1814 kfree(cp->csk_tbl);
1815 cp->csk_tbl = NULL;
1816 cnic_free_id_tbl(&cp->csk_port_tbl);
1817}
1818
1819static int cnic_cm_alloc_mem(struct cnic_dev *dev)
1820{
1821 struct cnic_local *cp = dev->cnic_priv;
1822
1823 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
1824 GFP_KERNEL);
1825 if (!cp->csk_tbl)
1826 return -ENOMEM;
1827
1828 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
1829 CNIC_LOCAL_PORT_MIN)) {
1830 cnic_cm_free_mem(dev);
1831 return -ENOMEM;
1832 }
1833 return 0;
1834}
1835
1836static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
1837{
1838 if ((opcode == csk->state) ||
1839 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
1840 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
1841 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
1842 return 1;
1843 }
1844 return 0;
1845}
1846
1847static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
1848{
1849 struct cnic_dev *dev = csk->dev;
1850 struct cnic_local *cp = dev->cnic_priv;
1851
1852 clear_bit(SK_F_CONNECT_START, &csk->flags);
1853 if (cnic_ready_to_close(csk, opcode)) {
1854 cnic_close_conn(csk);
1855 cnic_cm_upcall(cp, csk, opcode);
1856 }
1857}
1858
1859static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
1860{
1861}
1862
1863static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
1864{
1865 u32 seed;
1866
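	/* Seed the firmware's TCP ISN generator; context address 45
	 * appears to be the seed location used by the bnx2 CM firmware.
	 */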
1867 get_random_bytes(&seed, 4);
1868 cnic_ctx_wr(dev, 45, 0, seed);
1869 return 0;
1870}
1871
1872static int cnic_cm_open(struct cnic_dev *dev)
1873{
1874 struct cnic_local *cp = dev->cnic_priv;
1875 int err;
1876
1877 err = cnic_cm_alloc_mem(dev);
1878 if (err)
1879 return err;
1880
1881 err = cp->start_cm(dev);
1882
1883 if (err)
1884 goto err_out;
1885
1886 dev->cm_create = cnic_cm_create;
1887 dev->cm_destroy = cnic_cm_destroy;
1888 dev->cm_connect = cnic_cm_connect;
1889 dev->cm_abort = cnic_cm_abort;
1890 dev->cm_close = cnic_cm_close;
1891 dev->cm_select_dev = cnic_cm_select_dev;
1892
1893 cp->ulp_handle[CNIC_ULP_L4] = dev;
1894 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
1895 return 0;
1896
1897err_out:
1898 cnic_cm_free_mem(dev);
1899 return err;
1900}
1901
1902static int cnic_cm_shutdown(struct cnic_dev *dev)
1903{
1904 struct cnic_local *cp = dev->cnic_priv;
1905 int i;
1906
1907 cp->stop_cm(dev);
1908
1909 if (!cp->csk_tbl)
1910 return 0;
1911
1912 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
1913 struct cnic_sock *csk = &cp->csk_tbl[i];
1914
1915 clear_bit(SK_F_INUSE, &csk->flags);
1916 cnic_cm_cleanup(csk);
1917 }
1918 cnic_cm_free_mem(dev);
1919
1920 return 0;
1921}
1922
1923static void cnic_init_context(struct cnic_dev *dev, u32 cid)
1924{
1925 struct cnic_local *cp = dev->cnic_priv;
1926 u32 cid_addr;
1927 int i;
1928
1929 if (CHIP_NUM(cp) == CHIP_NUM_5709)
1930 return;
1931
1932 cid_addr = GET_CID_ADDR(cid);
1933
1934 for (i = 0; i < CTX_SIZE; i += 4)
1935 cnic_ctx_wr(dev, cid_addr, i, 0);
1936}
1937
1938static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
1939{
1940 struct cnic_local *cp = dev->cnic_priv;
1941 int ret = 0, i;
1942 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
1943
1944 if (CHIP_NUM(cp) != CHIP_NUM_5709)
1945 return 0;
1946
1947 for (i = 0; i < cp->ctx_blks; i++) {
1948 int j;
1949 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
1950 u32 val;
1951
1952 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
1953
1954 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1955 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
1956 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1957 (u64) cp->ctx_arr[i].mapping >> 32);
1958 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
1959 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
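		/* Poll up to ~50us for the chip to clear the write-request
		 * bit, signalling that the page-table entry was accepted.
		 */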
1960 for (j = 0; j < 10; j++) {
1961
1962 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1963 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1964 break;
1965 udelay(5);
1966 }
1967 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1968 ret = -EBUSY;
1969 break;
1970 }
1971 }
1972 return ret;
1973}
1974
1975static void cnic_free_irq(struct cnic_dev *dev)
1976{
1977 struct cnic_local *cp = dev->cnic_priv;
1978 struct cnic_eth_dev *ethdev = cp->ethdev;
1979
1980 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1981 cp->disable_int_sync(dev);
1982 tasklet_disable(&cp->cnic_irq_task);
1983 free_irq(ethdev->irq_arr[0].vector, dev);
1984 }
1985}
1986
1987static int cnic_init_bnx2_irq(struct cnic_dev *dev)
1988{
1989 struct cnic_local *cp = dev->cnic_priv;
1990 struct cnic_eth_dev *ethdev = cp->ethdev;
1991
1992 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1993 int err, i = 0;
1994 int sblk_num = cp->status_blk_num;
1995 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
1996 BNX2_HC_SB_CONFIG_1;
1997
1998 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
1999
2000 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
2001 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
2002 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
2003
2004 cp->bnx2_status_blk = cp->status_blk;
2005 cp->last_status_idx = cp->bnx2_status_blk->status_idx;
2006 tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
2007 (unsigned long) dev);
2008 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
2009 "cnic", dev);
2010 if (err) {
2011 tasklet_disable(&cp->cnic_irq_task);
2012 return err;
2013 }
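		/* Force the chip to coalesce until the status block's
		 * completion producer index resets to 0, so the KCQ
		 * starts out clean.
		 */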
2014 while (cp->bnx2_status_blk->status_completion_producer_index &&
2015 i < 10) {
2016 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
2017 1 << (11 + sblk_num));
2018 udelay(10);
2019 i++;
2020 barrier();
2021 }
2022 if (cp->bnx2_status_blk->status_completion_producer_index) {
2023 cnic_free_irq(dev);
2024 goto failed;
2025 }
2026
2027 } else {
2028 struct status_block *sblk = cp->status_blk;
2029 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
2030 int i = 0;
2031
2032 while (sblk->status_completion_producer_index && i < 10) {
2033 CNIC_WR(dev, BNX2_HC_COMMAND,
2034 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2035 udelay(10);
2036 i++;
2037 barrier();
2038 }
2039 if (sblk->status_completion_producer_index)
2040 goto failed;
2041
2042 }
2043 return 0;
2044
2045failed:
2046	printk(KERN_ERR PFX "%s: KCQ index not resetting to 0.\n",
2047 dev->netdev->name);
2048 return -EBUSY;
2049}
2050
2051static void cnic_enable_bnx2_int(struct cnic_dev *dev)
2052{
2053 struct cnic_local *cp = dev->cnic_priv;
2054 struct cnic_eth_dev *ethdev = cp->ethdev;
2055
2056 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2057 return;
2058
2059 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2060 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2061}
2062
2063static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
2064{
2065 struct cnic_local *cp = dev->cnic_priv;
2066 struct cnic_eth_dev *ethdev = cp->ethdev;
2067
2068 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2069 return;
2070
2071 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2072 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2073 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
2074 synchronize_irq(ethdev->irq_arr[0].vector);
2075}
2076
2077static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
2078{
2079 struct cnic_local *cp = dev->cnic_priv;
2080 struct cnic_eth_dev *ethdev = cp->ethdev;
2081 u32 cid_addr, tx_cid, sb_id;
2082 u32 val, offset0, offset1, offset2, offset3;
2083 int i;
2084 struct tx_bd *txbd;
2085 dma_addr_t buf_map;
2086 struct status_block *s_blk = cp->status_blk;
2087
2088 sb_id = cp->status_blk_num;
2089 tx_cid = 20;
2090 cnic_init_context(dev, tx_cid);
2091 cnic_init_context(dev, tx_cid + 1);
2092 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
2093 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2094 struct status_block_msix *sblk = cp->status_blk;
2095
2096 tx_cid = TX_TSS_CID + sb_id - 1;
2097 cnic_init_context(dev, tx_cid);
2098 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
2099 (TX_TSS_CID << 7));
2100 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
2101 }
2102 cp->tx_cons = *cp->tx_cons_ptr;
2103
2104 cid_addr = GET_CID_ADDR(tx_cid);
2105 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
2106 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
2107
2108 for (i = 0; i < PHY_CTX_SIZE; i += 4)
2109 cnic_ctx_wr(dev, cid_addr2, i, 0);
2110
2111 offset0 = BNX2_L2CTX_TYPE_XI;
2112 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
2113 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
2114 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
2115 } else {
2116 offset0 = BNX2_L2CTX_TYPE;
2117 offset1 = BNX2_L2CTX_CMD_TYPE;
2118 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
2119 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
2120 }
2121 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
2122 cnic_ctx_wr(dev, cid_addr, offset0, val);
2123
2124 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
2125 cnic_ctx_wr(dev, cid_addr, offset1, val);
2126
2127 txbd = (struct tx_bd *) cp->l2_ring;
2128
2129 buf_map = cp->l2_buf_map;
2130 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
2131 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
2132 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2133 }
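	/* txbd now points at the last BD in the page; per the bnx2 ring
	 * convention it chains back to the base of the ring, which is
	 * written below.
	 */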
2134 val = (u64) cp->l2_ring_map >> 32;
2135 cnic_ctx_wr(dev, cid_addr, offset2, val);
2136 txbd->tx_bd_haddr_hi = val;
2137
2138 val = (u64) cp->l2_ring_map & 0xffffffff;
2139 cnic_ctx_wr(dev, cid_addr, offset3, val);
2140 txbd->tx_bd_haddr_lo = val;
2141}
2142
2143static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
2144{
2145 struct cnic_local *cp = dev->cnic_priv;
2146 struct cnic_eth_dev *ethdev = cp->ethdev;
2147 u32 cid_addr, sb_id, val, coal_reg, coal_val;
2148 int i;
2149 struct rx_bd *rxbd;
2150 struct status_block *s_blk = cp->status_blk;
2151
2152 sb_id = cp->status_blk_num;
2153 cnic_init_context(dev, 2);
2154 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
2155 coal_reg = BNX2_HC_COMMAND;
2156 coal_val = CNIC_RD(dev, coal_reg);
2157 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2158 struct status_block_msix *sblk = cp->status_blk;
2159
2160 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
2161 coal_reg = BNX2_HC_COALESCE_NOW;
2162 coal_val = 1 << (11 + sb_id);
2163 }
2164 i = 0;
2165	while (*cp->rx_cons_ptr == 0 && i < 10) {
2166 CNIC_WR(dev, coal_reg, coal_val);
2167 udelay(10);
2168 i++;
2169 barrier();
2170 }
2171 cp->rx_cons = *cp->rx_cons_ptr;
2172
2173 cid_addr = GET_CID_ADDR(2);
2174 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
2175 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
2176 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
2177
2178 if (sb_id == 0)
2179 val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
2180 else
2181 val = BNX2_L2CTX_STATUSB_NUM(sb_id);
2182 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
2183
2184 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
2185 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
2186 dma_addr_t buf_map;
2187 int n = (i % cp->l2_rx_ring_size) + 1;
2188
2189 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
2190 rxbd->rx_bd_len = cp->l2_single_buf_size;
2191 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
2192 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
2193 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2194 }
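	/* As on the TX side, the final BD chains the RX ring back to its
	 * base (one BCM_PAGE into l2_ring).
	 */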
2195 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
2196 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
2197 rxbd->rx_bd_haddr_hi = val;
2198
2199 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
2200 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
2201 rxbd->rx_bd_haddr_lo = val;
2202
2203 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
2204 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
2205}
2206
2207static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
2208{
2209 struct kwqe *wqes[1], l2kwqe;
2210
2211 memset(&l2kwqe, 0, sizeof(l2kwqe));
2212 wqes[0] = &l2kwqe;
2213 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
2214 (L2_KWQE_OPCODE_VALUE_FLUSH <<
2215 KWQE_OPCODE_SHIFT) | 2;
2216 dev->submit_kwqes(dev, wqes, 1);
2217}
2218
2219static void cnic_set_bnx2_mac(struct cnic_dev *dev)
2220{
2221 struct cnic_local *cp = dev->cnic_priv;
2222 u32 val;
2223
2224 val = cp->func << 2;
2225
2226 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
2227
2228 val = cnic_reg_rd_ind(dev, cp->shmem_base +
2229 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
2230 dev->mac_addr[0] = (u8) (val >> 8);
2231 dev->mac_addr[1] = (u8) val;
2232
2233 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
2234
2235 val = cnic_reg_rd_ind(dev, cp->shmem_base +
2236 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
2237 dev->mac_addr[2] = (u8) (val >> 24);
2238 dev->mac_addr[3] = (u8) (val >> 16);
2239 dev->mac_addr[4] = (u8) (val >> 8);
2240 dev->mac_addr[5] = (u8) val;
2241
2242 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
2243
2244 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
2245 if (CHIP_NUM(cp) != CHIP_NUM_5709)
2246 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
2247
2248 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
2249 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
2250 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
2251}
2252
2253static int cnic_start_bnx2_hw(struct cnic_dev *dev)
2254{
2255 struct cnic_local *cp = dev->cnic_priv;
2256 struct cnic_eth_dev *ethdev = cp->ethdev;
2257 struct status_block *sblk = cp->status_blk;
2258 u32 val;
2259 int err;
2260
2261 cnic_set_bnx2_mac(dev);
2262
2263 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
2264 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
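	/* The kernel-bypass block size is encoded as log2(page size) - 8,
	 * capped at 4K when the host page size is larger.
	 */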
2265 if (BCM_PAGE_BITS > 12)
2266 val |= (12 - 8) << 4;
2267 else
2268 val |= (BCM_PAGE_BITS - 8) << 4;
2269
2270 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
2271
2272 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
2273 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
2274 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
2275
2276 err = cnic_setup_5709_context(dev, 1);
2277 if (err)
2278 return err;
2279
2280 cnic_init_context(dev, KWQ_CID);
2281 cnic_init_context(dev, KCQ_CID);
2282
2283 cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
2284 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
2285
2286 cp->max_kwq_idx = MAX_KWQ_IDX;
2287 cp->kwq_prod_idx = 0;
2288 cp->kwq_con_idx = 0;
2289 cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
2290
2291 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
2292 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
2293 else
2294 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
2295
2296 /* Initialize the kernel work queue context. */
2297 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
2298 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
2299 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
2300
2301 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
2302 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
2303
2304 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
2305 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
2306
2307 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
2308 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
2309
2310 val = (u32) cp->kwq_info.pgtbl_map;
2311 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
2312
2313 cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
2314 cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
2315
2316 cp->kcq_prod_idx = 0;
2317
2318 /* Initialize the kernel complete queue context. */
2319 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
2320 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
2321 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
2322
2323 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
2324 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
2325
2326 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
2327 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
2328
2329 val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
2330 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
2331
2332 val = (u32) cp->kcq_info.pgtbl_map;
2333 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
2334
2335 cp->int_num = 0;
2336 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2337 u32 sb_id = cp->status_blk_num;
2338 u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
2339
2340 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
2341 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
2342 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
2343 }
2344
2345	/* Enable Command Scheduler notification when we write to the
2346 * host producer index of the kernel contexts. */
2347 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
2348
2349 /* Enable Command Scheduler notification when we write to either
2350 * the Send Queue or Receive Queue producer indexes of the kernel
2351 * bypass contexts. */
2352 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
2353 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
2354
2355	/* Notify COM when the driver posts an application buffer. */
2356 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
2357
2358	/* Set the CP and COM doorbells. These two processors poll the
2359	 * doorbell for a non-zero value before running. This must be done
2360 * after setting up the kernel queue contexts. */
2361 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
2362 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
2363
2364 cnic_init_bnx2_tx_ring(dev);
2365 cnic_init_bnx2_rx_ring(dev);
2366
2367 err = cnic_init_bnx2_irq(dev);
2368 if (err) {
2369		printk(KERN_ERR PFX "%s: cnic_init_bnx2_irq failed\n",
2370 dev->netdev->name);
2371 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
2372 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
2373 return err;
2374 }
2375
2376 return 0;
2377}
2378
2379static int cnic_start_hw(struct cnic_dev *dev)
2380{
2381 struct cnic_local *cp = dev->cnic_priv;
2382 struct cnic_eth_dev *ethdev = cp->ethdev;
2383 int err;
2384
2385 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
2386 return -EALREADY;
2387
2388 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
2389 if (err) {
2390 printk(KERN_ERR PFX "%s: register_cnic failed\n",
2391 dev->netdev->name);
2392 goto err2;
2393 }
2394
2395 dev->regview = ethdev->io_base;
2396 cp->chip_id = ethdev->chip_id;
2397 pci_dev_get(dev->pcidev);
2398 cp->func = PCI_FUNC(dev->pcidev->devfn);
2399 cp->status_blk = ethdev->irq_arr[0].status_blk;
2400 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
2401
2402 err = cp->alloc_resc(dev);
2403 if (err) {
2404 printk(KERN_ERR PFX "%s: allocate resource failure\n",
2405 dev->netdev->name);
2406 goto err1;
2407 }
2408
2409 err = cp->start_hw(dev);
2410 if (err)
2411 goto err1;
2412
2413 err = cnic_cm_open(dev);
2414 if (err)
2415 goto err1;
2416
2417 set_bit(CNIC_F_CNIC_UP, &dev->flags);
2418
2419 cp->enable_int(dev);
2420
2421 return 0;
2422
2423err1:
2424 ethdev->drv_unregister_cnic(dev->netdev);
2425 cp->free_resc(dev);
2426 pci_dev_put(dev->pcidev);
2427err2:
2428 return err;
2429}
2430
2431static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
2432{
2433 struct cnic_local *cp = dev->cnic_priv;
2434 struct cnic_eth_dev *ethdev = cp->ethdev;
2435
2436 cnic_disable_bnx2_int_sync(dev);
2437
2438 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
2439 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
2440
2441 cnic_init_context(dev, KWQ_CID);
2442 cnic_init_context(dev, KCQ_CID);
2443
2444 cnic_setup_5709_context(dev, 0);
2445 cnic_free_irq(dev);
2446
2447 ethdev->drv_unregister_cnic(dev->netdev);
2448
2449 cnic_free_resc(dev);
2450}
2451
2452static void cnic_stop_hw(struct cnic_dev *dev)
2453{
2454 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2455 struct cnic_local *cp = dev->cnic_priv;
2456
2457 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
2458 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
2459 synchronize_rcu();
2460 cnic_cm_shutdown(dev);
2461 cp->stop_hw(dev);
2462 pci_dev_put(dev->pcidev);
2463 }
2464}
2465
2466static void cnic_free_dev(struct cnic_dev *dev)
2467{
2468 int i = 0;
2469
2470 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
2471 msleep(100);
2472 i++;
2473 }
2474 if (atomic_read(&dev->ref_count) != 0)
2475 printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
2476 " to zero.\n", dev->netdev->name);
2477
2478 printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
2479 dev_put(dev->netdev);
2480 kfree(dev);
2481}
2482
2483static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
2484 struct pci_dev *pdev)
2485{
2486 struct cnic_dev *cdev;
2487 struct cnic_local *cp;
2488 int alloc_size;
2489
2490 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
2491
2492	cdev = kzalloc(alloc_size, GFP_KERNEL);
2493 if (cdev == NULL) {
2494 printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
2495 dev->name);
2496 return NULL;
2497 }
2498
2499 cdev->netdev = dev;
2500 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
2501 cdev->register_device = cnic_register_device;
2502 cdev->unregister_device = cnic_unregister_device;
2503 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
2504
2505 cp = cdev->cnic_priv;
2506 cp->dev = cdev;
2507 cp->uio_dev = -1;
2508 cp->l2_single_buf_size = 0x400;
2509 cp->l2_rx_ring_size = 3;
2510
2511 spin_lock_init(&cp->cnic_ulp_lock);
2512
2513 printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
2514
2515 return cdev;
2516}
2517
2518static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
2519{
2520 struct pci_dev *pdev;
2521 struct cnic_dev *cdev;
2522 struct cnic_local *cp;
2523 struct cnic_eth_dev *ethdev = NULL;
2524 struct cnic_eth_dev *(*probe)(void *) = NULL;
2525
2526 probe = __symbol_get("bnx2_cnic_probe");
2527 if (probe) {
2528 ethdev = (*probe)(dev);
2529 symbol_put_addr(probe);
2530 }
2531 if (!ethdev)
2532 return NULL;
2533
2534 pdev = ethdev->pdev;
2535 if (!pdev)
2536 return NULL;
2537
2538 dev_hold(dev);
2539 pci_dev_get(pdev);
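	/* Early 5709 silicon (revisions below 0x10, presumably A-step
	 * parts) does not support the CNIC interface; skip those devices.
	 */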
2540 if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
2541 pdev->device == PCI_DEVICE_ID_NX2_5709S) {
2542 u8 rev;
2543
2544 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
2545 if (rev < 0x10) {
2546 pci_dev_put(pdev);
2547 goto cnic_err;
2548 }
2549 }
2550 pci_dev_put(pdev);
2551
2552 cdev = cnic_alloc_dev(dev, pdev);
2553 if (cdev == NULL)
2554 goto cnic_err;
2555
2556 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
2557 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
2558
2559 cp = cdev->cnic_priv;
2560 cp->ethdev = ethdev;
2561 cdev->pcidev = pdev;
2562
2563 cp->cnic_ops = &cnic_bnx2_ops;
2564 cp->start_hw = cnic_start_bnx2_hw;
2565 cp->stop_hw = cnic_stop_bnx2_hw;
2566 cp->setup_pgtbl = cnic_setup_page_tbl;
2567 cp->alloc_resc = cnic_alloc_bnx2_resc;
2568 cp->free_resc = cnic_free_resc;
2569 cp->start_cm = cnic_cm_init_bnx2_hw;
2570 cp->stop_cm = cnic_cm_stop_bnx2_hw;
2571 cp->enable_int = cnic_enable_bnx2_int;
2572 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
2573 cp->close_conn = cnic_close_bnx2_conn;
2574 cp->next_idx = cnic_bnx2_next_idx;
2575 cp->hw_idx = cnic_bnx2_hw_idx;
2576 return cdev;
2577
2578cnic_err:
2579 dev_put(dev);
2580 return NULL;
2581}
2582
2583static struct cnic_dev *is_cnic_dev(struct net_device *dev)
2584{
2585 struct ethtool_drvinfo drvinfo;
2586 struct cnic_dev *cdev = NULL;
2587
2588 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
2589 memset(&drvinfo, 0, sizeof(drvinfo));
2590 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
2591
2592 if (!strcmp(drvinfo.driver, "bnx2"))
2593 cdev = init_bnx2_cnic(dev);
2594 if (cdev) {
2595 write_lock(&cnic_dev_lock);
2596 list_add(&cdev->list, &cnic_dev_list);
2597 write_unlock(&cnic_dev_lock);
2598 }
2599 }
2600 return cdev;
2601}
2602
2603/*
2604 * netdev event handler
2605 */
2606static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2607 void *ptr)
2608{
2609 struct net_device *netdev = ptr;
2610 struct cnic_dev *dev;
2611 int if_type;
2612 int new_dev = 0;
2613
2614 dev = cnic_from_netdev(netdev);
2615
2616 if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
2617 /* Check for the hot-plug device */
2618 dev = is_cnic_dev(netdev);
2619 if (dev) {
2620 new_dev = 1;
2621 cnic_hold(dev);
2622 }
2623 }
2624 if (dev) {
2625 struct cnic_local *cp = dev->cnic_priv;
2626
2627 if (new_dev)
2628 cnic_ulp_init(dev);
2629 else if (event == NETDEV_UNREGISTER)
2630 cnic_ulp_exit(dev);
2631 else if (event == NETDEV_UP) {
2632 mutex_lock(&cnic_lock);
2633 if (!cnic_start_hw(dev))
2634 cnic_ulp_start(dev);
2635 mutex_unlock(&cnic_lock);
2636 }
2637
2638 rcu_read_lock();
2639 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2640 struct cnic_ulp_ops *ulp_ops;
2641 void *ctx;
2642
2643 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
2644 if (!ulp_ops || !ulp_ops->indicate_netevent)
2645 continue;
2646
2647 ctx = cp->ulp_handle[if_type];
2648
2649 ulp_ops->indicate_netevent(ctx, event);
2650 }
2651 rcu_read_unlock();
2652
2653 if (event == NETDEV_GOING_DOWN) {
2654 mutex_lock(&cnic_lock);
2655 cnic_ulp_stop(dev);
2656 cnic_stop_hw(dev);
2657 mutex_unlock(&cnic_lock);
2658 } else if (event == NETDEV_UNREGISTER) {
2659 write_lock(&cnic_dev_lock);
2660 list_del_init(&dev->list);
2661 write_unlock(&cnic_dev_lock);
2662
2663 cnic_put(dev);
2664 cnic_free_dev(dev);
2665 goto done;
2666 }
2667 cnic_put(dev);
2668 }
2669done:
2670 return NOTIFY_DONE;
2671}
2672
2673static struct notifier_block cnic_netdev_notifier = {
2674 .notifier_call = cnic_netdev_event
2675};
2676
2677static void cnic_release(void)
2678{
2679 struct cnic_dev *dev;
2680
2681 while (!list_empty(&cnic_dev_list)) {
2682 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
2683 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2684 cnic_ulp_stop(dev);
2685 cnic_stop_hw(dev);
2686 }
2687
2688 cnic_ulp_exit(dev);
2689 list_del_init(&dev->list);
2690 cnic_free_dev(dev);
2691 }
2692}
2693
2694static int __init cnic_init(void)
2695{
2696 int rc = 0;
2697
2698 printk(KERN_INFO "%s", version);
2699
2700 rc = register_netdevice_notifier(&cnic_netdev_notifier);
2701 if (rc) {
2702 cnic_release();
2703 return rc;
2704 }
2705
2706 return 0;
2707}
2708
2709static void __exit cnic_exit(void)
2710{
2711 unregister_netdevice_notifier(&cnic_netdev_notifier);
2712 cnic_release();
2713 return;
2714}
2715
2716module_init(cnic_init);
2717module_exit(cnic_exit);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
new file mode 100644
index 000000000000..5192d4a9df5a
--- /dev/null
+++ b/drivers/net/cnic.h
@@ -0,0 +1,299 @@
1/* cnic.h: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 */
10
11
12#ifndef CNIC_H
13#define CNIC_H
14
15#define KWQ_PAGE_CNT 4
16#define KCQ_PAGE_CNT 16
17
18#define KWQ_CID 24
19#define KCQ_CID 25
20
21/*
22 * krnlq_context definition
23 */
24#define L5_KRNLQ_FLAGS 0x00000000
25#define L5_KRNLQ_SIZE 0x00000000
26#define L5_KRNLQ_TYPE 0x00000000
27#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
28#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
29#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
30#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
31#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
32#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
33#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
34#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
35#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
36#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
37#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
38#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
39#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
40#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
41#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
42#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
43#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
44#define KRNLQ_TYPE_TYPE (0xf<<28)
45#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
46#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
47
48#define L5_KRNLQ_HOST_QIDX 0x00000004
49#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
50#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
51#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
52#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
53#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
54#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
55#define L5_KRNLQ_NX_PG_QIDX 0x00000018
56#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
57#define L5_KRNLQ_QIDX_INCR 0x0000001c
58#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
59#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
60
61#define BNX2_PG_CTX_MAP 0x1a0034
62#define BNX2_ISCSI_CTX_MAP 0x1a0074
63
64struct cnic_redirect_entry {
65 struct dst_entry *old_dst;
66 struct dst_entry *new_dst;
67};
68
69#define MAX_COMPLETED_KCQE 64
70
71#define MAX_CNIC_L5_CONTEXT 256
72
73#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
74
75#define MAX_ISCSI_TBL_SZ 256
76
77#define CNIC_LOCAL_PORT_MIN 60000
78#define CNIC_LOCAL_PORT_MAX 61000
79#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
80
81#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
82#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
83#define MAX_KWQE_CNT (KWQE_CNT - 1)
84#define MAX_KCQE_CNT (KCQE_CNT - 1)
85
86#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
87#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
88
89#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
90#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
91
92#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
93#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
94
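/* The last slot of each KCQ page appears to be reserved as a next-page
 * pointer, so crossing a page boundary skips one extra index.
 */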
95#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
96 (MAX_KCQE_CNT - 1)) ? \
97 (x) + 2 : (x) + 1
98
99#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
100#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
101#define BNX2X_KWQ_DATA(cp, x) \
102 &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
103
104#define DEF_IPID_COUNT 0xc001
105
106#define DEF_KA_TIMEOUT 10000
107#define DEF_KA_INTERVAL 300000
108#define DEF_KA_MAX_PROBE_COUNT 3
109#define DEF_TOS 0
110#define DEF_TTL 0xfe
111#define DEF_SND_SEQ_SCALE 0
112#define DEF_RCV_BUF 0xffff
113#define DEF_SND_BUF 0xffff
114#define DEF_SEED 0
115#define DEF_MAX_RT_TIME 500
116#define DEF_MAX_DA_COUNT 2
117#define DEF_SWS_TIMER 1000
118#define DEF_MAX_CWND 0xffff
119
120struct cnic_ctx {
121 u32 cid;
122 void *ctx;
123 dma_addr_t mapping;
124};
125
126#define BNX2_MAX_CID 0x2000
127
128struct cnic_dma {
129 int num_pages;
130 void **pg_arr;
131 dma_addr_t *pg_map_arr;
132 int pgtbl_size;
133 u32 *pgtbl;
134 dma_addr_t pgtbl_map;
135};
136
137struct cnic_id_tbl {
138 spinlock_t lock;
139 u32 start;
140 u32 max;
141 u32 next;
142 unsigned long *table;
143};
144
145#define CNIC_KWQ16_DATA_SIZE 128
146
147struct kwqe_16_data {
148 u8 data[CNIC_KWQ16_DATA_SIZE];
149};
150
151struct cnic_iscsi {
152 struct cnic_dma task_array_info;
153 struct cnic_dma r2tq_info;
154 struct cnic_dma hq_info;
155};
156
157struct cnic_context {
158 u32 cid;
159 struct kwqe_16_data *kwqe_data;
160 dma_addr_t kwqe_data_mapping;
161 wait_queue_head_t waitq;
162 int wait_cond;
163 unsigned long timestamp;
164 u32 ctx_flags;
165#define CTX_FL_OFFLD_START 0x00000001
166 u8 ulp_proto_id;
167 union {
168 struct cnic_iscsi *iscsi;
169 } proto;
170};
171
172struct cnic_local {
173
174 spinlock_t cnic_ulp_lock;
175 void *ulp_handle[MAX_CNIC_ULP_TYPE];
176 unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
177#define ULP_F_INIT 0
178#define ULP_F_START 1
179 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
180
181 /* protected by ulp_lock */
182 u32 cnic_local_flags;
183#define CNIC_LCL_FL_KWQ_INIT 0x00000001
184
185 struct cnic_dev *dev;
186
187 struct cnic_eth_dev *ethdev;
188
189 void *l2_ring;
190 dma_addr_t l2_ring_map;
191 int l2_ring_size;
192 int l2_rx_ring_size;
193
194 void *l2_buf;
195 dma_addr_t l2_buf_map;
196 int l2_buf_size;
197 int l2_single_buf_size;
198
199 u16 *rx_cons_ptr;
200 u16 *tx_cons_ptr;
201 u16 rx_cons;
202 u16 tx_cons;
203
204 u32 kwq_cid_addr;
205 u32 kcq_cid_addr;
206
207 struct cnic_dma kwq_info;
208 struct kwqe **kwq;
209
210 struct cnic_dma kwq_16_data_info;
211
212 u16 max_kwq_idx;
213
214 u16 kwq_prod_idx;
215 u32 kwq_io_addr;
216
217 u16 *kwq_con_idx_ptr;
218 u16 kwq_con_idx;
219
220 struct cnic_dma kcq_info;
221 struct kcqe **kcq;
222
223 u16 kcq_prod_idx;
224 u32 kcq_io_addr;
225
226 void *status_blk;
227 struct status_block_msix *bnx2_status_blk;
228 struct host_status_block *bnx2x_status_blk;
229
230 u32 status_blk_num;
231 u32 int_num;
232 u32 last_status_idx;
233 struct tasklet_struct cnic_irq_task;
234
235 struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
236
237 struct cnic_sock *csk_tbl;
238 struct cnic_id_tbl csk_port_tbl;
239
240 struct cnic_dma conn_buf_info;
241 struct cnic_dma gbl_buf_info;
242
243 struct cnic_iscsi *iscsi_tbl;
244 struct cnic_context *ctx_tbl;
245 struct cnic_id_tbl cid_tbl;
246 int max_iscsi_conn;
247 atomic_t iscsi_conn;
248
249 /* per connection parameters */
250 int num_iscsi_tasks;
251 int num_ccells;
252 int task_array_size;
253 int r2tq_size;
254 int hq_size;
255 int num_cqs;
256
257 struct cnic_ctx *ctx_arr;
258 int ctx_blks;
259 int ctx_blk_size;
260 int cids_per_blk;
261
262 u32 chip_id;
263 int func;
264 u32 shmem_base;
265
266 u32 uio_dev;
267 struct uio_info *cnic_uinfo;
268
269 struct cnic_ops *cnic_ops;
270 int (*start_hw)(struct cnic_dev *);
271 void (*stop_hw)(struct cnic_dev *);
272 void (*setup_pgtbl)(struct cnic_dev *,
273 struct cnic_dma *);
274 int (*alloc_resc)(struct cnic_dev *);
275 void (*free_resc)(struct cnic_dev *);
276 int (*start_cm)(struct cnic_dev *);
277 void (*stop_cm)(struct cnic_dev *);
278 void (*enable_int)(struct cnic_dev *);
279 void (*disable_int_sync)(struct cnic_dev *);
280 void (*ack_int)(struct cnic_dev *);
281 void (*close_conn)(struct cnic_sock *, u32 opcode);
282 u16 (*next_idx)(u16);
283 u16 (*hw_idx)(u16);
284};
285
286struct bnx2x_bd_chain_next {
287 u32 addr_lo;
288 u32 addr_hi;
289 u8 reserved[8];
290};
291
292#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
293#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
294
295#define CDU_REGION_NUMBER_XCM_AG 2
296#define CDU_REGION_NUMBER_UCM_AG 4
297
298#endif
299
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 000000000000..cee80f694457
--- /dev/null
+++ b/drivers/net/cnic_defs.h
@@ -0,0 +1,580 @@
1
2/* cnic_defs.h: Broadcom CNIC core network driver.
3 *
4 * Copyright (c) 2006-2009 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 */
11
12#ifndef CNIC_DEFS_H
13#define CNIC_DEFS_H
14
15/* KWQ (kernel work queue) request op codes */
16#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
17
18#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
19#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
20#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
21#define L4_KWQE_OPCODE_VALUE_RESET (53)
22#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
23#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
24#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
25
26#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
27#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
28#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
29
30#define L5CM_RAMROD_CMD_ID_BASE (0x80)
31#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
32#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
33#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
34#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
35#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
36
37/* KCQ (kernel completion queue) response op codes */
38#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
39#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
40#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
41#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
42#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
43#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
44#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
45
46#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
47#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
48#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
49
50/* KCQ (kernel completion queue) completion status */
51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
53
54#define L4_LAYER_CODE (4)
55#define L2_LAYER_CODE (2)
56
57/*
58 * L4 KCQ CQE
59 */
60struct l4_kcq {
61 u32 cid;
62 u32 pg_cid;
63 u32 conn_id;
64 u32 pg_host_opaque;
65#if defined(__BIG_ENDIAN)
66 u16 status;
67 u16 reserved1;
68#elif defined(__LITTLE_ENDIAN)
69 u16 reserved1;
70 u16 status;
71#endif
72 u32 reserved2[2];
73#if defined(__BIG_ENDIAN)
74 u8 flags;
75#define L4_KCQ_RESERVED3 (0x7<<0)
76#define L4_KCQ_RESERVED3_SHIFT 0
77#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
78#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
79#define L4_KCQ_LAYER_CODE (0x7<<4)
80#define L4_KCQ_LAYER_CODE_SHIFT 4
81#define L4_KCQ_RESERVED4 (0x1<<7)
82#define L4_KCQ_RESERVED4_SHIFT 7
83 u8 op_code;
84 u16 qe_self_seq;
85#elif defined(__LITTLE_ENDIAN)
86 u16 qe_self_seq;
87 u8 op_code;
88 u8 flags;
89#define L4_KCQ_RESERVED3 (0xF<<0)
90#define L4_KCQ_RESERVED3_SHIFT 0
91#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
92#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
93#define L4_KCQ_LAYER_CODE (0x7<<4)
94#define L4_KCQ_LAYER_CODE_SHIFT 4
95#define L4_KCQ_RESERVED4 (0x1<<7)
96#define L4_KCQ_RESERVED4_SHIFT 7
97#endif
98};
99
100
101/*
102 * L4 KCQ CQE PG upload
103 */
104struct l4_kcq_upload_pg {
105 u32 pg_cid;
106#if defined(__BIG_ENDIAN)
107 u16 pg_status;
108 u16 pg_ipid_count;
109#elif defined(__LITTLE_ENDIAN)
110 u16 pg_ipid_count;
111 u16 pg_status;
112#endif
113 u32 reserved1[5];
114#if defined(__BIG_ENDIAN)
115 u8 flags;
116#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
117#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
118#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
119#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
120#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
121#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
122 u8 op_code;
123 u16 qe_self_seq;
124#elif defined(__LITTLE_ENDIAN)
125 u16 qe_self_seq;
126 u8 op_code;
127 u8 flags;
128#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
129#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
130#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
131#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
132#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
133#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
134#endif
135};
136
137
138/*
139 * Gracefully close the connection request
140 */
141struct l4_kwq_close_req {
142#if defined(__BIG_ENDIAN)
143 u8 flags;
144#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
145#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
146#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
147#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
148#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
149#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
150 u8 op_code;
151 u16 reserved0;
152#elif defined(__LITTLE_ENDIAN)
153 u16 reserved0;
154 u8 op_code;
155 u8 flags;
156#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
157#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
158#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
159#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
160#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
161#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
162#endif
163 u32 cid;
164 u32 reserved2[6];
165};
166
167
168/*
169 * The first request to be passed in order to establish connection in option2
170 */
171struct l4_kwq_connect_req1 {
172#if defined(__BIG_ENDIAN)
173 u8 flags;
174#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
175#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
176#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
177#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
178#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
179#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
180 u8 op_code;
181 u8 reserved0;
182 u8 conn_flags;
183#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
184#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
185#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
186#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
187#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
188#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
189#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
190#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
191#elif defined(__LITTLE_ENDIAN)
192 u8 conn_flags;
193#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
194#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
195#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
196#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
197#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
198#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
199#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
200#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
201 u8 reserved0;
202 u8 op_code;
203 u8 flags;
204#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
205#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
206#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
207#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
208#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
209#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
210#endif
211 u32 cid;
212 u32 pg_cid;
213 u32 src_ip;
214 u32 dst_ip;
215#if defined(__BIG_ENDIAN)
216 u16 dst_port;
217 u16 src_port;
218#elif defined(__LITTLE_ENDIAN)
219 u16 src_port;
220 u16 dst_port;
221#endif
222#if defined(__BIG_ENDIAN)
223 u8 rsrv1[3];
224 u8 tcp_flags;
225#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
226#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
227#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
228#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
229#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
230#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
231#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
232#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
233#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
234#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
235#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
236#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
237#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
238#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
239#elif defined(__LITTLE_ENDIAN)
240 u8 tcp_flags;
241#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
242#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
243#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
244#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
245#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
246#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
247#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
248#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
249#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
250#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
251#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
252#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
253#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
254#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
255 u8 rsrv1[3];
256#endif
257 u32 rsrv2;
258};
259
260
261/*
261 * The second (optional) request to be passed in order to establish
263 * connection in option2 - for IPv6 only
264 */
265struct l4_kwq_connect_req2 {
266#if defined(__BIG_ENDIAN)
267 u8 flags;
268#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
269#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
270#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
271#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
272#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
273#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
274 u8 op_code;
275 u8 reserved0;
276 u8 rsrv;
277#elif defined(__LITTLE_ENDIAN)
278 u8 rsrv;
279 u8 reserved0;
280 u8 op_code;
281 u8 flags;
282#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
283#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
284#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
285#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
286#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
287#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
288#endif
289 u32 reserved2;
290 u32 src_ip_v6_2;
291 u32 src_ip_v6_3;
292 u32 src_ip_v6_4;
293 u32 dst_ip_v6_2;
294 u32 dst_ip_v6_3;
295 u32 dst_ip_v6_4;
296};
297
298
299/*
300 * The third (and last) request to be passed in order to establish a
301 * connection in option2
302 */
303struct l4_kwq_connect_req3 {
304#if defined(__BIG_ENDIAN)
305 u8 flags;
306#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
307#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
308#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
309#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
310#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
311#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
312 u8 op_code;
313 u16 reserved0;
314#elif defined(__LITTLE_ENDIAN)
315 u16 reserved0;
316 u8 op_code;
317 u8 flags;
318#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
319#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
320#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
321#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
322#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
323#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
324#endif
325 u32 ka_timeout;
326	u32 ka_interval;
327#if defined(__BIG_ENDIAN)
328 u8 snd_seq_scale;
329 u8 ttl;
330 u8 tos;
331 u8 ka_max_probe_count;
332#elif defined(__LITTLE_ENDIAN)
333 u8 ka_max_probe_count;
334 u8 tos;
335 u8 ttl;
336 u8 snd_seq_scale;
337#endif
338#if defined(__BIG_ENDIAN)
339 u16 pmtu;
340 u16 mss;
341#elif defined(__LITTLE_ENDIAN)
342 u16 mss;
343 u16 pmtu;
344#endif
345 u32 rcv_buf;
346 u32 snd_buf;
347 u32 seed;
348};
349
350
351/*
352 * a KWQE request to offload a PG connection
353 */
354struct l4_kwq_offload_pg {
355#if defined(__BIG_ENDIAN)
356 u8 flags;
357#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
358#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
359#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
360#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
361#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
362#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
363 u8 op_code;
364 u16 reserved0;
365#elif defined(__LITTLE_ENDIAN)
366 u16 reserved0;
367 u8 op_code;
368 u8 flags;
369#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
370#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
371#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
372#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
373#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
374#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
375#endif
376#if defined(__BIG_ENDIAN)
377 u8 l2hdr_nbytes;
378 u8 pg_flags;
379#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
380#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
381#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
382#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
383#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
384#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
385 u8 da0;
386 u8 da1;
387#elif defined(__LITTLE_ENDIAN)
388 u8 da1;
389 u8 da0;
390 u8 pg_flags;
391#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
392#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
393#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
394#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
395#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
396#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
397 u8 l2hdr_nbytes;
398#endif
399#if defined(__BIG_ENDIAN)
400 u8 da2;
401 u8 da3;
402 u8 da4;
403 u8 da5;
404#elif defined(__LITTLE_ENDIAN)
405 u8 da5;
406 u8 da4;
407 u8 da3;
408 u8 da2;
409#endif
410#if defined(__BIG_ENDIAN)
411 u8 sa0;
412 u8 sa1;
413 u8 sa2;
414 u8 sa3;
415#elif defined(__LITTLE_ENDIAN)
416 u8 sa3;
417 u8 sa2;
418 u8 sa1;
419 u8 sa0;
420#endif
421#if defined(__BIG_ENDIAN)
422 u8 sa4;
423 u8 sa5;
424 u16 etype;
425#elif defined(__LITTLE_ENDIAN)
426 u16 etype;
427 u8 sa5;
428 u8 sa4;
429#endif
430#if defined(__BIG_ENDIAN)
431 u16 vlan_tag;
432 u16 ipid_start;
433#elif defined(__LITTLE_ENDIAN)
434 u16 ipid_start;
435 u16 vlan_tag;
436#endif
437#if defined(__BIG_ENDIAN)
438 u16 ipid_count;
439 u16 reserved3;
440#elif defined(__LITTLE_ENDIAN)
441 u16 reserved3;
442 u16 ipid_count;
443#endif
444 u32 host_opaque;
445};
446
447
448/*
449 * a KWQE request to abortively close the connection
450 */
451struct l4_kwq_reset_req {
452#if defined(__BIG_ENDIAN)
453 u8 flags;
454#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
455#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
456#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
457#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
458#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
459#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
460 u8 op_code;
461 u16 reserved0;
462#elif defined(__LITTLE_ENDIAN)
463 u16 reserved0;
464 u8 op_code;
465 u8 flags;
466#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
467#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
468#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
469#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
470#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
471#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
472#endif
473 u32 cid;
474 u32 reserved2[6];
475};
476
477
478/*
479 * a KWQE request to update a PG connection
480 */
481struct l4_kwq_update_pg {
482#if defined(__BIG_ENDIAN)
483 u8 flags;
484#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
485#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
486#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
487#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
488#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
489#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
490 u8 opcode;
491 u16 oper16;
492#elif defined(__LITTLE_ENDIAN)
493 u16 oper16;
494 u8 opcode;
495 u8 flags;
496#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
497#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
498#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
499#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
500#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
501#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
502#endif
503 u32 pg_cid;
504 u32 pg_host_opaque;
505#if defined(__BIG_ENDIAN)
506 u8 pg_valids;
507#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
508#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
509#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
510#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
511#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
512#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
513 u8 pg_unused_a;
514 u16 pg_ipid_count;
515#elif defined(__LITTLE_ENDIAN)
516 u16 pg_ipid_count;
517 u8 pg_unused_a;
518 u8 pg_valids;
519#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
520#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
521#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
522#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
523#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
524#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
525#endif
526#if defined(__BIG_ENDIAN)
527 u16 reserverd3;
528 u8 da0;
529 u8 da1;
530#elif defined(__LITTLE_ENDIAN)
531 u8 da1;
532 u8 da0;
533 u16 reserverd3;
534#endif
535#if defined(__BIG_ENDIAN)
536 u8 da2;
537 u8 da3;
538 u8 da4;
539 u8 da5;
540#elif defined(__LITTLE_ENDIAN)
541 u8 da5;
542 u8 da4;
543 u8 da3;
544 u8 da2;
545#endif
546 u32 reserved4;
547 u32 reserved5;
548};
549
550
551/*
552 * a KWQE request to upload a PG or L4 context
553 */
554struct l4_kwq_upload {
555#if defined(__BIG_ENDIAN)
556 u8 flags;
557#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
558#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
559#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
560#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
561#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
562#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
563 u8 opcode;
564 u16 oper16;
565#elif defined(__LITTLE_ENDIAN)
566 u16 oper16;
567 u8 opcode;
568 u8 flags;
569#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
570#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
571#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
572#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
573#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
574#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
575#endif
576 u32 cid;
577 u32 reserved2[6];
578};
579
580#endif /* CNIC_DEFS_H */
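
All of the KWQE structures above follow one encoding scheme: every sub-byte field gets a paired mask and _SHIFT macro, and the byte layout is mirrored for big- and little-endian builds. A minimal sketch of composing such a flags byte with these macros, assuming the L4 layer-code value of 4 that the cnic driver uses elsewhere (the function name is illustrative):

static void fill_connect_req1_flags(struct l4_kwq_connect_req1 *req)
{
	/* the 3-bit layer code occupies bits 6:4 of the flags byte */
	req->flags = (4 << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) &
		     L4_KWQ_CONNECT_REQ1_LAYER_CODE;
	/* single-bit fields are set with the mask macro itself */
	req->flags |= L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT;
	req->tcp_flags = L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK |
			 L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
}
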
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
new file mode 100644
index 000000000000..06380963a34e
--- /dev/null
+++ b/drivers/net/cnic_if.h
@@ -0,0 +1,299 @@
1/* cnic_if.h: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 */
10
11
12#ifndef CNIC_IF_H
13#define CNIC_IF_H
14
15#define CNIC_MODULE_VERSION "2.0.0"
16#define CNIC_MODULE_RELDATE "May 21, 2009"
17
18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1
20#define CNIC_ULP_L4 2
21#define MAX_CNIC_ULP_TYPE_EXT 2
22#define MAX_CNIC_ULP_TYPE 3
23
24struct kwqe {
25 u32 kwqe_op_flag;
26
27#define KWQE_OPCODE_MASK 0x00ff0000
28#define KWQE_OPCODE_SHIFT 16
29#define KWQE_FLAGS_LAYER_SHIFT 28
30#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
31
32 u32 kwqe_info0;
33 u32 kwqe_info1;
34 u32 kwqe_info2;
35 u32 kwqe_info3;
36 u32 kwqe_info4;
37 u32 kwqe_info5;
38 u32 kwqe_info6;
39};
40
41struct kwqe_16 {
42 u32 kwqe_info0;
43 u32 kwqe_info1;
44 u32 kwqe_info2;
45 u32 kwqe_info3;
46};
47
48struct kcqe {
49 u32 kcqe_info0;
50 u32 kcqe_info1;
51 u32 kcqe_info2;
52 u32 kcqe_info3;
53 u32 kcqe_info4;
54 u32 kcqe_info5;
55 u32 kcqe_info6;
56 u32 kcqe_op_flag;
57 #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
58 #define KCQE_FLAGS_LAYER_MASK (0x7<<28)
59 #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
60 #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
61 #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
62 #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
63 #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
64 #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
65 #define KCQE_FLAGS_NEXT (1<<31)
66 #define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
67 #define KCQE_FLAGS_OPCODE_SHIFT (16)
68 #define KCQE_OPCODE(op) \
69 (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
70};
71
72#define MAX_CNIC_CTL_DATA 64
73#define MAX_DRV_CTL_DATA 64
74
75#define CNIC_CTL_STOP_CMD 1
76#define CNIC_CTL_START_CMD 2
77#define CNIC_CTL_COMPLETION_CMD 3
78
79#define DRV_CTL_IO_WR_CMD 0x101
80#define DRV_CTL_IO_RD_CMD 0x102
81#define DRV_CTL_CTX_WR_CMD 0x103
82#define DRV_CTL_CTXTBL_WR_CMD 0x104
83#define DRV_CTL_COMPLETION_CMD 0x105
84
85struct cnic_ctl_completion {
86 u32 cid;
87};
88
89struct drv_ctl_completion {
90 u32 comp_count;
91};
92
93struct cnic_ctl_info {
94 int cmd;
95 union {
96 struct cnic_ctl_completion comp;
97 char bytes[MAX_CNIC_CTL_DATA];
98 } data;
99};
100
101struct drv_ctl_io {
102 u32 cid_addr;
103 u32 offset;
104 u32 data;
105 dma_addr_t dma_addr;
106};
107
108struct drv_ctl_info {
109 int cmd;
110 union {
111 struct drv_ctl_completion comp;
112 struct drv_ctl_io io;
113 char bytes[MAX_DRV_CTL_DATA];
114 } data;
115};
116
117struct cnic_ops {
118 struct module *cnic_owner;
119 /* Calls to these functions are protected by RCU. When
120 * unregistering, we wait for any calls to complete before
121 * continuing.
122 */
123 int (*cnic_handler)(void *, void *);
124 int (*cnic_ctl)(void *, struct cnic_ctl_info *);
125};
126
127#define MAX_CNIC_VEC 8
128
129struct cnic_irq {
130 unsigned int vector;
131 void *status_blk;
132 u32 status_blk_num;
133 u32 irq_flags;
134#define CNIC_IRQ_FL_MSIX 0x00000001
135};
136
137struct cnic_eth_dev {
138 struct module *drv_owner;
139 u32 drv_state;
140#define CNIC_DRV_STATE_REGD 0x00000001
141#define CNIC_DRV_STATE_USING_MSIX 0x00000002
142 u32 chip_id;
143 u32 max_kwqe_pending;
144 struct pci_dev *pdev;
145 void __iomem *io_base;
146
147 u32 ctx_tbl_offset;
148 u32 ctx_tbl_len;
149 int ctx_blk_size;
150 u32 starting_cid;
151 u32 max_iscsi_conn;
152 u32 max_fcoe_conn;
153 u32 max_rdma_conn;
154 u32 reserved0[2];
155
156 int num_irq;
157 struct cnic_irq irq_arr[MAX_CNIC_VEC];
158 int (*drv_register_cnic)(struct net_device *,
159 struct cnic_ops *, void *);
160 int (*drv_unregister_cnic)(struct net_device *);
161 int (*drv_submit_kwqes_32)(struct net_device *,
162 struct kwqe *[], u32);
163 int (*drv_submit_kwqes_16)(struct net_device *,
164 struct kwqe_16 *[], u32);
165 int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
166 unsigned long reserved1[2];
167};
168
169struct cnic_sockaddr {
170 union {
171 struct sockaddr_in v4;
172 struct sockaddr_in6 v6;
173 } local;
174 union {
175 struct sockaddr_in v4;
176 struct sockaddr_in6 v6;
177 } remote;
178};
179
180struct cnic_sock {
181 struct cnic_dev *dev;
182 void *context;
183 u32 src_ip[4];
184 u32 dst_ip[4];
185 u16 src_port;
186 u16 dst_port;
187 u16 vlan_id;
188 unsigned char old_ha[6];
189 unsigned char ha[6];
190 u32 mtu;
191 u32 cid;
192 u32 l5_cid;
193 u32 pg_cid;
194 int ulp_type;
195
196 u32 ka_timeout;
197 u32 ka_interval;
198 u8 ka_max_probe_count;
199 u8 tos;
200 u8 ttl;
201 u8 snd_seq_scale;
202 u32 rcv_buf;
203 u32 snd_buf;
204 u32 seed;
205
206 unsigned long tcp_flags;
207#define SK_TCP_NO_DELAY_ACK 0x1
208#define SK_TCP_KEEP_ALIVE 0x2
209#define SK_TCP_NAGLE 0x4
210#define SK_TCP_TIMESTAMP 0x8
211#define SK_TCP_SACK 0x10
212#define SK_TCP_SEG_SCALING 0x20
213 unsigned long flags;
214#define SK_F_INUSE 0
215#define SK_F_OFFLD_COMPLETE 1
216#define SK_F_OFFLD_SCHED 2
217#define SK_F_PG_OFFLD_COMPLETE 3
218#define SK_F_CONNECT_START 4
219#define SK_F_IPV6 5
220#define SK_F_CLOSING 7
221
222 atomic_t ref_count;
223 u32 state;
224 struct kwqe kwqe1;
225 struct kwqe kwqe2;
226 struct kwqe kwqe3;
227};
228
229struct cnic_dev {
230 struct net_device *netdev;
231 struct pci_dev *pcidev;
232 void __iomem *regview;
233 struct list_head list;
234
235 int (*register_device)(struct cnic_dev *dev, int ulp_type,
236 void *ulp_ctx);
237 int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
238 int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
239 u32 num_wqes);
240 int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
241 u32 num_wqes);
242
243 int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
244 void *);
245 int (*cm_destroy)(struct cnic_sock *);
246 int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
247 int (*cm_abort)(struct cnic_sock *);
248 int (*cm_close)(struct cnic_sock *);
249 struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
250 int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
251 char *data, u16 data_size);
252 unsigned long flags;
253#define CNIC_F_CNIC_UP 1
254#define CNIC_F_BNX2_CLASS 3
255#define CNIC_F_BNX2X_CLASS 4
256 atomic_t ref_count;
257 u8 mac_addr[6];
258
259 int max_iscsi_conn;
260 int max_fcoe_conn;
261 int max_rdma_conn;
262
263 void *cnic_priv;
264};
265
266#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
267#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
268#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
269#define CNIC_RD(dev, off) readl(dev->regview + off)
270#define CNIC_RD16(dev, off) readw(dev->regview + off)
271
272struct cnic_ulp_ops {
273 /* Calls to these functions are protected by RCU. When
274 * unregistering, we wait for any calls to complete before
275 * continuing.
276 */
277
278 void (*cnic_init)(struct cnic_dev *dev);
279 void (*cnic_exit)(struct cnic_dev *dev);
280 void (*cnic_start)(void *ulp_ctx);
281 void (*cnic_stop)(void *ulp_ctx);
282 void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
283 u32 num_cqes);
284 void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
285 void (*cm_connect_complete)(struct cnic_sock *);
286 void (*cm_close_complete)(struct cnic_sock *);
287 void (*cm_abort_complete)(struct cnic_sock *);
288 void (*cm_remote_close)(struct cnic_sock *);
289 void (*cm_remote_abort)(struct cnic_sock *);
290 void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
291 char *data, u16 data_size);
292 struct module *owner;
293};
294
295extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
296
297extern int cnic_unregister_driver(int ulp_type);
298
299#endif
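
The two exported functions at the end are how an upper-layer protocol driver binds to cnic: it registers a struct cnic_ulp_ops for its ULP type and later unregisters it. A minimal registration sketch follows; the my_ulp_* names are hypothetical, and only the types, constants, and the two extern functions come from this header:

static void my_ulp_init(struct cnic_dev *dev)
{
	/* invoked for each cnic device; a real ULP would typically
	 * call dev->register_device(dev, CNIC_ULP_ISCSI, ctx) here */
}

static void my_ulp_exit(struct cnic_dev *dev)
{
	/* mirror of my_ulp_init(): detach from the device */
}

static struct cnic_ulp_ops my_ulp_ops = {
	.cnic_init = my_ulp_init,
	.cnic_exit = my_ulp_exit,
	.owner     = THIS_MODULE,
};

static int __init my_ulp_module_init(void)
{
	return cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
}

static void __exit my_ulp_module_exit(void)
{
	cnic_unregister_driver(CNIC_ULP_ISCSI);
}
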
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index e52a2018e91e..f7929e89eb03 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2921,7 +2921,7 @@ static int e100_resume(struct pci_dev *pdev)
2921 /* ack any pending wake events, disable PME */ 2921 /* ack any pending wake events, disable PME */
2922 pci_enable_wake(pdev, 0, 0); 2922 pci_enable_wake(pdev, 0, 0);
2923 2923
2924 /* disbale reverse auto-negotiation */ 2924 /* disable reverse auto-negotiation */
2925 if (nic->phy == phy_82552_v) { 2925 if (nic->phy == phy_82552_v) {
2926 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, 2926 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
2927 E100_82552_SMARTSPEED); 2927 E100_82552_SMARTSPEED);
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d6e491bc58c9..981936c1fb46 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -62,7 +62,7 @@ struct e1000_info;
62 e_printk(KERN_NOTICE, adapter, format, ## arg) 62 e_printk(KERN_NOTICE, adapter, format, ## arg)
63 63
64 64
65/* Interrupt modes, as used by the IntMode paramter */ 65/* Interrupt modes, as used by the IntMode parameter */
66#define E1000E_INT_MODE_LEGACY 0 66#define E1000E_INT_MODE_LEGACY 0
67#define E1000E_INT_MODE_MSI 1 67#define E1000E_INT_MODE_MSI 1
68#define E1000E_INT_MODE_MSIX 2 68#define E1000E_INT_MODE_MSIX 2
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 16a41389575a..78952f8324e2 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -268,7 +268,7 @@ struct ehea_qp_init_attr {
268}; 268};
269 269
270/* 270/*
271 * Event Queue attributes, passed as paramter 271 * Event Queue attributes, passed as parameter
272 */ 272 */
273struct ehea_eq_attr { 273struct ehea_eq_attr {
274 u32 type; 274 u32 type;
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 2ad6cd756539..8e9b67ebbf8b 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -45,7 +45,7 @@ struct igbvf_adapter;
45/* Interrupt defines */ 45/* Interrupt defines */
46#define IGBVF_START_ITR 648 /* ~6000 ints/sec */ 46#define IGBVF_START_ITR 648 /* ~6000 ints/sec */
47 47
48/* Interrupt modes, as used by the IntMode paramter */ 48/* Interrupt modes, as used by the IntMode parameter */
49#define IGBVF_INT_MODE_LEGACY 0 49#define IGBVF_INT_MODE_LEGACY 0
50#define IGBVF_INT_MODE_MSI 1 50#define IGBVF_INT_MODE_MSI 1
51#define IGBVF_INT_MODE_MSIX 2 51#define IGBVF_INT_MODE_MSIX 2
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index dd9318f19497..dfc2541bb556 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -514,7 +514,7 @@ enum ipg_regs {
514#define IPG_DMALIST_ALIGN_PAD 0x07 514#define IPG_DMALIST_ALIGN_PAD 0x07
515#define IPG_MULTICAST_HASHTABLE_SIZE 0x40 515#define IPG_MULTICAST_HASHTABLE_SIZE 0x40
516 516
517/* Number of miliseconds to wait after issuing a software reset. 517/* Number of milliseconds to wait after issuing a software reset.
518 * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation. 518 * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation.
519 */ 519 */
520#define IPG_AC_RESETWAIT 0x05 520#define IPG_AC_RESETWAIT 0x05
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 0a7e78ade63f..e02bafdd3682 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -367,7 +367,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
367 int i; 367 int i;
368 368
369 /* If we haven't received a specific coalescing setting 369 /* If we haven't received a specific coalescing setting
370 * (module param), we set the moderation paramters as follows: 370 * (module param), we set the moderation parameters as follows:
371 * - moder_cnt is set to the number of mtu sized packets to 371 * - moder_cnt is set to the number of mtu sized packets to
372 * satisfy our coelsing target. 372 * satisfy our coelsing target.
373 * - moder_time is set to a fixed value. 373 * - moder_time is set to a fixed value.
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index dee188761a3c..b9ceddde46c0 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -497,8 +497,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
497 if (eq_table->have_irq) 497 if (eq_table->have_irq)
498 free_irq(dev->pdev->irq, dev); 498 free_irq(dev->pdev->irq, dev);
499 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 499 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
500 if (eq_table->eq[i].have_irq) 500 if (eq_table->eq[i].have_irq) {
501 free_irq(eq_table->eq[i].irq, eq_table->eq + i); 501 free_irq(eq_table->eq[i].irq, eq_table->eq + i);
502 eq_table->eq[i].have_irq = 0;
503 }
502 504
503 kfree(eq_table->irq_names); 505 kfree(eq_table->irq_names);
504} 506}
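
The added assignment makes mlx4_free_irqs() idempotent: clearing have_irq right after free_irq() means a second pass over the EQ table (for example, an error-unwind path that runs after a normal teardown) skips the vector instead of freeing its handler twice. The flag-guarded teardown pattern in isolation, with illustrative names:

	if (eq->have_irq) {
		free_irq(eq->irq, eq);
		eq->have_irq = 0;	/* a repeated teardown is now a no-op */
	}
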
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 30bea9689694..018348c01193 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
100MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " 100MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
101 "(0/1, default 0)"); 101 "(0/1, default 0)");
102 102
103static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
104module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
105MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
106
103int mlx4_check_port_params(struct mlx4_dev *dev, 107int mlx4_check_port_params(struct mlx4_dev *dev,
104 enum mlx4_port_type *port_type) 108 enum mlx4_port_type *port_type)
105{ 109{
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
203 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 207 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
204 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 208 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
205 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 209 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
210 dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
206 dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, 211 dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
207 MLX4_MTT_ENTRY_PER_SEG); 212 dev->caps.mtts_per_seg);
208 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 213 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
209 dev->caps.reserved_uars = dev_cap->reserved_uars; 214 dev->caps.reserved_uars = dev_cap->reserved_uars;
210 dev->caps.reserved_pds = dev_cap->reserved_pds; 215 dev->caps.reserved_pds = dev_cap->reserved_pds;
211 dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; 216 dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
212 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 217 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
213 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 218 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
214 dev->caps.flags = dev_cap->flags; 219 dev->caps.flags = dev_cap->flags;
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
1304 return -1; 1309 return -1;
1305 } 1310 }
1306 1311
1312 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
1313 printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
1314 return -1;
1315 }
1316
1307 return 0; 1317 return 0;
1308} 1318}
1309 1319
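
The new module parameter trades translation granularity against MTT table consumption: every memory-translation segment now holds 1 << log_mtts_per_seg entries instead of the fixed MLX4_MTT_ENTRY_PER_SEG. A worked example under assumed values (the 8-byte MTT entry size is an assumption for illustration):

	int log_mtts_per_seg = 3;                  /* module parameter, validated to 1..5 */
	u32 mtts_per_seg = 1 << log_mtts_per_seg;  /* 8 MTT entries per segment */
	u32 seg_bytes = mtts_per_seg * 8;          /* 64-byte segments at 8 bytes per entry */
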
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0a467785f065..5887e4764d22 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
209 } else 209 } else
210 mtt->page_shift = page_shift; 210 mtt->page_shift = page_shift;
211 211
212 for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1) 212 for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
213 ++mtt->order; 213 ++mtt->order;
214 214
215 mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); 215 mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
350 mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | 350 mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
351 MLX4_MPT_PD_FLAG_RAE); 351 MLX4_MPT_PD_FLAG_RAE);
352 mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) * 352 mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
353 MLX4_MTT_ENTRY_PER_SEG); 353 dev->caps.mtts_per_seg);
354 } else { 354 } else {
355 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); 355 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
356 } 356 }
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
391 (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) 391 (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
392 return -EINVAL; 392 return -EINVAL;
393 393
394 if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1)) 394 if (start_index & (dev->caps.mtts_per_seg - 1))
395 return -EINVAL; 395 return -EINVAL;
396 396
397 mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg + 397 mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index cebdf3243ca1..bd22df95adf9 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
98 profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; 98 profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
99 profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; 99 profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
100 profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; 100 profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
101 profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; 101 profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
102 profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE; 102 profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
103 103
104 profile[MLX4_RES_QP].num = request->num_qp; 104 profile[MLX4_RES_QP].num = request->num_qp;
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 8754e44cadae..3bd0b5933d59 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3242,8 +3242,8 @@ struct niu {
3242 struct niu_parent *parent; 3242 struct niu_parent *parent;
3243 3243
3244 u32 flags; 3244 u32 flags;
3245#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removebale PHY detected*/ 3245#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removeable PHY detected*/
3246#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removebale PHY */ 3246#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removeable PHY */
3247#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */ 3247#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */
3248#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */ 3248#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */
3249#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */ 3249#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index b9a5f59d6c9b..90d1f76c0e8b 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3224,7 +3224,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3224 3224
3225 if (value & RST_FO_FR) { 3225 if (value & RST_FO_FR) {
3226 QPRINTK(qdev, IFDOWN, ERR, 3226 QPRINTK(qdev, IFDOWN, ERR,
3227 "ETIMEOUT!!! errored out of resetting the chip!\n"); 3227 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3228 status = -ETIMEDOUT; 3228 status = -ETIMEDOUT;
3229 } 3229 }
3230 3230
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index a67c14a7befd..71afbf8b9c50 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -141,7 +141,7 @@ end:
141/* We are being asked by firmware to accept 141/* We are being asked by firmware to accept
142 * a change to the port. This is only 142 * a change to the port. This is only
143 * a change to max frame sizes (Tx/Rx), pause 143 * a change to max frame sizes (Tx/Rx), pause
144 * paramters, or loopback mode. We wake up a worker 144 * parameters, or loopback mode. We wake up a worker
145 * to handler processing this since a mailbox command 145 * to handler processing this since a mailbox command
146 * will need to be sent to ACK the request. 146 * will need to be sent to ACK the request.
147 */ 147 */
@@ -371,7 +371,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
371 /* We are being asked by firmware to accept 371 /* We are being asked by firmware to accept
372 * a change to the port. This is only 372 * a change to the port. This is only
373 * a change to max frame sizes (Tx/Rx), pause 373 * a change to max frame sizes (Tx/Rx), pause
374 * paramters, or loopback mode. 374 * parameters, or loopback mode.
375 */ 375 */
376 case AEN_IDC_REQ: 376 case AEN_IDC_REQ:
377 status = ql_idc_req_aen(qdev); 377 status = ql_idc_req_aen(qdev);
@@ -380,7 +380,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
380 /* Process and inbound IDC event. 380 /* Process and inbound IDC event.
381 * This will happen when we're trying to 381 * This will happen when we're trying to
382 * change tx/rx max frame size, change pause 382 * change tx/rx max frame size, change pause
383 * paramters or loopback mode. 383 * parameters or loopback mode.
384 */ 384 */
385 case AEN_IDC_CMPLT: 385 case AEN_IDC_CMPLT:
386 case AEN_IDC_EXT: 386 case AEN_IDC_EXT:
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 007c881896d2..35196faa084e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -66,7 +66,6 @@ static const int multicast_filter_limit = 32;
66#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 66#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
67#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 67#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
68#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */ 68#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
69#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
70#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ 69#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
71#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ 70#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
72 71
@@ -2366,10 +2365,10 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
2366 return cmd; 2365 return cmd;
2367} 2366}
2368 2367
2369static void rtl_set_rx_max_size(void __iomem *ioaddr) 2368static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
2370{ 2369{
2371 /* Low hurts. Let's disable the filtering. */ 2370 /* Low hurts. Let's disable the filtering. */
2372 RTL_W16(RxMaxSize, 16383); 2371 RTL_W16(RxMaxSize, rx_buf_sz);
2373} 2372}
2374 2373
2375static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) 2374static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
@@ -2416,7 +2415,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
2416 2415
2417 RTL_W8(EarlyTxThres, EarlyTxThld); 2416 RTL_W8(EarlyTxThres, EarlyTxThld);
2418 2417
2419 rtl_set_rx_max_size(ioaddr); 2418 rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
2420 2419
2421 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || 2420 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
2422 (tp->mac_version == RTL_GIGA_MAC_VER_02) || 2421 (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
@@ -2677,7 +2676,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
2677 2676
2678 RTL_W8(EarlyTxThres, EarlyTxThld); 2677 RTL_W8(EarlyTxThres, EarlyTxThld);
2679 2678
2680 rtl_set_rx_max_size(ioaddr); 2679 rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
2681 2680
2682 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; 2681 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
2683 2682
@@ -2855,7 +2854,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
2855 2854
2856 RTL_W8(EarlyTxThres, EarlyTxThld); 2855 RTL_W8(EarlyTxThres, EarlyTxThld);
2857 2856
2858 rtl_set_rx_max_size(ioaddr); 2857 rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
2859 2858
2860 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; 2859 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
2861 2860
diff --git a/drivers/net/skfp/h/smt.h b/drivers/net/skfp/h/smt.h
index 1ff589988d10..2976757a36fb 100644
--- a/drivers/net/skfp/h/smt.h
+++ b/drivers/net/skfp/h/smt.h
@@ -413,7 +413,7 @@ struct smt_p_reason {
413#define SMT_RDF_SUCCESS 0x00000003 /* success (PMF) */ 413#define SMT_RDF_SUCCESS 0x00000003 /* success (PMF) */
414#define SMT_RDF_BADSET 0x00000004 /* bad set count (PMF) */ 414#define SMT_RDF_BADSET 0x00000004 /* bad set count (PMF) */
415#define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */ 415#define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */
416#define SMT_RDF_NOPARAM 0x6 /* paramter not supported (PMF) */ 416#define SMT_RDF_NOPARAM 0x6 /* parameter not supported (PMF) */
417#define SMT_RDF_RANGE 0x8 /* out of range */ 417#define SMT_RDF_RANGE 0x8 /* out of range */
418#define SMT_RDF_AUTHOR 0x9 /* not autohorized */ 418#define SMT_RDF_AUTHOR 0x9 /* not autohorized */
419#define SMT_RDF_LENGTH 0x0a /* length error */ 419#define SMT_RDF_LENGTH 0x0a /* length error */
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 329f890e2903..f1f773b17fe1 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -45,7 +45,8 @@
45 defined(CONFIG_MACH_ZYLONITE) ||\ 45 defined(CONFIG_MACH_ZYLONITE) ||\
46 defined(CONFIG_MACH_LITTLETON) ||\ 46 defined(CONFIG_MACH_LITTLETON) ||\
47 defined(CONFIG_MACH_ZYLONITE2) ||\ 47 defined(CONFIG_MACH_ZYLONITE2) ||\
48 defined(CONFIG_ARCH_VIPER) 48 defined(CONFIG_ARCH_VIPER) ||\
49 defined(CONFIG_MACH_STARGATE2)
49 50
50#include <asm/mach-types.h> 51#include <asm/mach-types.h>
51 52
@@ -73,7 +74,7 @@
73/* We actually can't write halfwords properly if not word aligned */ 74/* We actually can't write halfwords properly if not word aligned */
74static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) 75static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
75{ 76{
76 if (machine_is_mainstone() && reg & 2) { 77 if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
77 unsigned int v = val << 16; 78 unsigned int v = val << 16;
78 v |= readl(ioaddr + (reg & ~2)) & 0xffff; 79 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
79 writel(v, ioaddr + (reg & ~2)); 80 writel(v, ioaddr + (reg & ~2));
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 13dbc59bfe42..b40b6de2d086 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -79,7 +79,7 @@ MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
79MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ; 79MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
80MODULE_FIRMWARE(FW_NAME); 80MODULE_FIRMWARE(FW_NAME);
81 81
82/* Module paramters */ 82/* Module parameters */
83 83
84/* Ring Speed 0,4,16 84/* Ring Speed 0,4,16
85 * 0 = Autosense 85 * 0 = Autosense
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index b358bbbce33a..b3715efdce56 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -169,7 +169,7 @@ static char *open_min_error[] = {
169 "Monitor Contention failer for RPL", "FDX Protocol Error" 169 "Monitor Contention failer for RPL", "FDX Protocol Error"
170}; 170};
171 171
172/* Module paramters */ 172/* Module parameters */
173 173
174/* Ring Speed 0,4,16 174/* Ring Speed 0,4,16
175 * 0 = Autosense 175 * 0 = Autosense
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index c36974925c15..451b54136ede 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -132,7 +132,7 @@ static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost",
132 "Reserved", "Reserved", "No Monitor Detected for RPL", 132 "Reserved", "Reserved", "No Monitor Detected for RPL",
133 "Monitor Contention failer for RPL", "FDX Protocol Error"}; 133 "Monitor Contention failer for RPL", "FDX Protocol Error"};
134 134
135/* Module paramters */ 135/* Module parameters */
136 136
137MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; 137MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
138MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ; 138MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 6fcb500257bc..61fe80dda3e3 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Limitation: 8 * Limitation:
9 * Can only get/set setttings of the first queue. 9 * Can only get/set setttings of the first queue.
10 * Need to re-open the interface manually after changing some paramters. 10 * Need to re-open the interface manually after changing some parameters.
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c94de6243140..22c0585a0319 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -429,7 +429,7 @@ static void rx_complete (struct urb *urb)
429 429
430 /* stalls need manual reset. this is rare ... except that 430 /* stalls need manual reset. this is rare ... except that
431 * when going through USB 2.0 TTs, unplug appears this way. 431 * when going through USB 2.0 TTs, unplug appears this way.
432 * we avoid the highspeed version of the ETIMEOUT/EILSEQ 432 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
433 * storm, recovering as needed. 433 * storm, recovering as needed.
434 */ 434 */
435 case -EPIPE: 435 case -EPIPE:
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f0bb1a4c8323..52198f6797a4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -843,6 +843,10 @@ static int virtnet_probe(struct virtio_device *vdev)
843 int err; 843 int err;
844 struct net_device *dev; 844 struct net_device *dev;
845 struct virtnet_info *vi; 845 struct virtnet_info *vi;
846 struct virtqueue *vqs[3];
847 vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
848 const char *names[] = { "input", "output", "control" };
849 int nvqs;
846 850
847 /* Allocate ourselves a network device with room for our info */ 851 /* Allocate ourselves a network device with room for our info */
848 dev = alloc_etherdev(sizeof(struct virtnet_info)); 852 dev = alloc_etherdev(sizeof(struct virtnet_info));
@@ -903,25 +907,19 @@ static int virtnet_probe(struct virtio_device *vdev)
903 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 907 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
904 vi->mergeable_rx_bufs = true; 908 vi->mergeable_rx_bufs = true;
905 909
906 /* We expect two virtqueues, receive then send. */ 910 /* We expect two virtqueues, receive then send,
907 vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); 911 * and optionally control. */
908 if (IS_ERR(vi->rvq)) { 912 nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
909 err = PTR_ERR(vi->rvq); 913
914 err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
915 if (err)
910 goto free; 916 goto free;
911 }
912 917
913 vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done); 918 vi->rvq = vqs[0];
914 if (IS_ERR(vi->svq)) { 919 vi->svq = vqs[1];
915 err = PTR_ERR(vi->svq);
916 goto free_recv;
917 }
918 920
919 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { 921 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
920 vi->cvq = vdev->config->find_vq(vdev, 2, NULL); 922 vi->cvq = vqs[2];
921 if (IS_ERR(vi->cvq)) {
922 err = PTR_ERR(vi->svq);
923 goto free_send;
924 }
925 923
926 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 924 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
927 dev->features |= NETIF_F_HW_VLAN_FILTER; 925 dev->features |= NETIF_F_HW_VLAN_FILTER;
@@ -939,7 +937,7 @@ static int virtnet_probe(struct virtio_device *vdev)
939 err = register_netdev(dev); 937 err = register_netdev(dev);
940 if (err) { 938 if (err) {
941 pr_debug("virtio_net: registering device failed\n"); 939 pr_debug("virtio_net: registering device failed\n");
942 goto free_ctrl; 940 goto free_vqs;
943 } 941 }
944 942
945 /* Last of all, set up some receive buffers. */ 943 /* Last of all, set up some receive buffers. */
@@ -960,13 +958,8 @@ static int virtnet_probe(struct virtio_device *vdev)
960 958
961unregister: 959unregister:
962 unregister_netdev(dev); 960 unregister_netdev(dev);
963free_ctrl: 961free_vqs:
964 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) 962 vdev->config->del_vqs(vdev);
965 vdev->config->del_vq(vi->cvq);
966free_send:
967 vdev->config->del_vq(vi->svq);
968free_recv:
969 vdev->config->del_vq(vi->rvq);
970free: 963free:
971 free_netdev(dev); 964 free_netdev(dev);
972 return err; 965 return err;
@@ -992,12 +985,10 @@ static void virtnet_remove(struct virtio_device *vdev)
992 985
993 BUG_ON(vi->num != 0); 986 BUG_ON(vi->num != 0);
994 987
995 vdev->config->del_vq(vi->svq);
996 vdev->config->del_vq(vi->rvq);
997 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
998 vdev->config->del_vq(vi->cvq);
999 unregister_netdev(vi->dev); 988 unregister_netdev(vi->dev);
1000 989
990 vdev->config->del_vqs(vi->vdev);
991
1001 while (vi->pages) 992 while (vi->pages)
1002 __free_pages(get_a_page(vi, GFP_KERNEL), 0); 993 __free_pages(get_a_page(vi, GFP_KERNEL), 0);
1003 994
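
The conversion collapses three find_vq() calls and their cascading error labels into a single find_vqs() call; find_vqs() is expected to unwind any queues it already created before returning an error, so one free_vqs label is enough. The setup/teardown pairing, reduced to its core (identifiers as in the hunk above):

	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL };
	const char *names[] = { "input", "output", "control" };

	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
	if (err)
		goto free;		/* nothing was created, nothing to release */
	/* ... device runs ... */
	vdev->config->del_vqs(vdev);	/* tears down every queue in one call */
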
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 08b1a284b690..bb719b6114cb 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -579,7 +579,8 @@ static inline void queue_put_desc(unsigned int queue, u32 phys,
579 debug_desc(phys, desc); 579 debug_desc(phys, desc);
580 BUG_ON(phys & 0x1F); 580 BUG_ON(phys & 0x1F);
581 qmgr_put_entry(queue, phys); 581 qmgr_put_entry(queue, phys);
582 BUG_ON(qmgr_stat_overflow(queue)); 582 /* Don't check for queue overflow here, we've allocated sufficient
583 length and queues >= 32 don't support this check anyway. */
583} 584}
584 585
585 586
@@ -789,10 +790,10 @@ static void hss_hdlc_txdone_irq(void *pdev)
789 free_buffer_irq(port->tx_buff_tab[n_desc]); 790 free_buffer_irq(port->tx_buff_tab[n_desc]);
790 port->tx_buff_tab[n_desc] = NULL; 791 port->tx_buff_tab[n_desc] = NULL;
791 792
792 start = qmgr_stat_empty(port->plat->txreadyq); 793 start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
793 queue_put_desc(port->plat->txreadyq, 794 queue_put_desc(port->plat->txreadyq,
794 tx_desc_phys(port, n_desc), desc); 795 tx_desc_phys(port, n_desc), desc);
795 if (start) { 796 if (start) { /* TX-ready queue was empty */
796#if DEBUG_TX 797#if DEBUG_TX
797 printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit" 798 printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
798 " ready\n", dev->name); 799 " ready\n", dev->name);
@@ -867,13 +868,13 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
867 queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); 868 queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
868 dev->trans_start = jiffies; 869 dev->trans_start = jiffies;
869 870
870 if (qmgr_stat_empty(txreadyq)) { 871 if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
871#if DEBUG_TX 872#if DEBUG_TX
872 printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name); 873 printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
873#endif 874#endif
874 netif_stop_queue(dev); 875 netif_stop_queue(dev);
875 /* we could miss TX ready interrupt */ 876 /* we could miss TX ready interrupt */
876 if (!qmgr_stat_empty(txreadyq)) { 877 if (!qmgr_stat_below_low_watermark(txreadyq)) {
877#if DEBUG_TX 878#if DEBUG_TX
878 printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n", 879 printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
879 dev->name); 880 dev->name);
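
Both hss_hdlc_xmit() hunks lean on the stop-then-recheck idiom: stop the queue first, then sample the TX-ready state again, so a TX-done interrupt that fires between the two steps cannot leave the interface stopped while slots are free. The idiom in isolation, where ring_full() is an illustrative stand-in for the watermark test:

	if (ring_full(txreadyq)) {
		netif_stop_queue(dev);
		/* the TX-done IRQ may have freed a slot in the window above */
		if (!ring_full(txreadyq))
			netif_wake_queue(dev);
	}
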
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index fb7541c28e58..5bc00db21b24 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -310,7 +310,7 @@ config PRISM54
310 If you want to compile the driver as a module ( = code which can be 310 If you want to compile the driver as a module ( = code which can be
311 inserted in and removed from the running kernel whenever you want), 311 inserted in and removed from the running kernel whenever you want),
312 say M here and read <file:Documentation/kbuild/modules.txt>. 312 say M here and read <file:Documentation/kbuild/modules.txt>.
313 The module will be called prism54.ko. 313 The module will be called prism54.
314 314
315config USB_ZD1201 315config USB_ZD1201
316 tristate "USB ZD1201 based Wireless device support" 316 tristate "USB ZD1201 based Wireless device support"
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index 932d207bce23..c15db2293515 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -29,7 +29,7 @@ config HOSTAP
29 PLX/PCI/CS version of the driver to actually use the driver. 29 PLX/PCI/CS version of the driver to actually use the driver.
30 30
31 The driver can be compiled as a module and it will be called 31 The driver can be compiled as a module and it will be called
32 "hostap.ko". 32 hostap.
33 33
34config HOSTAP_FIRMWARE 34config HOSTAP_FIRMWARE
35 bool "Support downloading firmware images with Host AP driver" 35 bool "Support downloading firmware images with Host AP driver"
@@ -68,7 +68,7 @@ config HOSTAP_PLX
68 driver. 68 driver.
69 69
70 The driver can be compiled as a module and will be named 70 The driver can be compiled as a module and will be named
71 "hostap_plx.ko". 71 hostap_plx.
72 72
73config HOSTAP_PCI 73config HOSTAP_PCI
74 tristate "Host AP driver for Prism2.5 PCI adaptors" 74 tristate "Host AP driver for Prism2.5 PCI adaptors"
@@ -81,7 +81,7 @@ config HOSTAP_PCI
81 driver. 81 driver.
82 82
83 The driver can be compiled as a module and will be named 83 The driver can be compiled as a module and will be named
84 "hostap_pci.ko". 84 hostap_pci.
85 85
86config HOSTAP_CS 86config HOSTAP_CS
87 tristate "Host AP driver for Prism2/2.5/3 PC Cards" 87 tristate "Host AP driver for Prism2/2.5/3 PC Cards"
@@ -94,4 +94,4 @@ config HOSTAP_CS
94 driver. 94 driver.
95 95
96 The driver can be compiled as a module and will be named 96 The driver can be compiled as a module and will be named
97 "hostap_cs.ko". 97 hostap_cs.
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 029ccb6bdbaa..e092af09d6bf 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -70,7 +70,7 @@ config IWLAGN
70 If you want to compile the driver as a module ( = code which can be 70 If you want to compile the driver as a module ( = code which can be
71 inserted in and removed from the running kernel whenever you want), 71 inserted in and removed from the running kernel whenever you want),
72 say M here and read <file:Documentation/kbuild/modules.txt>. The 72 say M here and read <file:Documentation/kbuild/modules.txt>. The
73 module will be called iwlagn.ko. 73 module will be called iwlagn.
74 74
75 75
76config IWL4965 76config IWL4965
@@ -108,7 +108,7 @@ config IWL3945
108 If you want to compile the driver as a module ( = code which can be 108 If you want to compile the driver as a module ( = code which can be
109 inserted in and removed from the running kernel whenever you want), 109 inserted in and removed from the running kernel whenever you want),
110 say M here and read <file:Documentation/kbuild/modules.txt>. The 110 say M here and read <file:Documentation/kbuild/modules.txt>. The
111 module will be called iwl3945.ko. 111 module will be called iwl3945.
112 112
113config IWL3945_SPECTRUM_MEASUREMENT 113config IWL3945_SPECTRUM_MEASUREMENT
114 bool "Enable Spectrum Measurement in iwl3945 driver" 114 bool "Enable Spectrum Measurement in iwl3945 driver"
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 7441d5585110..3bec3dbd3450 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -647,7 +647,7 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
647 ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER, 647 ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER,
648 infobuf, info_len); 648 infobuf, info_len);
649 if (ret != 0) 649 if (ret != 0)
650 devdbg(dev, "setting rndis config paramater failed, %d.", ret); 650 devdbg(dev, "setting rndis config parameter failed, %d.", ret);
651 651
652 kfree(infobuf); 652 kfree(infobuf);
653 return ret; 653 return ret;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 18ee7d6c4028..8aab3e6754bd 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -9,11 +9,11 @@ menuconfig RT2X00
9 9
10 When building one of the individual drivers, the rt2x00 library 10 When building one of the individual drivers, the rt2x00 library
11 will also be created. That library (when the driver is built as 11 will also be created. That library (when the driver is built as
12 a module) will be called "rt2x00lib.ko". 12 a module) will be called rt2x00lib.
13 13
14 Additionally PCI and USB libraries will also be build depending 14 Additionally PCI and USB libraries will also be build depending
15 on the types of drivers being selected, these libraries will be 15 on the types of drivers being selected, these libraries will be
16 called "rt2x00pci.ko" and "rt2x00usb.ko". 16 called rt2x00pci and rt2x00usb.
17 17
18if RT2X00 18if RT2X00
19 19
@@ -26,7 +26,7 @@ config RT2400PCI
26 This adds support for rt2400 wireless chipset family. 26 This adds support for rt2400 wireless chipset family.
27 Supported chips: RT2460. 27 Supported chips: RT2460.
28 28
29 When compiled as a module, this driver will be called "rt2400pci.ko". 29 When compiled as a module, this driver will be called rt2400pci.
30 30
31config RT2500PCI 31config RT2500PCI
32 tristate "Ralink rt2500 (PCI/PCMCIA) support" 32 tristate "Ralink rt2500 (PCI/PCMCIA) support"
@@ -37,7 +37,7 @@ config RT2500PCI
37 This adds support for rt2500 wireless chipset family. 37 This adds support for rt2500 wireless chipset family.
38 Supported chips: RT2560. 38 Supported chips: RT2560.
39 39
40 When compiled as a module, this driver will be called "rt2500pci.ko". 40 When compiled as a module, this driver will be called rt2500pci.
41 41
42config RT61PCI 42config RT61PCI
43 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" 43 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
@@ -51,7 +51,7 @@ config RT61PCI
51 This adds support for rt2501 wireless chipset family. 51 This adds support for rt2501 wireless chipset family.
52 Supported chips: RT2561, RT2561S & RT2661. 52 Supported chips: RT2561, RT2561S & RT2661.
53 53
54 When compiled as a module, this driver will be called "rt61pci.ko". 54 When compiled as a module, this driver will be called rt61pci.
55 55
56config RT2500USB 56config RT2500USB
57 tristate "Ralink rt2500 (USB) support" 57 tristate "Ralink rt2500 (USB) support"
@@ -62,7 +62,7 @@ config RT2500USB
62 This adds support for rt2500 wireless chipset family. 62 This adds support for rt2500 wireless chipset family.
63 Supported chips: RT2571 & RT2572. 63 Supported chips: RT2571 & RT2572.
64 64
65 When compiled as a module, this driver will be called "rt2500usb.ko". 65 When compiled as a module, this driver will be called rt2500usb.
66 66
67config RT73USB 67config RT73USB
68 tristate "Ralink rt2501/rt73 (USB) support" 68 tristate "Ralink rt2501/rt73 (USB) support"
@@ -75,7 +75,7 @@ config RT73USB
75 This adds support for rt2501 wireless chipset family. 75 This adds support for rt2501 wireless chipset family.
76 Supported chips: RT2571W, RT2573 & RT2671. 76 Supported chips: RT2571W, RT2573 & RT2671.
77 77
78 When compiled as a module, this driver will be called "rt73usb.ko". 78 When compiled as a module, this driver will be called rt73usb.
79 79
80config RT2800USB 80config RT2800USB
81 tristate "Ralink rt2800 (USB) support" 81 tristate "Ralink rt2800 (USB) support"
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 39e00b3d7811..0bf2715fa93a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -261,7 +261,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna);
261 * @rt2x00dev: Pointer to &struct rt2x00_dev. 261 * @rt2x00dev: Pointer to &struct rt2x00_dev.
262 * 262 *
263 * Initialize work structure and all link tuning related 263 * Initialize work structure and all link tuning related
264 * paramters. This will not start the link tuning process itself. 264 * parameters. This will not start the link tuning process itself.
265 */ 265 */
266void rt2x00link_register(struct rt2x00_dev *rt2x00dev); 266void rt2x00link_register(struct rt2x00_dev *rt2x00dev);
267 267
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 1a90d69f18a9..6af706408ac0 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -138,7 +138,7 @@ psa_read(struct net_device * dev,
138 138
139/*------------------------------------------------------------------*/ 139/*------------------------------------------------------------------*/
140/* 140/*
141 * Write the Paramter Storage Area to the WaveLAN card's memory 141 * Write the Parameter Storage Area to the WaveLAN card's memory
142 */ 142 */
143static void 143static void
144psa_write(struct net_device * dev, 144psa_write(struct net_device * dev,