diff options
Diffstat (limited to 'drivers/net/cnic.c')
-rw-r--r-- | drivers/net/cnic.c | 1875 |
1 files changed, 1833 insertions, 42 deletions
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 3bf1b04f2cab..d4c6e7fcff53 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -33,10 +33,16 @@ | |||
33 | #include <net/route.h> | 33 | #include <net/route.h> |
34 | #include <net/ipv6.h> | 34 | #include <net/ipv6.h> |
35 | #include <net/ip6_route.h> | 35 | #include <net/ip6_route.h> |
36 | #include <net/ip6_checksum.h> | ||
36 | #include <scsi/iscsi_if.h> | 37 | #include <scsi/iscsi_if.h> |
37 | 38 | ||
38 | #include "cnic_if.h" | 39 | #include "cnic_if.h" |
39 | #include "bnx2.h" | 40 | #include "bnx2.h" |
41 | #include "bnx2x_reg.h" | ||
42 | #include "bnx2x_fw_defs.h" | ||
43 | #include "bnx2x_hsi.h" | ||
44 | #include "../scsi/bnx2i/57xx_iscsi_constants.h" | ||
45 | #include "../scsi/bnx2i/57xx_iscsi_hsi.h" | ||
40 | #include "cnic.h" | 46 | #include "cnic.h" |
41 | #include "cnic_defs.h" | 47 | #include "cnic_defs.h" |
42 | 48 | ||
@@ -59,6 +65,7 @@ static DEFINE_MUTEX(cnic_lock); | |||
59 | static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; | 65 | static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; |
60 | 66 | ||
61 | static int cnic_service_bnx2(void *, void *); | 67 | static int cnic_service_bnx2(void *, void *); |
68 | static int cnic_service_bnx2x(void *, void *); | ||
62 | static int cnic_ctl(void *, struct cnic_ctl_info *); | 69 | static int cnic_ctl(void *, struct cnic_ctl_info *); |
63 | 70 | ||
64 | static struct cnic_ops cnic_bnx2_ops = { | 71 | static struct cnic_ops cnic_bnx2_ops = { |
@@ -67,9 +74,14 @@ static struct cnic_ops cnic_bnx2_ops = { | |||
67 | .cnic_ctl = cnic_ctl, | 74 | .cnic_ctl = cnic_ctl, |
68 | }; | 75 | }; |
69 | 76 | ||
70 | static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *); | 77 | static struct cnic_ops cnic_bnx2x_ops = { |
71 | static void cnic_init_bnx2_tx_ring(struct cnic_dev *); | 78 | .cnic_owner = THIS_MODULE, |
72 | static void cnic_init_bnx2_rx_ring(struct cnic_dev *); | 79 | .cnic_handler = cnic_service_bnx2x, |
80 | .cnic_ctl = cnic_ctl, | ||
81 | }; | ||
82 | |||
83 | static void cnic_shutdown_rings(struct cnic_dev *); | ||
84 | static void cnic_init_rings(struct cnic_dev *); | ||
73 | static int cnic_cm_set_pg(struct cnic_sock *); | 85 | static int cnic_cm_set_pg(struct cnic_sock *); |
74 | 86 | ||
75 | static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) | 87 | static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) |
@@ -83,10 +95,16 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) | |||
83 | if (cp->uio_dev != -1) | 95 | if (cp->uio_dev != -1) |
84 | return -EBUSY; | 96 | return -EBUSY; |
85 | 97 | ||
98 | rtnl_lock(); | ||
99 | if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) { | ||
100 | rtnl_unlock(); | ||
101 | return -ENODEV; | ||
102 | } | ||
103 | |||
86 | cp->uio_dev = iminor(inode); | 104 | cp->uio_dev = iminor(inode); |
87 | 105 | ||
88 | cnic_init_bnx2_tx_ring(dev); | 106 | cnic_init_rings(dev); |
89 | cnic_init_bnx2_rx_ring(dev); | 107 | rtnl_unlock(); |
90 | 108 | ||
91 | return 0; | 109 | return 0; |
92 | } | 110 | } |
@@ -96,7 +114,7 @@ static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) | |||
96 | struct cnic_dev *dev = uinfo->priv; | 114 | struct cnic_dev *dev = uinfo->priv; |
97 | struct cnic_local *cp = dev->cnic_priv; | 115 | struct cnic_local *cp = dev->cnic_priv; |
98 | 116 | ||
99 | cnic_shutdown_bnx2_rx_ring(dev); | 117 | cnic_shutdown_rings(dev); |
100 | 118 | ||
101 | cp->uio_dev = -1; | 119 | cp->uio_dev = -1; |
102 | return 0; | 120 | return 0; |
@@ -162,6 +180,36 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) | |||
162 | ethdev->drv_ctl(dev->netdev, &info); | 180 | ethdev->drv_ctl(dev->netdev, &info); |
163 | } | 181 | } |
164 | 182 | ||
183 | static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr) | ||
184 | { | ||
185 | struct cnic_local *cp = dev->cnic_priv; | ||
186 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
187 | struct drv_ctl_info info; | ||
188 | struct drv_ctl_io *io = &info.data.io; | ||
189 | |||
190 | info.cmd = DRV_CTL_CTXTBL_WR_CMD; | ||
191 | io->offset = off; | ||
192 | io->dma_addr = addr; | ||
193 | ethdev->drv_ctl(dev->netdev, &info); | ||
194 | } | ||
195 | |||
196 | static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start) | ||
197 | { | ||
198 | struct cnic_local *cp = dev->cnic_priv; | ||
199 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
200 | struct drv_ctl_info info; | ||
201 | struct drv_ctl_l2_ring *ring = &info.data.ring; | ||
202 | |||
203 | if (start) | ||
204 | info.cmd = DRV_CTL_START_L2_CMD; | ||
205 | else | ||
206 | info.cmd = DRV_CTL_STOP_L2_CMD; | ||
207 | |||
208 | ring->cid = cid; | ||
209 | ring->client_id = cl_id; | ||
210 | ethdev->drv_ctl(dev->netdev, &info); | ||
211 | } | ||
212 | |||
165 | static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) | 213 | static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) |
166 | { | 214 | { |
167 | struct cnic_local *cp = dev->cnic_priv; | 215 | struct cnic_local *cp = dev->cnic_priv; |
@@ -204,6 +252,19 @@ static void cnic_kwq_completion(struct cnic_dev *dev, u32 count) | |||
204 | ethdev->drv_ctl(dev->netdev, &info); | 252 | ethdev->drv_ctl(dev->netdev, &info); |
205 | } | 253 | } |
206 | 254 | ||
255 | static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid) | ||
256 | { | ||
257 | u32 i; | ||
258 | |||
259 | for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { | ||
260 | if (cp->ctx_tbl[i].cid == cid) { | ||
261 | *l5_cid = i; | ||
262 | return 0; | ||
263 | } | ||
264 | } | ||
265 | return -EINVAL; | ||
266 | } | ||
267 | |||
207 | static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, | 268 | static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, |
208 | struct cnic_sock *csk) | 269 | struct cnic_sock *csk) |
209 | { | 270 | { |
@@ -347,7 +408,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) | |||
347 | { | 408 | { |
348 | struct cnic_dev *dev; | 409 | struct cnic_dev *dev; |
349 | 410 | ||
350 | if (ulp_type >= MAX_CNIC_ULP_TYPE) { | 411 | if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { |
351 | printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n", | 412 | printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n", |
352 | ulp_type); | 413 | ulp_type); |
353 | return -EINVAL; | 414 | return -EINVAL; |
@@ -393,7 +454,7 @@ int cnic_unregister_driver(int ulp_type) | |||
393 | struct cnic_ulp_ops *ulp_ops; | 454 | struct cnic_ulp_ops *ulp_ops; |
394 | int i = 0; | 455 | int i = 0; |
395 | 456 | ||
396 | if (ulp_type >= MAX_CNIC_ULP_TYPE) { | 457 | if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { |
397 | printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", | 458 | printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", |
398 | ulp_type); | 459 | ulp_type); |
399 | return -EINVAL; | 460 | return -EINVAL; |
@@ -449,7 +510,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, | |||
449 | struct cnic_local *cp = dev->cnic_priv; | 510 | struct cnic_local *cp = dev->cnic_priv; |
450 | struct cnic_ulp_ops *ulp_ops; | 511 | struct cnic_ulp_ops *ulp_ops; |
451 | 512 | ||
452 | if (ulp_type >= MAX_CNIC_ULP_TYPE) { | 513 | if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { |
453 | printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n", | 514 | printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n", |
454 | ulp_type); | 515 | ulp_type); |
455 | return -EINVAL; | 516 | return -EINVAL; |
@@ -490,7 +551,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) | |||
490 | struct cnic_local *cp = dev->cnic_priv; | 551 | struct cnic_local *cp = dev->cnic_priv; |
491 | int i = 0; | 552 | int i = 0; |
492 | 553 | ||
493 | if (ulp_type >= MAX_CNIC_ULP_TYPE) { | 554 | if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { |
494 | printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", | 555 | printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", |
495 | ulp_type); | 556 | ulp_type); |
496 | return -EINVAL; | 557 | return -EINVAL; |
@@ -606,14 +667,14 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) | |||
606 | 667 | ||
607 | for (i = 0; i < dma->num_pages; i++) { | 668 | for (i = 0; i < dma->num_pages; i++) { |
608 | if (dma->pg_arr[i]) { | 669 | if (dma->pg_arr[i]) { |
609 | pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE, | 670 | dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE, |
610 | dma->pg_arr[i], dma->pg_map_arr[i]); | 671 | dma->pg_arr[i], dma->pg_map_arr[i]); |
611 | dma->pg_arr[i] = NULL; | 672 | dma->pg_arr[i] = NULL; |
612 | } | 673 | } |
613 | } | 674 | } |
614 | if (dma->pgtbl) { | 675 | if (dma->pgtbl) { |
615 | pci_free_consistent(dev->pcidev, dma->pgtbl_size, | 676 | dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size, |
616 | dma->pgtbl, dma->pgtbl_map); | 677 | dma->pgtbl, dma->pgtbl_map); |
617 | dma->pgtbl = NULL; | 678 | dma->pgtbl = NULL; |
618 | } | 679 | } |
619 | kfree(dma->pg_arr); | 680 | kfree(dma->pg_arr); |
@@ -635,6 +696,20 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) | |||
635 | } | 696 | } |
636 | } | 697 | } |
637 | 698 | ||
699 | static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) | ||
700 | { | ||
701 | int i; | ||
702 | u32 *page_table = dma->pgtbl; | ||
703 | |||
704 | for (i = 0; i < dma->num_pages; i++) { | ||
705 | /* Each entry needs to be in little endian format. */ | ||
706 | *page_table = dma->pg_map_arr[i] & 0xffffffff; | ||
707 | page_table++; | ||
708 | *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); | ||
709 | page_table++; | ||
710 | } | ||
711 | } | ||
712 | |||
638 | static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, | 713 | static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, |
639 | int pages, int use_pg_tbl) | 714 | int pages, int use_pg_tbl) |
640 | { | 715 | { |
@@ -650,9 +725,10 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, | |||
650 | dma->num_pages = pages; | 725 | dma->num_pages = pages; |
651 | 726 | ||
652 | for (i = 0; i < pages; i++) { | 727 | for (i = 0; i < pages; i++) { |
653 | dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev, | 728 | dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, |
654 | BCM_PAGE_SIZE, | 729 | BCM_PAGE_SIZE, |
655 | &dma->pg_map_arr[i]); | 730 | &dma->pg_map_arr[i], |
731 | GFP_ATOMIC); | ||
656 | if (dma->pg_arr[i] == NULL) | 732 | if (dma->pg_arr[i] == NULL) |
657 | goto error; | 733 | goto error; |
658 | } | 734 | } |
@@ -661,8 +737,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, | |||
661 | 737 | ||
662 | dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) & | 738 | dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) & |
663 | ~(BCM_PAGE_SIZE - 1); | 739 | ~(BCM_PAGE_SIZE - 1); |
664 | dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size, | 740 | dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, |
665 | &dma->pgtbl_map); | 741 | &dma->pgtbl_map, GFP_ATOMIC); |
666 | if (dma->pgtbl == NULL) | 742 | if (dma->pgtbl == NULL) |
667 | goto error; | 743 | goto error; |
668 | 744 | ||
@@ -675,6 +751,21 @@ error: | |||
675 | return -ENOMEM; | 751 | return -ENOMEM; |
676 | } | 752 | } |
677 | 753 | ||
754 | static void cnic_free_context(struct cnic_dev *dev) | ||
755 | { | ||
756 | struct cnic_local *cp = dev->cnic_priv; | ||
757 | int i; | ||
758 | |||
759 | for (i = 0; i < cp->ctx_blks; i++) { | ||
760 | if (cp->ctx_arr[i].ctx) { | ||
761 | dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size, | ||
762 | cp->ctx_arr[i].ctx, | ||
763 | cp->ctx_arr[i].mapping); | ||
764 | cp->ctx_arr[i].ctx = NULL; | ||
765 | } | ||
766 | } | ||
767 | } | ||
768 | |||
678 | static void cnic_free_resc(struct cnic_dev *dev) | 769 | static void cnic_free_resc(struct cnic_dev *dev) |
679 | { | 770 | { |
680 | struct cnic_local *cp = dev->cnic_priv; | 771 | struct cnic_local *cp = dev->cnic_priv; |
@@ -691,25 +782,18 @@ static void cnic_free_resc(struct cnic_dev *dev) | |||
691 | } | 782 | } |
692 | 783 | ||
693 | if (cp->l2_buf) { | 784 | if (cp->l2_buf) { |
694 | pci_free_consistent(dev->pcidev, cp->l2_buf_size, | 785 | dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size, |
695 | cp->l2_buf, cp->l2_buf_map); | 786 | cp->l2_buf, cp->l2_buf_map); |
696 | cp->l2_buf = NULL; | 787 | cp->l2_buf = NULL; |
697 | } | 788 | } |
698 | 789 | ||
699 | if (cp->l2_ring) { | 790 | if (cp->l2_ring) { |
700 | pci_free_consistent(dev->pcidev, cp->l2_ring_size, | 791 | dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size, |
701 | cp->l2_ring, cp->l2_ring_map); | 792 | cp->l2_ring, cp->l2_ring_map); |
702 | cp->l2_ring = NULL; | 793 | cp->l2_ring = NULL; |
703 | } | 794 | } |
704 | 795 | ||
705 | for (i = 0; i < cp->ctx_blks; i++) { | 796 | cnic_free_context(dev); |
706 | if (cp->ctx_arr[i].ctx) { | ||
707 | pci_free_consistent(dev->pcidev, cp->ctx_blk_size, | ||
708 | cp->ctx_arr[i].ctx, | ||
709 | cp->ctx_arr[i].mapping); | ||
710 | cp->ctx_arr[i].ctx = NULL; | ||
711 | } | ||
712 | } | ||
713 | kfree(cp->ctx_arr); | 797 | kfree(cp->ctx_arr); |
714 | cp->ctx_arr = NULL; | 798 | cp->ctx_arr = NULL; |
715 | cp->ctx_blks = 0; | 799 | cp->ctx_blks = 0; |
@@ -717,6 +801,7 @@ static void cnic_free_resc(struct cnic_dev *dev) | |||
717 | cnic_free_dma(dev, &cp->gbl_buf_info); | 801 | cnic_free_dma(dev, &cp->gbl_buf_info); |
718 | cnic_free_dma(dev, &cp->conn_buf_info); | 802 | cnic_free_dma(dev, &cp->conn_buf_info); |
719 | cnic_free_dma(dev, &cp->kwq_info); | 803 | cnic_free_dma(dev, &cp->kwq_info); |
804 | cnic_free_dma(dev, &cp->kwq_16_data_info); | ||
720 | cnic_free_dma(dev, &cp->kcq_info); | 805 | cnic_free_dma(dev, &cp->kcq_info); |
721 | kfree(cp->iscsi_tbl); | 806 | kfree(cp->iscsi_tbl); |
722 | cp->iscsi_tbl = NULL; | 807 | cp->iscsi_tbl = NULL; |
@@ -765,8 +850,10 @@ static int cnic_alloc_context(struct cnic_dev *dev) | |||
765 | 850 | ||
766 | for (i = 0; i < cp->ctx_blks; i++) { | 851 | for (i = 0; i < cp->ctx_blks; i++) { |
767 | cp->ctx_arr[i].ctx = | 852 | cp->ctx_arr[i].ctx = |
768 | pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE, | 853 | dma_alloc_coherent(&dev->pcidev->dev, |
769 | &cp->ctx_arr[i].mapping); | 854 | BCM_PAGE_SIZE, |
855 | &cp->ctx_arr[i].mapping, | ||
856 | GFP_KERNEL); | ||
770 | if (cp->ctx_arr[i].ctx == NULL) | 857 | if (cp->ctx_arr[i].ctx == NULL) |
771 | return -ENOMEM; | 858 | return -ENOMEM; |
772 | } | 859 | } |
@@ -779,15 +866,17 @@ static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages) | |||
779 | struct cnic_local *cp = dev->cnic_priv; | 866 | struct cnic_local *cp = dev->cnic_priv; |
780 | 867 | ||
781 | cp->l2_ring_size = pages * BCM_PAGE_SIZE; | 868 | cp->l2_ring_size = pages * BCM_PAGE_SIZE; |
782 | cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size, | 869 | cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size, |
783 | &cp->l2_ring_map); | 870 | &cp->l2_ring_map, |
871 | GFP_KERNEL | __GFP_COMP); | ||
784 | if (!cp->l2_ring) | 872 | if (!cp->l2_ring) |
785 | return -ENOMEM; | 873 | return -ENOMEM; |
786 | 874 | ||
787 | cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; | 875 | cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; |
788 | cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size); | 876 | cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size); |
789 | cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size, | 877 | cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size, |
790 | &cp->l2_buf_map); | 878 | &cp->l2_buf_map, |
879 | GFP_KERNEL | __GFP_COMP); | ||
791 | if (!cp->l2_buf) | 880 | if (!cp->l2_buf) |
792 | return -ENOMEM; | 881 | return -ENOMEM; |
793 | 882 | ||
@@ -808,14 +897,20 @@ static int cnic_alloc_uio(struct cnic_dev *dev) { | |||
808 | uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; | 897 | uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; |
809 | uinfo->mem[0].memtype = UIO_MEM_PHYS; | 898 | uinfo->mem[0].memtype = UIO_MEM_PHYS; |
810 | 899 | ||
811 | uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK; | ||
812 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { | 900 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { |
901 | uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK; | ||
813 | if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) | 902 | if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) |
814 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; | 903 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; |
815 | else | 904 | else |
816 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; | 905 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; |
817 | 906 | ||
818 | uinfo->name = "bnx2_cnic"; | 907 | uinfo->name = "bnx2_cnic"; |
908 | } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { | ||
909 | uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & | ||
910 | PAGE_MASK; | ||
911 | uinfo->mem[1].size = sizeof(struct host_def_status_block); | ||
912 | |||
913 | uinfo->name = "bnx2x_cnic"; | ||
819 | } | 914 | } |
820 | 915 | ||
821 | uinfo->mem[1].memtype = UIO_MEM_LOGICAL; | 916 | uinfo->mem[1].memtype = UIO_MEM_LOGICAL; |
@@ -880,6 +975,152 @@ error: | |||
880 | return ret; | 975 | return ret; |
881 | } | 976 | } |
882 | 977 | ||
978 | static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) | ||
979 | { | ||
980 | struct cnic_local *cp = dev->cnic_priv; | ||
981 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
982 | int ctx_blk_size = cp->ethdev->ctx_blk_size; | ||
983 | int total_mem, blks, i, cid_space; | ||
984 | |||
985 | if (BNX2X_ISCSI_START_CID < ethdev->starting_cid) | ||
986 | return -EINVAL; | ||
987 | |||
988 | cid_space = MAX_ISCSI_TBL_SZ + | ||
989 | (BNX2X_ISCSI_START_CID - ethdev->starting_cid); | ||
990 | |||
991 | total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space; | ||
992 | blks = total_mem / ctx_blk_size; | ||
993 | if (total_mem % ctx_blk_size) | ||
994 | blks++; | ||
995 | |||
996 | if (blks > cp->ethdev->ctx_tbl_len) | ||
997 | return -ENOMEM; | ||
998 | |||
999 | cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL); | ||
1000 | if (cp->ctx_arr == NULL) | ||
1001 | return -ENOMEM; | ||
1002 | |||
1003 | cp->ctx_blks = blks; | ||
1004 | cp->ctx_blk_size = ctx_blk_size; | ||
1005 | if (BNX2X_CHIP_IS_E1H(cp->chip_id)) | ||
1006 | cp->ctx_align = 0; | ||
1007 | else | ||
1008 | cp->ctx_align = ctx_blk_size; | ||
1009 | |||
1010 | cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE; | ||
1011 | |||
1012 | for (i = 0; i < blks; i++) { | ||
1013 | cp->ctx_arr[i].ctx = | ||
1014 | dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size, | ||
1015 | &cp->ctx_arr[i].mapping, | ||
1016 | GFP_KERNEL); | ||
1017 | if (cp->ctx_arr[i].ctx == NULL) | ||
1018 | return -ENOMEM; | ||
1019 | |||
1020 | if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) { | ||
1021 | if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { | ||
1022 | cnic_free_context(dev); | ||
1023 | cp->ctx_blk_size += cp->ctx_align; | ||
1024 | i = -1; | ||
1025 | continue; | ||
1026 | } | ||
1027 | } | ||
1028 | } | ||
1029 | return 0; | ||
1030 | } | ||
1031 | |||
1032 | static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | ||
1033 | { | ||
1034 | struct cnic_local *cp = dev->cnic_priv; | ||
1035 | int i, j, n, ret, pages; | ||
1036 | struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; | ||
1037 | |||
1038 | cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, | ||
1039 | GFP_KERNEL); | ||
1040 | if (!cp->iscsi_tbl) | ||
1041 | goto error; | ||
1042 | |||
1043 | cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * | ||
1044 | MAX_CNIC_L5_CONTEXT, GFP_KERNEL); | ||
1045 | if (!cp->ctx_tbl) | ||
1046 | goto error; | ||
1047 | |||
1048 | for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { | ||
1049 | cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i]; | ||
1050 | cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; | ||
1051 | } | ||
1052 | |||
1053 | pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) / | ||
1054 | PAGE_SIZE; | ||
1055 | |||
1056 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); | ||
1057 | if (ret) | ||
1058 | return -ENOMEM; | ||
1059 | |||
1060 | n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; | ||
1061 | for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) { | ||
1062 | long off = CNIC_KWQ16_DATA_SIZE * (i % n); | ||
1063 | |||
1064 | cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; | ||
1065 | cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] + | ||
1066 | off; | ||
1067 | |||
1068 | if ((i % n) == (n - 1)) | ||
1069 | j++; | ||
1070 | } | ||
1071 | |||
1072 | ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0); | ||
1073 | if (ret) | ||
1074 | goto error; | ||
1075 | cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr; | ||
1076 | |||
1077 | for (i = 0; i < KCQ_PAGE_CNT; i++) { | ||
1078 | struct bnx2x_bd_chain_next *next = | ||
1079 | (struct bnx2x_bd_chain_next *) | ||
1080 | &cp->kcq[i][MAX_KCQE_CNT]; | ||
1081 | int j = i + 1; | ||
1082 | |||
1083 | if (j >= KCQ_PAGE_CNT) | ||
1084 | j = 0; | ||
1085 | next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32; | ||
1086 | next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff; | ||
1087 | } | ||
1088 | |||
1089 | pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS * | ||
1090 | BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE; | ||
1091 | ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1); | ||
1092 | if (ret) | ||
1093 | goto error; | ||
1094 | |||
1095 | pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; | ||
1096 | ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); | ||
1097 | if (ret) | ||
1098 | goto error; | ||
1099 | |||
1100 | ret = cnic_alloc_bnx2x_context(dev); | ||
1101 | if (ret) | ||
1102 | goto error; | ||
1103 | |||
1104 | cp->bnx2x_status_blk = cp->status_blk; | ||
1105 | cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; | ||
1106 | |||
1107 | cp->l2_rx_ring_size = 15; | ||
1108 | |||
1109 | ret = cnic_alloc_l2_rings(dev, 4); | ||
1110 | if (ret) | ||
1111 | goto error; | ||
1112 | |||
1113 | ret = cnic_alloc_uio(dev); | ||
1114 | if (ret) | ||
1115 | goto error; | ||
1116 | |||
1117 | return 0; | ||
1118 | |||
1119 | error: | ||
1120 | cnic_free_resc(dev); | ||
1121 | return -ENOMEM; | ||
1122 | } | ||
1123 | |||
883 | static inline u32 cnic_kwq_avail(struct cnic_local *cp) | 1124 | static inline u32 cnic_kwq_avail(struct cnic_local *cp) |
884 | { | 1125 | { |
885 | return cp->max_kwq_idx - | 1126 | return cp->max_kwq_idx - |
@@ -921,6 +1162,880 @@ static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], | |||
921 | return 0; | 1162 | return 0; |
922 | } | 1163 | } |
923 | 1164 | ||
1165 | static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid, | ||
1166 | union l5cm_specific_data *l5_data) | ||
1167 | { | ||
1168 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1169 | dma_addr_t map; | ||
1170 | |||
1171 | map = ctx->kwqe_data_mapping; | ||
1172 | l5_data->phy_address.lo = (u64) map & 0xffffffff; | ||
1173 | l5_data->phy_address.hi = (u64) map >> 32; | ||
1174 | return ctx->kwqe_data; | ||
1175 | } | ||
1176 | |||
1177 | static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, | ||
1178 | u32 type, union l5cm_specific_data *l5_data) | ||
1179 | { | ||
1180 | struct cnic_local *cp = dev->cnic_priv; | ||
1181 | struct l5cm_spe kwqe; | ||
1182 | struct kwqe_16 *kwq[1]; | ||
1183 | int ret; | ||
1184 | |||
1185 | kwqe.hdr.conn_and_cmd_data = | ||
1186 | cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | | ||
1187 | BNX2X_HW_CID(cid, cp->func))); | ||
1188 | kwqe.hdr.type = cpu_to_le16(type); | ||
1189 | kwqe.hdr.reserved = 0; | ||
1190 | kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); | ||
1191 | kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); | ||
1192 | |||
1193 | kwq[0] = (struct kwqe_16 *) &kwqe; | ||
1194 | |||
1195 | spin_lock_bh(&cp->cnic_ulp_lock); | ||
1196 | ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1); | ||
1197 | spin_unlock_bh(&cp->cnic_ulp_lock); | ||
1198 | |||
1199 | if (ret == 1) | ||
1200 | return 0; | ||
1201 | |||
1202 | return -EBUSY; | ||
1203 | } | ||
1204 | |||
1205 | static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, | ||
1206 | struct kcqe *cqes[], u32 num_cqes) | ||
1207 | { | ||
1208 | struct cnic_local *cp = dev->cnic_priv; | ||
1209 | struct cnic_ulp_ops *ulp_ops; | ||
1210 | |||
1211 | rcu_read_lock(); | ||
1212 | ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); | ||
1213 | if (likely(ulp_ops)) { | ||
1214 | ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], | ||
1215 | cqes, num_cqes); | ||
1216 | } | ||
1217 | rcu_read_unlock(); | ||
1218 | } | ||
1219 | |||
1220 | static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1221 | { | ||
1222 | struct cnic_local *cp = dev->cnic_priv; | ||
1223 | struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; | ||
1224 | int func = cp->func, pages; | ||
1225 | int hq_bds; | ||
1226 | |||
1227 | cp->num_iscsi_tasks = req1->num_tasks_per_conn; | ||
1228 | cp->num_ccells = req1->num_ccells_per_conn; | ||
1229 | cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE * | ||
1230 | cp->num_iscsi_tasks; | ||
1231 | cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * | ||
1232 | BNX2X_ISCSI_R2TQE_SIZE; | ||
1233 | cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; | ||
1234 | pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; | ||
1235 | hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); | ||
1236 | cp->num_cqs = req1->num_cqs; | ||
1237 | |||
1238 | if (!dev->max_iscsi_conn) | ||
1239 | return 0; | ||
1240 | |||
1241 | /* init Tstorm RAM */ | ||
1242 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func), | ||
1243 | req1->rq_num_wqes); | ||
1244 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1245 | PAGE_SIZE); | ||
1246 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1247 | TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1248 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + | ||
1249 | TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1250 | req1->num_tasks_per_conn); | ||
1251 | |||
1252 | /* init Ustorm RAM */ | ||
1253 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | ||
1254 | USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func), | ||
1255 | req1->rq_buffer_size); | ||
1256 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1257 | PAGE_SIZE); | ||
1258 | CNIC_WR8(dev, BAR_USTRORM_INTMEM + | ||
1259 | USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1260 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | ||
1261 | USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1262 | req1->num_tasks_per_conn); | ||
1263 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func), | ||
1264 | req1->rq_num_wqes); | ||
1265 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func), | ||
1266 | req1->cq_num_wqes); | ||
1267 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func), | ||
1268 | cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); | ||
1269 | |||
1270 | /* init Xstorm RAM */ | ||
1271 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1272 | PAGE_SIZE); | ||
1273 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1274 | XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1275 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + | ||
1276 | XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1277 | req1->num_tasks_per_conn); | ||
1278 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func), | ||
1279 | hq_bds); | ||
1280 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func), | ||
1281 | req1->num_tasks_per_conn); | ||
1282 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func), | ||
1283 | cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); | ||
1284 | |||
1285 | /* init Cstorm RAM */ | ||
1286 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1287 | PAGE_SIZE); | ||
1288 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
1289 | CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1290 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
1291 | CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1292 | req1->num_tasks_per_conn); | ||
1293 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func), | ||
1294 | req1->cq_num_wqes); | ||
1295 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func), | ||
1296 | hq_bds); | ||
1297 | |||
1298 | return 0; | ||
1299 | } | ||
1300 | |||
1301 | static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1302 | { | ||
1303 | struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; | ||
1304 | struct cnic_local *cp = dev->cnic_priv; | ||
1305 | int func = cp->func; | ||
1306 | struct iscsi_kcqe kcqe; | ||
1307 | struct kcqe *cqes[1]; | ||
1308 | |||
1309 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1310 | if (!dev->max_iscsi_conn) { | ||
1311 | kcqe.completion_status = | ||
1312 | ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED; | ||
1313 | goto done; | ||
1314 | } | ||
1315 | |||
1316 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
1317 | TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]); | ||
1318 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
1319 | TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4, | ||
1320 | req2->error_bit_map[1]); | ||
1321 | |||
1322 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | ||
1323 | USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn); | ||
1324 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
1325 | USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]); | ||
1326 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
1327 | USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4, | ||
1328 | req2->error_bit_map[1]); | ||
1329 | |||
1330 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
1331 | CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn); | ||
1332 | |||
1333 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; | ||
1334 | |||
1335 | done: | ||
1336 | kcqe.op_code = ISCSI_KCQE_OPCODE_INIT; | ||
1337 | cqes[0] = (struct kcqe *) &kcqe; | ||
1338 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); | ||
1339 | |||
1340 | return 0; | ||
1341 | } | ||
1342 | |||
1343 | static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) | ||
1344 | { | ||
1345 | struct cnic_local *cp = dev->cnic_priv; | ||
1346 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1347 | |||
1348 | if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) { | ||
1349 | struct cnic_iscsi *iscsi = ctx->proto.iscsi; | ||
1350 | |||
1351 | cnic_free_dma(dev, &iscsi->hq_info); | ||
1352 | cnic_free_dma(dev, &iscsi->r2tq_info); | ||
1353 | cnic_free_dma(dev, &iscsi->task_array_info); | ||
1354 | } | ||
1355 | cnic_free_id(&cp->cid_tbl, ctx->cid); | ||
1356 | ctx->cid = 0; | ||
1357 | } | ||
1358 | |||
1359 | static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) | ||
1360 | { | ||
1361 | u32 cid; | ||
1362 | int ret, pages; | ||
1363 | struct cnic_local *cp = dev->cnic_priv; | ||
1364 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1365 | struct cnic_iscsi *iscsi = ctx->proto.iscsi; | ||
1366 | |||
1367 | cid = cnic_alloc_new_id(&cp->cid_tbl); | ||
1368 | if (cid == -1) { | ||
1369 | ret = -ENOMEM; | ||
1370 | goto error; | ||
1371 | } | ||
1372 | |||
1373 | ctx->cid = cid; | ||
1374 | pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; | ||
1375 | |||
1376 | ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); | ||
1377 | if (ret) | ||
1378 | goto error; | ||
1379 | |||
1380 | pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; | ||
1381 | ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); | ||
1382 | if (ret) | ||
1383 | goto error; | ||
1384 | |||
1385 | pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; | ||
1386 | ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); | ||
1387 | if (ret) | ||
1388 | goto error; | ||
1389 | |||
1390 | return 0; | ||
1391 | |||
1392 | error: | ||
1393 | cnic_free_bnx2x_conn_resc(dev, l5_cid); | ||
1394 | return ret; | ||
1395 | } | ||
1396 | |||
1397 | static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init, | ||
1398 | struct regpair *ctx_addr) | ||
1399 | { | ||
1400 | struct cnic_local *cp = dev->cnic_priv; | ||
1401 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
1402 | int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk; | ||
1403 | int off = (cid - ethdev->starting_cid) % cp->cids_per_blk; | ||
1404 | unsigned long align_off = 0; | ||
1405 | dma_addr_t ctx_map; | ||
1406 | void *ctx; | ||
1407 | |||
1408 | if (cp->ctx_align) { | ||
1409 | unsigned long mask = cp->ctx_align - 1; | ||
1410 | |||
1411 | if (cp->ctx_arr[blk].mapping & mask) | ||
1412 | align_off = cp->ctx_align - | ||
1413 | (cp->ctx_arr[blk].mapping & mask); | ||
1414 | } | ||
1415 | ctx_map = cp->ctx_arr[blk].mapping + align_off + | ||
1416 | (off * BNX2X_CONTEXT_MEM_SIZE); | ||
1417 | ctx = cp->ctx_arr[blk].ctx + align_off + | ||
1418 | (off * BNX2X_CONTEXT_MEM_SIZE); | ||
1419 | if (init) | ||
1420 | memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE); | ||
1421 | |||
1422 | ctx_addr->lo = ctx_map & 0xffffffff; | ||
1423 | ctx_addr->hi = (u64) ctx_map >> 32; | ||
1424 | return ctx; | ||
1425 | } | ||
1426 | |||
/* Program the hardware iSCSI context for an offloaded connection from
 * the OFFLOAD_CONN1/2/3 KWQEs.  wqes[0] and wqes[1] are the fixed
 * offload1/offload2 requests; the remaining entries (counted by
 * req2->num_additional_wqes) are offload3 WQEs carrying extra CQ page
 * table entries.  Each storm (X/T/U/C) section of the context is filled
 * in with the queue page-table bases allocated earlier for this
 * connection plus the addresses supplied by the ULP.
 *
 * Returns 0 on success, -EINVAL if no additional WQEs were supplied,
 * or -ENOMEM if the context memory cannot be obtained.
 */
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cid, cp->func);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	/* n indexes the next unconsumed WQE; the first offload3 is wqes[2]. */
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	/* Fetch and zero the hardware context for this cid. */
	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	/* XSTORM: transmit-side state - SQ/HQ/R2TQ/task page tables. */
	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	/* NOTE(review): hi/lo of the first PTE are deliberately swapped
	 * here and below - presumably matching firmware PBE layout;
	 * confirm against the HSI definition.
	 */
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	/* TSTORM: receive-side TCP/iSCSI state. */
	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	/* Initial congestion window; enable delayed-ack support. */
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;

	ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	/* USTORM: RQ/R2TQ rings and the per-CQ state. */
	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	/* CQs 1..num_cqs-1 take their first PTEs from the offload3 WQEs,
	 * three PTEs per WQE (j cycles 0..2, advancing req3 when it wraps).
	 */
	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	/* CSTORM: completion-side HQ/task state and CQ sequence numbers. */
	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	/* CDU validation words so the hardware accepts this context. */
	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;

}
1598 | |||
1599 | static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], | ||
1600 | u32 num, int *work) | ||
1601 | { | ||
1602 | struct iscsi_kwqe_conn_offload1 *req1; | ||
1603 | struct iscsi_kwqe_conn_offload2 *req2; | ||
1604 | struct cnic_local *cp = dev->cnic_priv; | ||
1605 | struct iscsi_kcqe kcqe; | ||
1606 | struct kcqe *cqes[1]; | ||
1607 | u32 l5_cid; | ||
1608 | int ret; | ||
1609 | |||
1610 | if (num < 2) { | ||
1611 | *work = num; | ||
1612 | return -EINVAL; | ||
1613 | } | ||
1614 | |||
1615 | req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; | ||
1616 | req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1]; | ||
1617 | if ((num - 2) < req2->num_additional_wqes) { | ||
1618 | *work = num; | ||
1619 | return -EINVAL; | ||
1620 | } | ||
1621 | *work = 2 + req2->num_additional_wqes;; | ||
1622 | |||
1623 | l5_cid = req1->iscsi_conn_id; | ||
1624 | if (l5_cid >= MAX_ISCSI_TBL_SZ) | ||
1625 | return -EINVAL; | ||
1626 | |||
1627 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1628 | kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN; | ||
1629 | kcqe.iscsi_conn_id = l5_cid; | ||
1630 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; | ||
1631 | |||
1632 | if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { | ||
1633 | atomic_dec(&cp->iscsi_conn); | ||
1634 | ret = 0; | ||
1635 | goto done; | ||
1636 | } | ||
1637 | ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); | ||
1638 | if (ret) { | ||
1639 | atomic_dec(&cp->iscsi_conn); | ||
1640 | ret = 0; | ||
1641 | goto done; | ||
1642 | } | ||
1643 | ret = cnic_setup_bnx2x_ctx(dev, wqes, num); | ||
1644 | if (ret < 0) { | ||
1645 | cnic_free_bnx2x_conn_resc(dev, l5_cid); | ||
1646 | atomic_dec(&cp->iscsi_conn); | ||
1647 | goto done; | ||
1648 | } | ||
1649 | |||
1650 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; | ||
1651 | kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid, | ||
1652 | cp->func); | ||
1653 | |||
1654 | done: | ||
1655 | cqes[0] = (struct kcqe *) &kcqe; | ||
1656 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); | ||
1657 | return ret; | ||
1658 | } | ||
1659 | |||
1660 | |||
1661 | static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1662 | { | ||
1663 | struct cnic_local *cp = dev->cnic_priv; | ||
1664 | struct iscsi_kwqe_conn_update *req = | ||
1665 | (struct iscsi_kwqe_conn_update *) kwqe; | ||
1666 | void *data; | ||
1667 | union l5cm_specific_data l5_data; | ||
1668 | u32 l5_cid, cid = BNX2X_SW_CID(req->context_id); | ||
1669 | int ret; | ||
1670 | |||
1671 | if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0) | ||
1672 | return -EINVAL; | ||
1673 | |||
1674 | data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); | ||
1675 | if (!data) | ||
1676 | return -ENOMEM; | ||
1677 | |||
1678 | memcpy(data, kwqe, sizeof(struct kwqe)); | ||
1679 | |||
1680 | ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN, | ||
1681 | req->context_id, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1682 | return ret; | ||
1683 | } | ||
1684 | |||
/* Destroy an offloaded iSCSI connection.  If the connection was ever
 * started (CTX_FL_OFFLD_START), a CFC-delete ramrod is issued and we
 * sleep until the completion handler sets ctx->wait_cond.  The DMA
 * resources and cid are then freed and a DESTROY_CONN KCQE is sent back
 * to the ULP - always with SUCCESS status, even if the ramrod failed.
 *
 * NOTE(review): l5_cid comes from req->reserved0 and indexes ctx_tbl
 * without a range check here - presumably validated by the ULP; confirm.
 */
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	union l5cm_specific_data l5_data;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
		goto skip_cfc_delete;

	/* Give outstanding activity on this context ~2s to drain before
	 * deleting the CFC entry.
	 */
	while (!time_after(jiffies, ctx->timestamp + (2 * HZ)))
		msleep(250);

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	/* Common (non-connection) ramrod: delete the CFC element. */
	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
				  req->context_id,
				  ETH_CONNECTION_TYPE |
				  (1 << SPE_HDR_COMMON_RAMROD_SHIFT),
				  &l5_data);
	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);

	/* Ack the destroy back to the ULP. */
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
1730 | |||
/* Fill in the XSTORM and TSTORM sections of the active-connection
 * buffer from the CONNECT_REQ1/REQ3 KWQEs before the TCP-connect ramrod
 * is issued.  The IP addresses already stored in conn_addr are converted
 * to big-endian in6_addr form solely to compute the IPv6-style TCP
 * pseudo-header checksum the firmware expects.
 */
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	/* Byte-swap the four 32-bit words of each address into network
	 * order for csum_ipv6_magic() below.
	 */
	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	/* Look up (without zeroing) the hardware context address. */
	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	/* Zero-length pseudo-header checksum seed for the firmware. */
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	/* TCP keepalive only if the ULP supplied a timeout. */
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}
1779 | |||
1780 | static void cnic_init_bnx2x_mac(struct cnic_dev *dev) | ||
1781 | { | ||
1782 | struct cnic_local *cp = dev->cnic_priv; | ||
1783 | int func = CNIC_FUNC(cp); | ||
1784 | u8 *mac = dev->mac_addr; | ||
1785 | |||
1786 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1787 | XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]); | ||
1788 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1789 | XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]); | ||
1790 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1791 | XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]); | ||
1792 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1793 | XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]); | ||
1794 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1795 | XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]); | ||
1796 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1797 | XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]); | ||
1798 | |||
1799 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1800 | TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]); | ||
1801 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1802 | TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1, | ||
1803 | mac[4]); | ||
1804 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1805 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]); | ||
1806 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1807 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1, | ||
1808 | mac[2]); | ||
1809 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1810 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2, | ||
1811 | mac[1]); | ||
1812 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1813 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3, | ||
1814 | mac[0]); | ||
1815 | } | ||
1816 | |||
1817 | static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) | ||
1818 | { | ||
1819 | struct cnic_local *cp = dev->cnic_priv; | ||
1820 | u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; | ||
1821 | u16 tstorm_flags = 0; | ||
1822 | |||
1823 | if (tcp_ts) { | ||
1824 | xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; | ||
1825 | tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; | ||
1826 | } | ||
1827 | |||
1828 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1829 | XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags); | ||
1830 | |||
1831 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + | ||
1832 | TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags); | ||
1833 | } | ||
1834 | |||
/* Handle the L4 CONNECT1 KWQE: build the active-connection buffer in
 * the 16-byte-WQE data area and submit a TCP_CONNECT ramrod.  An IPv4
 * connect consumes 2 WQEs (req1 + req3); IPv6 consumes 3 (req1 + req2
 * with the upper address words + req3).  *work is set to the number of
 * WQEs consumed.  On success CTX_FL_OFFLD_START is set so a later
 * destroy knows a CFC delete is required.
 */
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	/* IPv6 carries an extra address KWQE (req2). */
	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	/* Compile-time-sized sanity check on the shared data area. */
	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		printk(KERN_ERR PFX "%s: conn_buf size too big\n",
		       dev->netdev->name);
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	/* Destination MAC resolved earlier by the PG offload path. */
	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		/* Upper 96 bits of each IPv6 address from req2. */
		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	/* req3 is the last consumed WQE regardless of address family. */
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	/* Low 32 bits (the whole address for IPv4) come from req1. */
	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
		kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		ctx->ctx_flags |= CTX_FL_OFFLD_START;

	return ret;
}
1920 | |||
1921 | static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1922 | { | ||
1923 | struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; | ||
1924 | union l5cm_specific_data l5_data; | ||
1925 | int ret; | ||
1926 | |||
1927 | memset(&l5_data, 0, sizeof(l5_data)); | ||
1928 | ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, | ||
1929 | req->cid, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1930 | return ret; | ||
1931 | } | ||
1932 | |||
1933 | static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1934 | { | ||
1935 | struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; | ||
1936 | union l5cm_specific_data l5_data; | ||
1937 | int ret; | ||
1938 | |||
1939 | memset(&l5_data, 0, sizeof(l5_data)); | ||
1940 | ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, | ||
1941 | req->cid, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1942 | return ret; | ||
1943 | } | ||
1944 | static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1945 | { | ||
1946 | struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; | ||
1947 | struct l4_kcq kcqe; | ||
1948 | struct kcqe *cqes[1]; | ||
1949 | |||
1950 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1951 | kcqe.pg_host_opaque = req->host_opaque; | ||
1952 | kcqe.pg_cid = req->host_opaque; | ||
1953 | kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; | ||
1954 | cqes[0] = (struct kcqe *) &kcqe; | ||
1955 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); | ||
1956 | return 0; | ||
1957 | } | ||
1958 | |||
1959 | static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1960 | { | ||
1961 | struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; | ||
1962 | struct l4_kcq kcqe; | ||
1963 | struct kcqe *cqes[1]; | ||
1964 | |||
1965 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1966 | kcqe.pg_host_opaque = req->pg_host_opaque; | ||
1967 | kcqe.pg_cid = req->pg_cid; | ||
1968 | kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; | ||
1969 | cqes[0] = (struct kcqe *) &kcqe; | ||
1970 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); | ||
1971 | return 0; | ||
1972 | } | ||
1973 | |||
/* Dispatch a batch of KWQEs from the ULPs to the per-opcode bnx2x
 * handlers.  Multi-WQE opcodes (OFFLOAD_CONN1, CONNECT1) report how
 * many entries they consumed through 'work' so the loop can advance
 * past them.  Individual handler failures are logged but do not stop
 * the batch; the function itself fails only when the device is down.
 */
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* device is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;	/* default: handler consumes one WQE */

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			/* Nothing to undo on bnx2x; accept silently. */
			ret = 0;
			break;
		default:
			ret = 0;
			printk(KERN_ERR PFX "%s: Unknown type of KWQE(0x%x)\n",
			       dev->netdev->name, opcode);
			break;
		}
		if (ret < 0)
			printk(KERN_ERR PFX "%s: KWQE(0x%x) failed\n",
			       dev->netdev->name, opcode);
		i += work;
	}
	return 0;
}
2038 | |||
924 | static void service_kcqes(struct cnic_dev *dev, int num_cqes) | 2039 | static void service_kcqes(struct cnic_dev *dev, int num_cqes) |
925 | { | 2040 | { |
926 | struct cnic_local *cp = dev->cnic_priv; | 2041 | struct cnic_local *cp = dev->cnic_priv; |
@@ -987,6 +2102,22 @@ static u16 cnic_bnx2_hw_idx(u16 idx) | |||
987 | return idx; | 2102 | return idx; |
988 | } | 2103 | } |
989 | 2104 | ||
2105 | static u16 cnic_bnx2x_next_idx(u16 idx) | ||
2106 | { | ||
2107 | idx++; | ||
2108 | if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) | ||
2109 | idx++; | ||
2110 | |||
2111 | return idx; | ||
2112 | } | ||
2113 | |||
2114 | static u16 cnic_bnx2x_hw_idx(u16 idx) | ||
2115 | { | ||
2116 | if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) | ||
2117 | idx++; | ||
2118 | return idx; | ||
2119 | } | ||
2120 | |||
990 | static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) | 2121 | static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) |
991 | { | 2122 | { |
992 | struct cnic_local *cp = dev->cnic_priv; | 2123 | struct cnic_local *cp = dev->cnic_priv; |
@@ -1012,7 +2143,7 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) | |||
1012 | return last_cnt; | 2143 | return last_cnt; |
1013 | } | 2144 | } |
1014 | 2145 | ||
1015 | static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp) | 2146 | static void cnic_chk_pkt_rings(struct cnic_local *cp) |
1016 | { | 2147 | { |
1017 | u16 rx_cons = *cp->rx_cons_ptr; | 2148 | u16 rx_cons = *cp->rx_cons_ptr; |
1018 | u16 tx_cons = *cp->tx_cons_ptr; | 2149 | u16 tx_cons = *cp->tx_cons_ptr; |
@@ -1020,6 +2151,7 @@ static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp) | |||
1020 | if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { | 2151 | if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { |
1021 | cp->tx_cons = tx_cons; | 2152 | cp->tx_cons = tx_cons; |
1022 | cp->rx_cons = rx_cons; | 2153 | cp->rx_cons = rx_cons; |
2154 | |||
1023 | uio_event_notify(cp->cnic_uinfo); | 2155 | uio_event_notify(cp->cnic_uinfo); |
1024 | } | 2156 | } |
1025 | } | 2157 | } |
@@ -1062,7 +2194,7 @@ done: | |||
1062 | 2194 | ||
1063 | cp->kcq_prod_idx = sw_prod; | 2195 | cp->kcq_prod_idx = sw_prod; |
1064 | 2196 | ||
1065 | cnic_chk_bnx2_pkt_rings(cp); | 2197 | cnic_chk_pkt_rings(cp); |
1066 | return status_idx; | 2198 | return status_idx; |
1067 | } | 2199 | } |
1068 | 2200 | ||
@@ -1100,7 +2232,7 @@ done: | |||
1100 | CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); | 2232 | CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); |
1101 | cp->kcq_prod_idx = sw_prod; | 2233 | cp->kcq_prod_idx = sw_prod; |
1102 | 2234 | ||
1103 | cnic_chk_bnx2_pkt_rings(cp); | 2235 | cnic_chk_pkt_rings(cp); |
1104 | 2236 | ||
1105 | cp->last_status_idx = status_idx; | 2237 | cp->last_status_idx = status_idx; |
1106 | CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | | 2238 | CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | |
@@ -1125,6 +2257,91 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance) | |||
1125 | return IRQ_HANDLED; | 2257 | return IRQ_HANDLED; |
1126 | } | 2258 | } |
1127 | 2259 | ||
2260 | static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, | ||
2261 | u16 index, u8 op, u8 update) | ||
2262 | { | ||
2263 | struct cnic_local *cp = dev->cnic_priv; | ||
2264 | u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + | ||
2265 | COMMAND_REG_INT_ACK); | ||
2266 | struct igu_ack_register igu_ack; | ||
2267 | |||
2268 | igu_ack.status_block_index = index; | ||
2269 | igu_ack.sb_id_and_flags = | ||
2270 | ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | | ||
2271 | (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | | ||
2272 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | | ||
2273 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); | ||
2274 | |||
2275 | CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); | ||
2276 | } | ||
2277 | |||
2278 | static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) | ||
2279 | { | ||
2280 | struct cnic_local *cp = dev->cnic_priv; | ||
2281 | |||
2282 | cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0, | ||
2283 | IGU_INT_DISABLE, 0); | ||
2284 | } | ||
2285 | |||
2286 | static void cnic_service_bnx2x_bh(unsigned long data) | ||
2287 | { | ||
2288 | struct cnic_dev *dev = (struct cnic_dev *) data; | ||
2289 | struct cnic_local *cp = dev->cnic_priv; | ||
2290 | u16 hw_prod, sw_prod; | ||
2291 | struct cstorm_status_block_c *sblk = | ||
2292 | &cp->bnx2x_status_blk->c_status_block; | ||
2293 | u32 status_idx = sblk->status_block_index; | ||
2294 | int kcqe_cnt; | ||
2295 | |||
2296 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) | ||
2297 | return; | ||
2298 | |||
2299 | hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; | ||
2300 | hw_prod = cp->hw_idx(hw_prod); | ||
2301 | sw_prod = cp->kcq_prod_idx; | ||
2302 | while (sw_prod != hw_prod) { | ||
2303 | kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); | ||
2304 | if (kcqe_cnt == 0) | ||
2305 | goto done; | ||
2306 | |||
2307 | service_kcqes(dev, kcqe_cnt); | ||
2308 | |||
2309 | /* Tell compiler that sblk fields can change. */ | ||
2310 | barrier(); | ||
2311 | if (status_idx == sblk->status_block_index) | ||
2312 | break; | ||
2313 | |||
2314 | status_idx = sblk->status_block_index; | ||
2315 | hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; | ||
2316 | hw_prod = cp->hw_idx(hw_prod); | ||
2317 | } | ||
2318 | |||
2319 | done: | ||
2320 | CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX); | ||
2321 | cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, | ||
2322 | status_idx, IGU_INT_ENABLE, 1); | ||
2323 | |||
2324 | cp->kcq_prod_idx = sw_prod; | ||
2325 | return; | ||
2326 | } | ||
2327 | |||
2328 | static int cnic_service_bnx2x(void *data, void *status_blk) | ||
2329 | { | ||
2330 | struct cnic_dev *dev = data; | ||
2331 | struct cnic_local *cp = dev->cnic_priv; | ||
2332 | u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; | ||
2333 | |||
2334 | prefetch(cp->status_blk); | ||
2335 | prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); | ||
2336 | |||
2337 | if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) | ||
2338 | tasklet_schedule(&cp->cnic_irq_task); | ||
2339 | |||
2340 | cnic_chk_pkt_rings(cp); | ||
2341 | |||
2342 | return 0; | ||
2343 | } | ||
2344 | |||
1128 | static void cnic_ulp_stop(struct cnic_dev *dev) | 2345 | static void cnic_ulp_stop(struct cnic_dev *dev) |
1129 | { | 2346 | { |
1130 | struct cnic_local *cp = dev->cnic_priv; | 2347 | struct cnic_local *cp = dev->cnic_priv; |
@@ -1197,6 +2414,19 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info) | |||
1197 | 2414 | ||
1198 | cnic_put(dev); | 2415 | cnic_put(dev); |
1199 | break; | 2416 | break; |
2417 | case CNIC_CTL_COMPLETION_CMD: { | ||
2418 | u32 cid = BNX2X_SW_CID(info->data.comp.cid); | ||
2419 | u32 l5_cid; | ||
2420 | struct cnic_local *cp = dev->cnic_priv; | ||
2421 | |||
2422 | if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { | ||
2423 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
2424 | |||
2425 | ctx->wait_cond = 1; | ||
2426 | wake_up(&ctx->waitq); | ||
2427 | } | ||
2428 | break; | ||
2429 | } | ||
1200 | default: | 2430 | default: |
1201 | return -EINVAL; | 2431 | return -EINVAL; |
1202 | } | 2432 | } |
@@ -1872,6 +3102,8 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) | |||
1872 | /* fall through */ | 3102 | /* fall through */ |
1873 | case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: | 3103 | case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: |
1874 | case L4_KCQE_OPCODE_VALUE_RESET_COMP: | 3104 | case L4_KCQE_OPCODE_VALUE_RESET_COMP: |
3105 | case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: | ||
3106 | case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: | ||
1875 | cp->close_conn(csk, opcode); | 3107 | cp->close_conn(csk, opcode); |
1876 | break; | 3108 | break; |
1877 | 3109 | ||
@@ -1957,6 +3189,76 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) | |||
1957 | return 0; | 3189 | return 0; |
1958 | } | 3190 | } |
1959 | 3191 | ||
3192 | static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) | ||
3193 | { | ||
3194 | struct cnic_dev *dev = csk->dev; | ||
3195 | struct cnic_local *cp = dev->cnic_priv; | ||
3196 | struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; | ||
3197 | union l5cm_specific_data l5_data; | ||
3198 | u32 cmd = 0; | ||
3199 | int close_complete = 0; | ||
3200 | |||
3201 | switch (opcode) { | ||
3202 | case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: | ||
3203 | case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: | ||
3204 | case L4_KCQE_OPCODE_VALUE_RESET_COMP: | ||
3205 | if (cnic_ready_to_close(csk, opcode)) | ||
3206 | cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; | ||
3207 | break; | ||
3208 | case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: | ||
3209 | cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; | ||
3210 | break; | ||
3211 | case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: | ||
3212 | close_complete = 1; | ||
3213 | break; | ||
3214 | } | ||
3215 | if (cmd) { | ||
3216 | memset(&l5_data, 0, sizeof(l5_data)); | ||
3217 | |||
3218 | cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, | ||
3219 | &l5_data); | ||
3220 | } else if (close_complete) { | ||
3221 | ctx->timestamp = jiffies; | ||
3222 | cnic_close_conn(csk); | ||
3223 | cnic_cm_upcall(cp, csk, csk->state); | ||
3224 | } | ||
3225 | } | ||
3226 | |||
3227 | static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) | ||
3228 | { | ||
3229 | } | ||
3230 | |||
3231 | static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) | ||
3232 | { | ||
3233 | struct cnic_local *cp = dev->cnic_priv; | ||
3234 | int func = CNIC_FUNC(cp); | ||
3235 | |||
3236 | cnic_init_bnx2x_mac(dev); | ||
3237 | cnic_bnx2x_set_tcp_timestamp(dev, 1); | ||
3238 | |||
3239 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + | ||
3240 | XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0); | ||
3241 | |||
3242 | CNIC_WR(dev, BAR_XSTRORM_INTMEM + | ||
3243 | XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1); | ||
3244 | CNIC_WR(dev, BAR_XSTRORM_INTMEM + | ||
3245 | XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func), | ||
3246 | DEF_MAX_DA_COUNT); | ||
3247 | |||
3248 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
3249 | XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL); | ||
3250 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
3251 | XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS); | ||
3252 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
3253 | XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2); | ||
3254 | CNIC_WR(dev, BAR_XSTRORM_INTMEM + | ||
3255 | XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER); | ||
3256 | |||
3257 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func), | ||
3258 | DEF_MAX_CWND); | ||
3259 | return 0; | ||
3260 | } | ||
3261 | |||
1960 | static int cnic_cm_open(struct cnic_dev *dev) | 3262 | static int cnic_cm_open(struct cnic_dev *dev) |
1961 | { | 3263 | { |
1962 | struct cnic_local *cp = dev->cnic_priv; | 3264 | struct cnic_local *cp = dev->cnic_priv; |
@@ -2091,7 +3393,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev) | |||
2091 | 3393 | ||
2092 | cp->bnx2_status_blk = cp->status_blk; | 3394 | cp->bnx2_status_blk = cp->status_blk; |
2093 | cp->last_status_idx = cp->bnx2_status_blk->status_idx; | 3395 | cp->last_status_idx = cp->bnx2_status_blk->status_idx; |
2094 | tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix, | 3396 | tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, |
2095 | (unsigned long) dev); | 3397 | (unsigned long) dev); |
2096 | err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, | 3398 | err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, |
2097 | "cnic", dev); | 3399 | "cnic", dev); |
@@ -2464,6 +3766,426 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) | |||
2464 | return 0; | 3766 | return 0; |
2465 | } | 3767 | } |
2466 | 3768 | ||
3769 | static void cnic_setup_bnx2x_context(struct cnic_dev *dev) | ||
3770 | { | ||
3771 | struct cnic_local *cp = dev->cnic_priv; | ||
3772 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
3773 | u32 start_offset = ethdev->ctx_tbl_offset; | ||
3774 | int i; | ||
3775 | |||
3776 | for (i = 0; i < cp->ctx_blks; i++) { | ||
3777 | struct cnic_ctx *ctx = &cp->ctx_arr[i]; | ||
3778 | dma_addr_t map = ctx->mapping; | ||
3779 | |||
3780 | if (cp->ctx_align) { | ||
3781 | unsigned long mask = cp->ctx_align - 1; | ||
3782 | |||
3783 | map = (map + mask) & ~mask; | ||
3784 | } | ||
3785 | |||
3786 | cnic_ctx_tbl_wr(dev, start_offset + i, map); | ||
3787 | } | ||
3788 | } | ||
3789 | |||
3790 | static int cnic_init_bnx2x_irq(struct cnic_dev *dev) | ||
3791 | { | ||
3792 | struct cnic_local *cp = dev->cnic_priv; | ||
3793 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
3794 | int err = 0; | ||
3795 | |||
3796 | tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, | ||
3797 | (unsigned long) dev); | ||
3798 | if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { | ||
3799 | err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, | ||
3800 | "cnic", dev); | ||
3801 | if (err) | ||
3802 | tasklet_disable(&cp->cnic_irq_task); | ||
3803 | } | ||
3804 | return err; | ||
3805 | } | ||
3806 | |||
3807 | static void cnic_enable_bnx2x_int(struct cnic_dev *dev) | ||
3808 | { | ||
3809 | struct cnic_local *cp = dev->cnic_priv; | ||
3810 | u8 sb_id = cp->status_blk_num; | ||
3811 | int port = CNIC_PORT(cp); | ||
3812 | |||
3813 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
3814 | CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, | ||
3815 | HC_INDEX_C_ISCSI_EQ_CONS), | ||
3816 | 64 / 12); | ||
3817 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
3818 | CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, | ||
3819 | HC_INDEX_C_ISCSI_EQ_CONS), 0); | ||
3820 | } | ||
3821 | |||
3822 | static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) | ||
3823 | { | ||
3824 | } | ||
3825 | |||
3826 | static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev) | ||
3827 | { | ||
3828 | struct cnic_local *cp = dev->cnic_priv; | ||
3829 | union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring; | ||
3830 | struct eth_context *context; | ||
3831 | struct regpair context_addr; | ||
3832 | dma_addr_t buf_map; | ||
3833 | int func = CNIC_FUNC(cp); | ||
3834 | int port = CNIC_PORT(cp); | ||
3835 | int i; | ||
3836 | int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
3837 | u32 val; | ||
3838 | |||
3839 | memset(txbd, 0, BCM_PAGE_SIZE); | ||
3840 | |||
3841 | buf_map = cp->l2_buf_map; | ||
3842 | for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { | ||
3843 | struct eth_tx_start_bd *start_bd = &txbd->start_bd; | ||
3844 | struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); | ||
3845 | |||
3846 | start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); | ||
3847 | start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); | ||
3848 | reg_bd->addr_hi = start_bd->addr_hi; | ||
3849 | reg_bd->addr_lo = start_bd->addr_lo + 0x10; | ||
3850 | start_bd->nbytes = cpu_to_le16(0x10); | ||
3851 | start_bd->nbd = cpu_to_le16(3); | ||
3852 | start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | ||
3853 | start_bd->general_data = (UNICAST_ADDRESS << | ||
3854 | ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); | ||
3855 | start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); | ||
3856 | |||
3857 | } | ||
3858 | context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr); | ||
3859 | |||
3860 | val = (u64) cp->l2_ring_map >> 32; | ||
3861 | txbd->next_bd.addr_hi = cpu_to_le32(val); | ||
3862 | |||
3863 | context->xstorm_st_context.tx_bd_page_base_hi = val; | ||
3864 | |||
3865 | val = (u64) cp->l2_ring_map & 0xffffffff; | ||
3866 | txbd->next_bd.addr_lo = cpu_to_le32(val); | ||
3867 | |||
3868 | context->xstorm_st_context.tx_bd_page_base_lo = val; | ||
3869 | |||
3870 | context->cstorm_st_context.sb_index_number = | ||
3871 | HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS; | ||
3872 | context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID; | ||
3873 | |||
3874 | context->xstorm_st_context.statistics_data = (cli | | ||
3875 | XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); | ||
3876 | |||
3877 | context->xstorm_ag_context.cdu_reserved = | ||
3878 | CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func), | ||
3879 | CDU_REGION_NUMBER_XCM_AG, | ||
3880 | ETH_CONNECTION_TYPE); | ||
3881 | |||
3882 | /* reset xstorm per client statistics */ | ||
3883 | val = BAR_XSTRORM_INTMEM + | ||
3884 | XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
3885 | for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) | ||
3886 | CNIC_WR(dev, val + i * 4, 0); | ||
3887 | |||
3888 | cp->tx_cons_ptr = | ||
3889 | &cp->bnx2x_def_status_blk->c_def_status_block.index_values[ | ||
3890 | HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS]; | ||
3891 | } | ||
3892 | |||
3893 | static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev) | ||
3894 | { | ||
3895 | struct cnic_local *cp = dev->cnic_priv; | ||
3896 | struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring + | ||
3897 | BCM_PAGE_SIZE); | ||
3898 | struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) | ||
3899 | (cp->l2_ring + (2 * BCM_PAGE_SIZE)); | ||
3900 | struct eth_context *context; | ||
3901 | struct regpair context_addr; | ||
3902 | int i; | ||
3903 | int port = CNIC_PORT(cp); | ||
3904 | int func = CNIC_FUNC(cp); | ||
3905 | int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
3906 | u32 val; | ||
3907 | struct tstorm_eth_client_config tstorm_client = {0}; | ||
3908 | |||
3909 | for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { | ||
3910 | dma_addr_t buf_map; | ||
3911 | int n = (i % cp->l2_rx_ring_size) + 1; | ||
3912 | |||
3913 | buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); | ||
3914 | rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); | ||
3915 | rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); | ||
3916 | } | ||
3917 | context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr); | ||
3918 | |||
3919 | val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; | ||
3920 | rxbd->addr_hi = cpu_to_le32(val); | ||
3921 | |||
3922 | context->ustorm_st_context.common.bd_page_base_hi = val; | ||
3923 | |||
3924 | val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; | ||
3925 | rxbd->addr_lo = cpu_to_le32(val); | ||
3926 | |||
3927 | context->ustorm_st_context.common.bd_page_base_lo = val; | ||
3928 | |||
3929 | context->ustorm_st_context.common.sb_index_numbers = | ||
3930 | BNX2X_ISCSI_RX_SB_INDEX_NUM; | ||
3931 | context->ustorm_st_context.common.clientId = cli; | ||
3932 | context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID; | ||
3933 | context->ustorm_st_context.common.flags = | ||
3934 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS; | ||
3935 | context->ustorm_st_context.common.statistics_counter_id = cli; | ||
3936 | context->ustorm_st_context.common.mc_alignment_log_size = 0; | ||
3937 | context->ustorm_st_context.common.bd_buff_size = | ||
3938 | cp->l2_single_buf_size; | ||
3939 | |||
3940 | context->ustorm_ag_context.cdu_usage = | ||
3941 | CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func), | ||
3942 | CDU_REGION_NUMBER_UCM_AG, | ||
3943 | ETH_CONNECTION_TYPE); | ||
3944 | |||
3945 | rxcqe += BNX2X_MAX_RCQ_DESC_CNT; | ||
3946 | val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32; | ||
3947 | rxcqe->addr_hi = cpu_to_le32(val); | ||
3948 | |||
3949 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3950 | USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val); | ||
3951 | |||
3952 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3953 | USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val); | ||
3954 | |||
3955 | val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; | ||
3956 | rxcqe->addr_lo = cpu_to_le32(val); | ||
3957 | |||
3958 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3959 | USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val); | ||
3960 | |||
3961 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3962 | USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val); | ||
3963 | |||
3964 | /* client tstorm info */ | ||
3965 | tstorm_client.mtu = cp->l2_single_buf_size - 14; | ||
3966 | tstorm_client.config_flags = | ||
3967 | (TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE | | ||
3968 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE); | ||
3969 | tstorm_client.statistics_counter_id = cli; | ||
3970 | |||
3971 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
3972 | TSTORM_CLIENT_CONFIG_OFFSET(port, cli), | ||
3973 | ((u32 *)&tstorm_client)[0]); | ||
3974 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
3975 | TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4, | ||
3976 | ((u32 *)&tstorm_client)[1]); | ||
3977 | |||
3978 | /* reset tstorm per client statistics */ | ||
3979 | val = BAR_TSTRORM_INTMEM + | ||
3980 | TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
3981 | for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) | ||
3982 | CNIC_WR(dev, val + i * 4, 0); | ||
3983 | |||
3984 | /* reset ustorm per client statistics */ | ||
3985 | val = BAR_USTRORM_INTMEM + | ||
3986 | USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
3987 | for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++) | ||
3988 | CNIC_WR(dev, val + i * 4, 0); | ||
3989 | |||
3990 | cp->rx_cons_ptr = | ||
3991 | &cp->bnx2x_def_status_blk->u_def_status_block.index_values[ | ||
3992 | HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS]; | ||
3993 | } | ||
3994 | |||
3995 | static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) | ||
3996 | { | ||
3997 | struct cnic_local *cp = dev->cnic_priv; | ||
3998 | u32 base, addr, val; | ||
3999 | int port = CNIC_PORT(cp); | ||
4000 | |||
4001 | dev->max_iscsi_conn = 0; | ||
4002 | base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR); | ||
4003 | if (base < 0xa0000 || base >= 0xc0000) | ||
4004 | return; | ||
4005 | |||
4006 | addr = BNX2X_SHMEM_ADDR(base, | ||
4007 | dev_info.port_hw_config[port].iscsi_mac_upper); | ||
4008 | |||
4009 | val = CNIC_RD(dev, addr); | ||
4010 | |||
4011 | dev->mac_addr[0] = (u8) (val >> 8); | ||
4012 | dev->mac_addr[1] = (u8) val; | ||
4013 | |||
4014 | addr = BNX2X_SHMEM_ADDR(base, | ||
4015 | dev_info.port_hw_config[port].iscsi_mac_lower); | ||
4016 | |||
4017 | val = CNIC_RD(dev, addr); | ||
4018 | |||
4019 | dev->mac_addr[2] = (u8) (val >> 24); | ||
4020 | dev->mac_addr[3] = (u8) (val >> 16); | ||
4021 | dev->mac_addr[4] = (u8) (val >> 8); | ||
4022 | dev->mac_addr[5] = (u8) val; | ||
4023 | |||
4024 | addr = BNX2X_SHMEM_ADDR(base, validity_map[port]); | ||
4025 | val = CNIC_RD(dev, addr); | ||
4026 | |||
4027 | if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) { | ||
4028 | u16 val16; | ||
4029 | |||
4030 | addr = BNX2X_SHMEM_ADDR(base, | ||
4031 | drv_lic_key[port].max_iscsi_init_conn); | ||
4032 | val16 = CNIC_RD16(dev, addr); | ||
4033 | |||
4034 | if (val16) | ||
4035 | val16 ^= 0x1e1e; | ||
4036 | dev->max_iscsi_conn = val16; | ||
4037 | } | ||
4038 | if (BNX2X_CHIP_IS_E1H(cp->chip_id)) { | ||
4039 | int func = CNIC_FUNC(cp); | ||
4040 | |||
4041 | addr = BNX2X_SHMEM_ADDR(base, | ||
4042 | mf_cfg.func_mf_config[func].e1hov_tag); | ||
4043 | val = CNIC_RD(dev, addr); | ||
4044 | val &= FUNC_MF_CFG_E1HOV_TAG_MASK; | ||
4045 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { | ||
4046 | addr = BNX2X_SHMEM_ADDR(base, | ||
4047 | mf_cfg.func_mf_config[func].config); | ||
4048 | val = CNIC_RD(dev, addr); | ||
4049 | val &= FUNC_MF_CFG_PROTOCOL_MASK; | ||
4050 | if (val != FUNC_MF_CFG_PROTOCOL_ISCSI) | ||
4051 | dev->max_iscsi_conn = 0; | ||
4052 | } | ||
4053 | } | ||
4054 | } | ||
4055 | |||
4056 | static int cnic_start_bnx2x_hw(struct cnic_dev *dev) | ||
4057 | { | ||
4058 | struct cnic_local *cp = dev->cnic_priv; | ||
4059 | int func = CNIC_FUNC(cp), ret, i; | ||
4060 | int port = CNIC_PORT(cp); | ||
4061 | u16 eq_idx; | ||
4062 | u8 sb_id = cp->status_blk_num; | ||
4063 | |||
4064 | ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, | ||
4065 | BNX2X_ISCSI_START_CID); | ||
4066 | |||
4067 | if (ret) | ||
4068 | return -ENOMEM; | ||
4069 | |||
4070 | cp->kcq_io_addr = BAR_CSTRORM_INTMEM + | ||
4071 | CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0); | ||
4072 | cp->kcq_prod_idx = 0; | ||
4073 | |||
4074 | cnic_get_bnx2x_iscsi_info(dev); | ||
4075 | |||
4076 | /* Only 1 EQ */ | ||
4077 | CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX); | ||
4078 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4079 | CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0); | ||
4080 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4081 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0), | ||
4082 | cp->kcq_info.pg_map_arr[1] & 0xffffffff); | ||
4083 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4084 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4, | ||
4085 | (u64) cp->kcq_info.pg_map_arr[1] >> 32); | ||
4086 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4087 | CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0), | ||
4088 | cp->kcq_info.pg_map_arr[0] & 0xffffffff); | ||
4089 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4090 | CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4, | ||
4091 | (u64) cp->kcq_info.pg_map_arr[0] >> 32); | ||
4092 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
4093 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1); | ||
4094 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
4095 | CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num); | ||
4096 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
4097 | CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0), | ||
4098 | HC_INDEX_C_ISCSI_EQ_CONS); | ||
4099 | |||
4100 | for (i = 0; i < cp->conn_buf_info.num_pages; i++) { | ||
4101 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
4102 | TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i), | ||
4103 | cp->conn_buf_info.pgtbl[2 * i]); | ||
4104 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
4105 | TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4, | ||
4106 | cp->conn_buf_info.pgtbl[(2 * i) + 1]); | ||
4107 | } | ||
4108 | |||
4109 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
4110 | USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func), | ||
4111 | cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); | ||
4112 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
4113 | USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4, | ||
4114 | (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); | ||
4115 | |||
4116 | cnic_setup_bnx2x_context(dev); | ||
4117 | |||
4118 | eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM + | ||
4119 | CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) + | ||
4120 | offsetof(struct cstorm_status_block_c, | ||
4121 | index_values[HC_INDEX_C_ISCSI_EQ_CONS])); | ||
4122 | if (eq_idx != 0) { | ||
4123 | printk(KERN_ERR PFX "%s: EQ cons index %x != 0\n", | ||
4124 | dev->netdev->name, eq_idx); | ||
4125 | return -EBUSY; | ||
4126 | } | ||
4127 | ret = cnic_init_bnx2x_irq(dev); | ||
4128 | if (ret) | ||
4129 | return ret; | ||
4130 | |||
4131 | cnic_init_bnx2x_tx_ring(dev); | ||
4132 | cnic_init_bnx2x_rx_ring(dev); | ||
4133 | |||
4134 | return 0; | ||
4135 | } | ||
4136 | |||
4137 | static void cnic_init_rings(struct cnic_dev *dev) | ||
4138 | { | ||
4139 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { | ||
4140 | cnic_init_bnx2_tx_ring(dev); | ||
4141 | cnic_init_bnx2_rx_ring(dev); | ||
4142 | } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { | ||
4143 | struct cnic_local *cp = dev->cnic_priv; | ||
4144 | u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
4145 | union l5cm_specific_data l5_data; | ||
4146 | struct ustorm_eth_rx_producers rx_prods = {0}; | ||
4147 | u32 off, i; | ||
4148 | |||
4149 | rx_prods.bd_prod = 0; | ||
4150 | rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; | ||
4151 | barrier(); | ||
4152 | |||
4153 | off = BAR_USTRORM_INTMEM + | ||
4154 | USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli); | ||
4155 | |||
4156 | for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) | ||
4157 | CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); | ||
4158 | |||
4159 | cnic_init_bnx2x_tx_ring(dev); | ||
4160 | cnic_init_bnx2x_rx_ring(dev); | ||
4161 | |||
4162 | l5_data.phy_address.lo = cli; | ||
4163 | l5_data.phy_address.hi = 0; | ||
4164 | cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, | ||
4165 | BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); | ||
4166 | cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1); | ||
4167 | } | ||
4168 | } | ||
4169 | |||
4170 | static void cnic_shutdown_rings(struct cnic_dev *dev) | ||
4171 | { | ||
4172 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { | ||
4173 | cnic_shutdown_bnx2_rx_ring(dev); | ||
4174 | } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { | ||
4175 | struct cnic_local *cp = dev->cnic_priv; | ||
4176 | u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
4177 | union l5cm_specific_data l5_data; | ||
4178 | |||
4179 | cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); | ||
4180 | |||
4181 | l5_data.phy_address.lo = cli; | ||
4182 | l5_data.phy_address.hi = 0; | ||
4183 | cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, | ||
4184 | BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); | ||
4185 | msleep(10); | ||
4186 | } | ||
4187 | } | ||
4188 | |||
2467 | static int cnic_register_netdev(struct cnic_dev *dev) | 4189 | static int cnic_register_netdev(struct cnic_dev *dev) |
2468 | { | 4190 | { |
2469 | struct cnic_local *cp = dev->cnic_priv; | 4191 | struct cnic_local *cp = dev->cnic_priv; |
@@ -2554,6 +4276,22 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev) | |||
2554 | cnic_free_resc(dev); | 4276 | cnic_free_resc(dev); |
2555 | } | 4277 | } |
2556 | 4278 | ||
4279 | |||
4280 | static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) | ||
4281 | { | ||
4282 | struct cnic_local *cp = dev->cnic_priv; | ||
4283 | u8 sb_id = cp->status_blk_num; | ||
4284 | int port = CNIC_PORT(cp); | ||
4285 | |||
4286 | cnic_free_irq(dev); | ||
4287 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
4288 | CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) + | ||
4289 | offsetof(struct cstorm_status_block_c, | ||
4290 | index_values[HC_INDEX_C_ISCSI_EQ_CONS]), | ||
4291 | 0); | ||
4292 | cnic_free_resc(dev); | ||
4293 | } | ||
4294 | |||
2557 | static void cnic_stop_hw(struct cnic_dev *dev) | 4295 | static void cnic_stop_hw(struct cnic_dev *dev) |
2558 | { | 4296 | { |
2559 | if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { | 4297 | if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { |
@@ -2685,6 +4423,57 @@ cnic_err: | |||
2685 | return NULL; | 4423 | return NULL; |
2686 | } | 4424 | } |
2687 | 4425 | ||
4426 | static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) | ||
4427 | { | ||
4428 | struct pci_dev *pdev; | ||
4429 | struct cnic_dev *cdev; | ||
4430 | struct cnic_local *cp; | ||
4431 | struct cnic_eth_dev *ethdev = NULL; | ||
4432 | struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; | ||
4433 | |||
4434 | probe = symbol_get(bnx2x_cnic_probe); | ||
4435 | if (probe) { | ||
4436 | ethdev = (*probe)(dev); | ||
4437 | symbol_put(bnx2x_cnic_probe); | ||
4438 | } | ||
4439 | if (!ethdev) | ||
4440 | return NULL; | ||
4441 | |||
4442 | pdev = ethdev->pdev; | ||
4443 | if (!pdev) | ||
4444 | return NULL; | ||
4445 | |||
4446 | dev_hold(dev); | ||
4447 | cdev = cnic_alloc_dev(dev, pdev); | ||
4448 | if (cdev == NULL) { | ||
4449 | dev_put(dev); | ||
4450 | return NULL; | ||
4451 | } | ||
4452 | |||
4453 | set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); | ||
4454 | cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; | ||
4455 | |||
4456 | cp = cdev->cnic_priv; | ||
4457 | cp->ethdev = ethdev; | ||
4458 | cdev->pcidev = pdev; | ||
4459 | |||
4460 | cp->cnic_ops = &cnic_bnx2x_ops; | ||
4461 | cp->start_hw = cnic_start_bnx2x_hw; | ||
4462 | cp->stop_hw = cnic_stop_bnx2x_hw; | ||
4463 | cp->setup_pgtbl = cnic_setup_page_tbl_le; | ||
4464 | cp->alloc_resc = cnic_alloc_bnx2x_resc; | ||
4465 | cp->free_resc = cnic_free_resc; | ||
4466 | cp->start_cm = cnic_cm_init_bnx2x_hw; | ||
4467 | cp->stop_cm = cnic_cm_stop_bnx2x_hw; | ||
4468 | cp->enable_int = cnic_enable_bnx2x_int; | ||
4469 | cp->disable_int_sync = cnic_disable_bnx2x_int_sync; | ||
4470 | cp->ack_int = cnic_ack_bnx2x_msix; | ||
4471 | cp->close_conn = cnic_close_bnx2x_conn; | ||
4472 | cp->next_idx = cnic_bnx2x_next_idx; | ||
4473 | cp->hw_idx = cnic_bnx2x_hw_idx; | ||
4474 | return cdev; | ||
4475 | } | ||
4476 | |||
2688 | static struct cnic_dev *is_cnic_dev(struct net_device *dev) | 4477 | static struct cnic_dev *is_cnic_dev(struct net_device *dev) |
2689 | { | 4478 | { |
2690 | struct ethtool_drvinfo drvinfo; | 4479 | struct ethtool_drvinfo drvinfo; |
@@ -2696,6 +4485,8 @@ static struct cnic_dev *is_cnic_dev(struct net_device *dev) | |||
2696 | 4485 | ||
2697 | if (!strcmp(drvinfo.driver, "bnx2")) | 4486 | if (!strcmp(drvinfo.driver, "bnx2")) |
2698 | cdev = init_bnx2_cnic(dev); | 4487 | cdev = init_bnx2_cnic(dev); |
4488 | if (!strcmp(drvinfo.driver, "bnx2x")) | ||
4489 | cdev = init_bnx2x_cnic(dev); | ||
2699 | if (cdev) { | 4490 | if (cdev) { |
2700 | write_lock(&cnic_dev_lock); | 4491 | write_lock(&cnic_dev_lock); |
2701 | list_add(&cdev->list, &cnic_dev_list); | 4492 | list_add(&cdev->list, &cnic_dev_list); |