Diffstat (limited to 'drivers/net/cnic.c')
-rw-r--r-- | drivers/net/cnic.c | 1822 |
1 file changed, 1799 insertions(+), 23 deletions(-)
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 3bf1b04f2cab..ee7eb9ee77e2 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -33,10 +33,16 @@ | |||
33 | #include <net/route.h> | 33 | #include <net/route.h> |
34 | #include <net/ipv6.h> | 34 | #include <net/ipv6.h> |
35 | #include <net/ip6_route.h> | 35 | #include <net/ip6_route.h> |
36 | #include <net/ip6_checksum.h> | ||
36 | #include <scsi/iscsi_if.h> | 37 | #include <scsi/iscsi_if.h> |
37 | 38 | ||
38 | #include "cnic_if.h" | 39 | #include "cnic_if.h" |
39 | #include "bnx2.h" | 40 | #include "bnx2.h" |
41 | #include "bnx2x_reg.h" | ||
42 | #include "bnx2x_fw_defs.h" | ||
43 | #include "bnx2x_hsi.h" | ||
44 | #include "../scsi/bnx2i/57xx_iscsi_constants.h" | ||
45 | #include "../scsi/bnx2i/57xx_iscsi_hsi.h" | ||
40 | #include "cnic.h" | 46 | #include "cnic.h" |
41 | #include "cnic_defs.h" | 47 | #include "cnic_defs.h" |
42 | 48 | ||
@@ -59,6 +65,7 @@ static DEFINE_MUTEX(cnic_lock); | |||
59 | static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; | 65 | static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; |
60 | 66 | ||
61 | static int cnic_service_bnx2(void *, void *); | 67 | static int cnic_service_bnx2(void *, void *); |
68 | static int cnic_service_bnx2x(void *, void *); | ||
62 | static int cnic_ctl(void *, struct cnic_ctl_info *); | 69 | static int cnic_ctl(void *, struct cnic_ctl_info *); |
63 | 70 | ||
64 | static struct cnic_ops cnic_bnx2_ops = { | 71 | static struct cnic_ops cnic_bnx2_ops = { |
@@ -67,9 +74,14 @@ static struct cnic_ops cnic_bnx2_ops = { | |||
67 | .cnic_ctl = cnic_ctl, | 74 | .cnic_ctl = cnic_ctl, |
68 | }; | 75 | }; |
69 | 76 | ||
70 | static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *); | 77 | static struct cnic_ops cnic_bnx2x_ops = { |
71 | static void cnic_init_bnx2_tx_ring(struct cnic_dev *); | 78 | .cnic_owner = THIS_MODULE, |
72 | static void cnic_init_bnx2_rx_ring(struct cnic_dev *); | 79 | .cnic_handler = cnic_service_bnx2x, |
80 | .cnic_ctl = cnic_ctl, | ||
81 | }; | ||
82 | |||
83 | static void cnic_shutdown_rings(struct cnic_dev *); | ||
84 | static void cnic_init_rings(struct cnic_dev *); | ||
73 | static int cnic_cm_set_pg(struct cnic_sock *); | 85 | static int cnic_cm_set_pg(struct cnic_sock *); |
74 | 86 | ||
75 | static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) | 87 | static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) |
@@ -83,10 +95,16 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) | |||
83 | if (cp->uio_dev != -1) | 95 | if (cp->uio_dev != -1) |
84 | return -EBUSY; | 96 | return -EBUSY; |
85 | 97 | ||
98 | rtnl_lock(); | ||
99 | if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) { | ||
100 | rtnl_unlock(); | ||
101 | return -ENODEV; | ||
102 | } | ||
103 | |||
86 | cp->uio_dev = iminor(inode); | 104 | cp->uio_dev = iminor(inode); |
87 | 105 | ||
88 | cnic_init_bnx2_tx_ring(dev); | 106 | cnic_init_rings(dev); |
89 | cnic_init_bnx2_rx_ring(dev); | 107 | rtnl_unlock(); |
90 | 108 | ||
91 | return 0; | 109 | return 0; |
92 | } | 110 | } |
@@ -96,7 +114,7 @@ static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) | |||
96 | struct cnic_dev *dev = uinfo->priv; | 114 | struct cnic_dev *dev = uinfo->priv; |
97 | struct cnic_local *cp = dev->cnic_priv; | 115 | struct cnic_local *cp = dev->cnic_priv; |
98 | 116 | ||
99 | cnic_shutdown_bnx2_rx_ring(dev); | 117 | cnic_shutdown_rings(dev); |
100 | 118 | ||
101 | cp->uio_dev = -1; | 119 | cp->uio_dev = -1; |
102 | return 0; | 120 | return 0; |
@@ -162,6 +180,36 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) | |||
162 | ethdev->drv_ctl(dev->netdev, &info); | 180 | ethdev->drv_ctl(dev->netdev, &info); |
163 | } | 181 | } |
164 | 182 | ||
183 | static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr) | ||
184 | { | ||
185 | struct cnic_local *cp = dev->cnic_priv; | ||
186 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
187 | struct drv_ctl_info info; | ||
188 | struct drv_ctl_io *io = &info.data.io; | ||
189 | |||
190 | info.cmd = DRV_CTL_CTXTBL_WR_CMD; | ||
191 | io->offset = off; | ||
192 | io->dma_addr = addr; | ||
193 | ethdev->drv_ctl(dev->netdev, &info); | ||
194 | } | ||
195 | |||
196 | static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start) | ||
197 | { | ||
198 | struct cnic_local *cp = dev->cnic_priv; | ||
199 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
200 | struct drv_ctl_info info; | ||
201 | struct drv_ctl_l2_ring *ring = &info.data.ring; | ||
202 | |||
203 | if (start) | ||
204 | info.cmd = DRV_CTL_START_L2_CMD; | ||
205 | else | ||
206 | info.cmd = DRV_CTL_STOP_L2_CMD; | ||
207 | |||
208 | ring->cid = cid; | ||
209 | ring->client_id = cl_id; | ||
210 | ethdev->drv_ctl(dev->netdev, &info); | ||
211 | } | ||
212 | |||
165 | static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) | 213 | static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) |
166 | { | 214 | { |
167 | struct cnic_local *cp = dev->cnic_priv; | 215 | struct cnic_local *cp = dev->cnic_priv; |
@@ -204,6 +252,19 @@ static void cnic_kwq_completion(struct cnic_dev *dev, u32 count) | |||
204 | ethdev->drv_ctl(dev->netdev, &info); | 252 | ethdev->drv_ctl(dev->netdev, &info); |
205 | } | 253 | } |
206 | 254 | ||
255 | static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid) | ||
256 | { | ||
257 | u32 i; | ||
258 | |||
259 | for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { | ||
260 | if (cp->ctx_tbl[i].cid == cid) { | ||
261 | *l5_cid = i; | ||
262 | return 0; | ||
263 | } | ||
264 | } | ||
265 | return -EINVAL; | ||
266 | } | ||
267 | |||
207 | static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, | 268 | static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, |
208 | struct cnic_sock *csk) | 269 | struct cnic_sock *csk) |
209 | { | 270 | { |
@@ -347,7 +408,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) | |||
347 | { | 408 | { |
348 | struct cnic_dev *dev; | 409 | struct cnic_dev *dev; |
349 | 410 | ||
350 | if (ulp_type >= MAX_CNIC_ULP_TYPE) { | 411 | if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { |
351 | printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n", | 412 | printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n", |
352 | ulp_type); | 413 | ulp_type); |
353 | return -EINVAL; | 414 | return -EINVAL; |
@@ -393,7 +454,7 @@ int cnic_unregister_driver(int ulp_type) | |||
393 | struct cnic_ulp_ops *ulp_ops; | 454 | struct cnic_ulp_ops *ulp_ops; |
394 | int i = 0; | 455 | int i = 0; |
395 | 456 | ||
396 | if (ulp_type >= MAX_CNIC_ULP_TYPE) { | 457 | if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { |
397 | printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", | 458 | printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", |
398 | ulp_type); | 459 | ulp_type); |
399 | return -EINVAL; | 460 | return -EINVAL; |
@@ -449,7 +510,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, | |||
449 | struct cnic_local *cp = dev->cnic_priv; | 510 | struct cnic_local *cp = dev->cnic_priv; |
450 | struct cnic_ulp_ops *ulp_ops; | 511 | struct cnic_ulp_ops *ulp_ops; |
451 | 512 | ||
452 | if (ulp_type >= MAX_CNIC_ULP_TYPE) { | 513 | if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { |
453 | printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n", | 514 | printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n", |
454 | ulp_type); | 515 | ulp_type); |
455 | return -EINVAL; | 516 | return -EINVAL; |
@@ -490,7 +551,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) | |||
490 | struct cnic_local *cp = dev->cnic_priv; | 551 | struct cnic_local *cp = dev->cnic_priv; |
491 | int i = 0; | 552 | int i = 0; |
492 | 553 | ||
493 | if (ulp_type >= MAX_CNIC_ULP_TYPE) { | 554 | if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { |
494 | printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", | 555 | printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", |
495 | ulp_type); | 556 | ulp_type); |
496 | return -EINVAL; | 557 | return -EINVAL; |
@@ -635,6 +696,20 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) | |||
635 | } | 696 | } |
636 | } | 697 | } |
637 | 698 | ||
699 | static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) | ||
700 | { | ||
701 | int i; | ||
702 | u32 *page_table = dma->pgtbl; | ||
703 | |||
704 | for (i = 0; i < dma->num_pages; i++) { | ||
705 | /* Each entry needs to be in little endian format. */ | ||
706 | *page_table = dma->pg_map_arr[i] & 0xffffffff; | ||
707 | page_table++; | ||
708 | *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); | ||
709 | page_table++; | ||
710 | } | ||
711 | } | ||
712 | |||
638 | static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, | 713 | static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, |
639 | int pages, int use_pg_tbl) | 714 | int pages, int use_pg_tbl) |
640 | { | 715 | { |
@@ -675,6 +750,21 @@ error: | |||
675 | return -ENOMEM; | 750 | return -ENOMEM; |
676 | } | 751 | } |
677 | 752 | ||
753 | static void cnic_free_context(struct cnic_dev *dev) | ||
754 | { | ||
755 | struct cnic_local *cp = dev->cnic_priv; | ||
756 | int i; | ||
757 | |||
758 | for (i = 0; i < cp->ctx_blks; i++) { | ||
759 | if (cp->ctx_arr[i].ctx) { | ||
760 | pci_free_consistent(dev->pcidev, cp->ctx_blk_size, | ||
761 | cp->ctx_arr[i].ctx, | ||
762 | cp->ctx_arr[i].mapping); | ||
763 | cp->ctx_arr[i].ctx = NULL; | ||
764 | } | ||
765 | } | ||
766 | } | ||
767 | |||
678 | static void cnic_free_resc(struct cnic_dev *dev) | 768 | static void cnic_free_resc(struct cnic_dev *dev) |
679 | { | 769 | { |
680 | struct cnic_local *cp = dev->cnic_priv; | 770 | struct cnic_local *cp = dev->cnic_priv; |
@@ -702,14 +792,7 @@ static void cnic_free_resc(struct cnic_dev *dev) | |||
702 | cp->l2_ring = NULL; | 792 | cp->l2_ring = NULL; |
703 | } | 793 | } |
704 | 794 | ||
705 | for (i = 0; i < cp->ctx_blks; i++) { | 795 | cnic_free_context(dev); |
706 | if (cp->ctx_arr[i].ctx) { | ||
707 | pci_free_consistent(dev->pcidev, cp->ctx_blk_size, | ||
708 | cp->ctx_arr[i].ctx, | ||
709 | cp->ctx_arr[i].mapping); | ||
710 | cp->ctx_arr[i].ctx = NULL; | ||
711 | } | ||
712 | } | ||
713 | kfree(cp->ctx_arr); | 796 | kfree(cp->ctx_arr); |
714 | cp->ctx_arr = NULL; | 797 | cp->ctx_arr = NULL; |
715 | cp->ctx_blks = 0; | 798 | cp->ctx_blks = 0; |
@@ -717,6 +800,7 @@ static void cnic_free_resc(struct cnic_dev *dev) | |||
717 | cnic_free_dma(dev, &cp->gbl_buf_info); | 800 | cnic_free_dma(dev, &cp->gbl_buf_info); |
718 | cnic_free_dma(dev, &cp->conn_buf_info); | 801 | cnic_free_dma(dev, &cp->conn_buf_info); |
719 | cnic_free_dma(dev, &cp->kwq_info); | 802 | cnic_free_dma(dev, &cp->kwq_info); |
803 | cnic_free_dma(dev, &cp->kwq_16_data_info); | ||
720 | cnic_free_dma(dev, &cp->kcq_info); | 804 | cnic_free_dma(dev, &cp->kcq_info); |
721 | kfree(cp->iscsi_tbl); | 805 | kfree(cp->iscsi_tbl); |
722 | cp->iscsi_tbl = NULL; | 806 | cp->iscsi_tbl = NULL; |
@@ -808,14 +892,20 @@ static int cnic_alloc_uio(struct cnic_dev *dev) { | |||
808 | uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; | 892 | uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; |
809 | uinfo->mem[0].memtype = UIO_MEM_PHYS; | 893 | uinfo->mem[0].memtype = UIO_MEM_PHYS; |
810 | 894 | ||
811 | uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK; | ||
812 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { | 895 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { |
896 | uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK; | ||
813 | if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) | 897 | if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) |
814 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; | 898 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; |
815 | else | 899 | else |
816 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; | 900 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; |
817 | 901 | ||
818 | uinfo->name = "bnx2_cnic"; | 902 | uinfo->name = "bnx2_cnic"; |
903 | } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { | ||
904 | uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & | ||
905 | PAGE_MASK; | ||
906 | uinfo->mem[1].size = sizeof(struct host_def_status_block); | ||
907 | |||
908 | uinfo->name = "bnx2x_cnic"; | ||
819 | } | 909 | } |
820 | 910 | ||
821 | uinfo->mem[1].memtype = UIO_MEM_LOGICAL; | 911 | uinfo->mem[1].memtype = UIO_MEM_LOGICAL; |
@@ -880,6 +970,151 @@ error: | |||
880 | return ret; | 970 | return ret; |
881 | } | 971 | } |
882 | 972 | ||
973 | static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) | ||
974 | { | ||
975 | struct cnic_local *cp = dev->cnic_priv; | ||
976 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
977 | int ctx_blk_size = cp->ethdev->ctx_blk_size; | ||
978 | int total_mem, blks, i, cid_space; | ||
979 | |||
980 | if (BNX2X_ISCSI_START_CID < ethdev->starting_cid) | ||
981 | return -EINVAL; | ||
982 | |||
983 | cid_space = MAX_ISCSI_TBL_SZ + | ||
984 | (BNX2X_ISCSI_START_CID - ethdev->starting_cid); | ||
985 | |||
986 | total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space; | ||
987 | blks = total_mem / ctx_blk_size; | ||
988 | if (total_mem % ctx_blk_size) | ||
989 | blks++; | ||
990 | |||
991 | if (blks > cp->ethdev->ctx_tbl_len) | ||
992 | return -ENOMEM; | ||
993 | |||
994 | cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL); | ||
995 | if (cp->ctx_arr == NULL) | ||
996 | return -ENOMEM; | ||
997 | |||
998 | cp->ctx_blks = blks; | ||
999 | cp->ctx_blk_size = ctx_blk_size; | ||
1000 | if (BNX2X_CHIP_IS_E1H(cp->chip_id)) | ||
1001 | cp->ctx_align = 0; | ||
1002 | else | ||
1003 | cp->ctx_align = ctx_blk_size; | ||
1004 | |||
1005 | cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE; | ||
1006 | |||
1007 | for (i = 0; i < blks; i++) { | ||
1008 | cp->ctx_arr[i].ctx = | ||
1009 | pci_alloc_consistent(dev->pcidev, cp->ctx_blk_size, | ||
1010 | &cp->ctx_arr[i].mapping); | ||
1011 | if (cp->ctx_arr[i].ctx == NULL) | ||
1012 | return -ENOMEM; | ||
1013 | |||
1014 | if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) { | ||
1015 | if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { | ||
1016 | cnic_free_context(dev); | ||
1017 | cp->ctx_blk_size += cp->ctx_align; | ||
1018 | i = -1; | ||
1019 | continue; | ||
1020 | } | ||
1021 | } | ||
1022 | } | ||
1023 | return 0; | ||
1024 | } | ||
1025 | |||
1026 | static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | ||
1027 | { | ||
1028 | struct cnic_local *cp = dev->cnic_priv; | ||
1029 | int i, j, n, ret, pages; | ||
1030 | struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; | ||
1031 | |||
1032 | cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, | ||
1033 | GFP_KERNEL); | ||
1034 | if (!cp->iscsi_tbl) | ||
1035 | goto error; | ||
1036 | |||
1037 | cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * | ||
1038 | MAX_CNIC_L5_CONTEXT, GFP_KERNEL); | ||
1039 | if (!cp->ctx_tbl) | ||
1040 | goto error; | ||
1041 | |||
1042 | for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { | ||
1043 | cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i]; | ||
1044 | cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; | ||
1045 | } | ||
1046 | |||
1047 | pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) / | ||
1048 | PAGE_SIZE; | ||
1049 | |||
1050 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); | ||
1051 | if (ret) | ||
1052 | return -ENOMEM; | ||
1053 | |||
1054 | n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; | ||
1055 | for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) { | ||
1056 | long off = CNIC_KWQ16_DATA_SIZE * (i % n); | ||
1057 | |||
1058 | cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; | ||
1059 | cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] + | ||
1060 | off; | ||
1061 | |||
1062 | if ((i % n) == (n - 1)) | ||
1063 | j++; | ||
1064 | } | ||
1065 | |||
1066 | ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0); | ||
1067 | if (ret) | ||
1068 | goto error; | ||
1069 | cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr; | ||
1070 | |||
1071 | for (i = 0; i < KCQ_PAGE_CNT; i++) { | ||
1072 | struct bnx2x_bd_chain_next *next = | ||
1073 | (struct bnx2x_bd_chain_next *) | ||
1074 | &cp->kcq[i][MAX_KCQE_CNT]; | ||
1075 | int j = i + 1; | ||
1076 | |||
1077 | if (j >= KCQ_PAGE_CNT) | ||
1078 | j = 0; | ||
1079 | next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32; | ||
1080 | next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff; | ||
1081 | } | ||
1082 | |||
1083 | pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS * | ||
1084 | BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE; | ||
1085 | ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1); | ||
1086 | if (ret) | ||
1087 | goto error; | ||
1088 | |||
1089 | pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; | ||
1090 | ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); | ||
1091 | if (ret) | ||
1092 | goto error; | ||
1093 | |||
1094 | ret = cnic_alloc_bnx2x_context(dev); | ||
1095 | if (ret) | ||
1096 | goto error; | ||
1097 | |||
1098 | cp->bnx2x_status_blk = cp->status_blk; | ||
1099 | cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; | ||
1100 | |||
1101 | cp->l2_rx_ring_size = 15; | ||
1102 | |||
1103 | ret = cnic_alloc_l2_rings(dev, 4); | ||
1104 | if (ret) | ||
1105 | goto error; | ||
1106 | |||
1107 | ret = cnic_alloc_uio(dev); | ||
1108 | if (ret) | ||
1109 | goto error; | ||
1110 | |||
1111 | return 0; | ||
1112 | |||
1113 | error: | ||
1114 | cnic_free_resc(dev); | ||
1115 | return -ENOMEM; | ||
1116 | } | ||
1117 | |||
883 | static inline u32 cnic_kwq_avail(struct cnic_local *cp) | 1118 | static inline u32 cnic_kwq_avail(struct cnic_local *cp) |
884 | { | 1119 | { |
885 | return cp->max_kwq_idx - | 1120 | return cp->max_kwq_idx - |
@@ -921,6 +1156,880 @@ static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], | |||
921 | return 0; | 1156 | return 0; |
922 | } | 1157 | } |
923 | 1158 | ||
1159 | static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid, | ||
1160 | union l5cm_specific_data *l5_data) | ||
1161 | { | ||
1162 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1163 | dma_addr_t map; | ||
1164 | |||
1165 | map = ctx->kwqe_data_mapping; | ||
1166 | l5_data->phy_address.lo = (u64) map & 0xffffffff; | ||
1167 | l5_data->phy_address.hi = (u64) map >> 32; | ||
1168 | return ctx->kwqe_data; | ||
1169 | } | ||
1170 | |||
1171 | static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, | ||
1172 | u32 type, union l5cm_specific_data *l5_data) | ||
1173 | { | ||
1174 | struct cnic_local *cp = dev->cnic_priv; | ||
1175 | struct l5cm_spe kwqe; | ||
1176 | struct kwqe_16 *kwq[1]; | ||
1177 | int ret; | ||
1178 | |||
1179 | kwqe.hdr.conn_and_cmd_data = | ||
1180 | cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | | ||
1181 | BNX2X_HW_CID(cid, cp->func))); | ||
1182 | kwqe.hdr.type = cpu_to_le16(type); | ||
1183 | kwqe.hdr.reserved = 0; | ||
1184 | kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); | ||
1185 | kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); | ||
1186 | |||
1187 | kwq[0] = (struct kwqe_16 *) &kwqe; | ||
1188 | |||
1189 | spin_lock_bh(&cp->cnic_ulp_lock); | ||
1190 | ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1); | ||
1191 | spin_unlock_bh(&cp->cnic_ulp_lock); | ||
1192 | |||
1193 | if (ret == 1) | ||
1194 | return 0; | ||
1195 | |||
1196 | return -EBUSY; | ||
1197 | } | ||
1198 | |||
1199 | static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, | ||
1200 | struct kcqe *cqes[], u32 num_cqes) | ||
1201 | { | ||
1202 | struct cnic_local *cp = dev->cnic_priv; | ||
1203 | struct cnic_ulp_ops *ulp_ops; | ||
1204 | |||
1205 | rcu_read_lock(); | ||
1206 | ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); | ||
1207 | if (likely(ulp_ops)) { | ||
1208 | ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], | ||
1209 | cqes, num_cqes); | ||
1210 | } | ||
1211 | rcu_read_unlock(); | ||
1212 | } | ||
1213 | |||
1214 | static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1215 | { | ||
1216 | struct cnic_local *cp = dev->cnic_priv; | ||
1217 | struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; | ||
1218 | int func = cp->func, pages; | ||
1219 | int hq_bds; | ||
1220 | |||
1221 | cp->num_iscsi_tasks = req1->num_tasks_per_conn; | ||
1222 | cp->num_ccells = req1->num_ccells_per_conn; | ||
1223 | cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE * | ||
1224 | cp->num_iscsi_tasks; | ||
1225 | cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * | ||
1226 | BNX2X_ISCSI_R2TQE_SIZE; | ||
1227 | cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; | ||
1228 | pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; | ||
1229 | hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); | ||
1230 | cp->num_cqs = req1->num_cqs; | ||
1231 | |||
1232 | if (!dev->max_iscsi_conn) | ||
1233 | return 0; | ||
1234 | |||
1235 | /* init Tstorm RAM */ | ||
1236 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func), | ||
1237 | req1->rq_num_wqes); | ||
1238 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1239 | PAGE_SIZE); | ||
1240 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1241 | TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1242 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + | ||
1243 | TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1244 | req1->num_tasks_per_conn); | ||
1245 | |||
1246 | /* init Ustorm RAM */ | ||
1247 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | ||
1248 | USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func), | ||
1249 | req1->rq_buffer_size); | ||
1250 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1251 | PAGE_SIZE); | ||
1252 | CNIC_WR8(dev, BAR_USTRORM_INTMEM + | ||
1253 | USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1254 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | ||
1255 | USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1256 | req1->num_tasks_per_conn); | ||
1257 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func), | ||
1258 | req1->rq_num_wqes); | ||
1259 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func), | ||
1260 | req1->cq_num_wqes); | ||
1261 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func), | ||
1262 | cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); | ||
1263 | |||
1264 | /* init Xstorm RAM */ | ||
1265 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1266 | PAGE_SIZE); | ||
1267 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1268 | XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1269 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + | ||
1270 | XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1271 | req1->num_tasks_per_conn); | ||
1272 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func), | ||
1273 | hq_bds); | ||
1274 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func), | ||
1275 | req1->num_tasks_per_conn); | ||
1276 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func), | ||
1277 | cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); | ||
1278 | |||
1279 | /* init Cstorm RAM */ | ||
1280 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1281 | PAGE_SIZE); | ||
1282 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
1283 | CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1284 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
1285 | CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1286 | req1->num_tasks_per_conn); | ||
1287 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func), | ||
1288 | req1->cq_num_wqes); | ||
1289 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func), | ||
1290 | hq_bds); | ||
1291 | |||
1292 | return 0; | ||
1293 | } | ||
1294 | |||
1295 | static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1296 | { | ||
1297 | struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; | ||
1298 | struct cnic_local *cp = dev->cnic_priv; | ||
1299 | int func = cp->func; | ||
1300 | struct iscsi_kcqe kcqe; | ||
1301 | struct kcqe *cqes[1]; | ||
1302 | |||
1303 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1304 | if (!dev->max_iscsi_conn) { | ||
1305 | kcqe.completion_status = | ||
1306 | ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED; | ||
1307 | goto done; | ||
1308 | } | ||
1309 | |||
1310 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
1311 | TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]); | ||
1312 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
1313 | TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4, | ||
1314 | req2->error_bit_map[1]); | ||
1315 | |||
1316 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | ||
1317 | USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn); | ||
1318 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
1319 | USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]); | ||
1320 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
1321 | USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4, | ||
1322 | req2->error_bit_map[1]); | ||
1323 | |||
1324 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
1325 | CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn); | ||
1326 | |||
1327 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; | ||
1328 | |||
1329 | done: | ||
1330 | kcqe.op_code = ISCSI_KCQE_OPCODE_INIT; | ||
1331 | cqes[0] = (struct kcqe *) &kcqe; | ||
1332 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); | ||
1333 | |||
1334 | return 0; | ||
1335 | } | ||
1336 | |||
1337 | static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) | ||
1338 | { | ||
1339 | struct cnic_local *cp = dev->cnic_priv; | ||
1340 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1341 | |||
1342 | if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) { | ||
1343 | struct cnic_iscsi *iscsi = ctx->proto.iscsi; | ||
1344 | |||
1345 | cnic_free_dma(dev, &iscsi->hq_info); | ||
1346 | cnic_free_dma(dev, &iscsi->r2tq_info); | ||
1347 | cnic_free_dma(dev, &iscsi->task_array_info); | ||
1348 | } | ||
1349 | cnic_free_id(&cp->cid_tbl, ctx->cid); | ||
1350 | ctx->cid = 0; | ||
1351 | } | ||
1352 | |||
1353 | static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) | ||
1354 | { | ||
1355 | u32 cid; | ||
1356 | int ret, pages; | ||
1357 | struct cnic_local *cp = dev->cnic_priv; | ||
1358 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1359 | struct cnic_iscsi *iscsi = ctx->proto.iscsi; | ||
1360 | |||
1361 | cid = cnic_alloc_new_id(&cp->cid_tbl); | ||
1362 | if (cid == -1) { | ||
1363 | ret = -ENOMEM; | ||
1364 | goto error; | ||
1365 | } | ||
1366 | |||
1367 | ctx->cid = cid; | ||
1368 | pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; | ||
1369 | |||
1370 | ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); | ||
1371 | if (ret) | ||
1372 | goto error; | ||
1373 | |||
1374 | pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; | ||
1375 | ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); | ||
1376 | if (ret) | ||
1377 | goto error; | ||
1378 | |||
1379 | pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; | ||
1380 | ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); | ||
1381 | if (ret) | ||
1382 | goto error; | ||
1383 | |||
1384 | return 0; | ||
1385 | |||
1386 | error: | ||
1387 | cnic_free_bnx2x_conn_resc(dev, l5_cid); | ||
1388 | return ret; | ||
1389 | } | ||
1390 | |||
1391 | static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init, | ||
1392 | struct regpair *ctx_addr) | ||
1393 | { | ||
1394 | struct cnic_local *cp = dev->cnic_priv; | ||
1395 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
1396 | int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk; | ||
1397 | int off = (cid - ethdev->starting_cid) % cp->cids_per_blk; | ||
1398 | unsigned long align_off = 0; | ||
1399 | dma_addr_t ctx_map; | ||
1400 | void *ctx; | ||
1401 | |||
1402 | if (cp->ctx_align) { | ||
1403 | unsigned long mask = cp->ctx_align - 1; | ||
1404 | |||
1405 | if (cp->ctx_arr[blk].mapping & mask) | ||
1406 | align_off = cp->ctx_align - | ||
1407 | (cp->ctx_arr[blk].mapping & mask); | ||
1408 | } | ||
1409 | ctx_map = cp->ctx_arr[blk].mapping + align_off + | ||
1410 | (off * BNX2X_CONTEXT_MEM_SIZE); | ||
1411 | ctx = cp->ctx_arr[blk].ctx + align_off + | ||
1412 | (off * BNX2X_CONTEXT_MEM_SIZE); | ||
1413 | if (init) | ||
1414 | memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE); | ||
1415 | |||
1416 | ctx_addr->lo = ctx_map & 0xffffffff; | ||
1417 | ctx_addr->hi = (u64) ctx_map >> 32; | ||
1418 | return ctx; | ||
1419 | } | ||
1420 | |||
1421 | static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], | ||
1422 | u32 num) | ||
1423 | { | ||
1424 | struct cnic_local *cp = dev->cnic_priv; | ||
1425 | struct iscsi_kwqe_conn_offload1 *req1 = | ||
1426 | (struct iscsi_kwqe_conn_offload1 *) wqes[0]; | ||
1427 | struct iscsi_kwqe_conn_offload2 *req2 = | ||
1428 | (struct iscsi_kwqe_conn_offload2 *) wqes[1]; | ||
1429 | struct iscsi_kwqe_conn_offload3 *req3; | ||
1430 | struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; | ||
1431 | struct cnic_iscsi *iscsi = ctx->proto.iscsi; | ||
1432 | u32 cid = ctx->cid; | ||
1433 | u32 hw_cid = BNX2X_HW_CID(cid, cp->func); | ||
1434 | struct iscsi_context *ictx; | ||
1435 | struct regpair context_addr; | ||
1436 | int i, j, n = 2, n_max; | ||
1437 | |||
1438 | ctx->ctx_flags = 0; | ||
1439 | if (!req2->num_additional_wqes) | ||
1440 | return -EINVAL; | ||
1441 | |||
1442 | n_max = req2->num_additional_wqes + 2; | ||
1443 | |||
1444 | ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr); | ||
1445 | if (ictx == NULL) | ||
1446 | return -ENOMEM; | ||
1447 | |||
1448 | req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; | ||
1449 | |||
1450 | ictx->xstorm_ag_context.hq_prod = 1; | ||
1451 | |||
1452 | ictx->xstorm_st_context.iscsi.first_burst_length = | ||
1453 | ISCSI_DEF_FIRST_BURST_LEN; | ||
1454 | ictx->xstorm_st_context.iscsi.max_send_pdu_length = | ||
1455 | ISCSI_DEF_MAX_RECV_SEG_LEN; | ||
1456 | ictx->xstorm_st_context.iscsi.sq_pbl_base.lo = | ||
1457 | req1->sq_page_table_addr_lo; | ||
1458 | ictx->xstorm_st_context.iscsi.sq_pbl_base.hi = | ||
1459 | req1->sq_page_table_addr_hi; | ||
1460 | ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi; | ||
1461 | ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo; | ||
1462 | ictx->xstorm_st_context.iscsi.hq_pbl_base.lo = | ||
1463 | iscsi->hq_info.pgtbl_map & 0xffffffff; | ||
1464 | ictx->xstorm_st_context.iscsi.hq_pbl_base.hi = | ||
1465 | (u64) iscsi->hq_info.pgtbl_map >> 32; | ||
1466 | ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo = | ||
1467 | iscsi->hq_info.pgtbl[0]; | ||
1468 | ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi = | ||
1469 | iscsi->hq_info.pgtbl[1]; | ||
1470 | ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo = | ||
1471 | iscsi->r2tq_info.pgtbl_map & 0xffffffff; | ||
1472 | ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi = | ||
1473 | (u64) iscsi->r2tq_info.pgtbl_map >> 32; | ||
1474 | ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo = | ||
1475 | iscsi->r2tq_info.pgtbl[0]; | ||
1476 | ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi = | ||
1477 | iscsi->r2tq_info.pgtbl[1]; | ||
1478 | ictx->xstorm_st_context.iscsi.task_pbl_base.lo = | ||
1479 | iscsi->task_array_info.pgtbl_map & 0xffffffff; | ||
1480 | ictx->xstorm_st_context.iscsi.task_pbl_base.hi = | ||
1481 | (u64) iscsi->task_array_info.pgtbl_map >> 32; | ||
1482 | ictx->xstorm_st_context.iscsi.task_pbl_cache_idx = | ||
1483 | BNX2X_ISCSI_PBL_NOT_CACHED; | ||
1484 | ictx->xstorm_st_context.iscsi.flags.flags |= | ||
1485 | XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA; | ||
1486 | ictx->xstorm_st_context.iscsi.flags.flags |= | ||
1487 | XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; | ||
1488 | |||
1489 | ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; | ||
1490 | /* TSTORM requires the base address of RQ DB & not PTE */ | ||
1491 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = | ||
1492 | req2->rq_page_table_addr_lo & PAGE_MASK; | ||
1493 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = | ||
1494 | req2->rq_page_table_addr_hi; | ||
1495 | ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; | ||
1496 | ictx->tstorm_st_context.tcp.cwnd = 0x5A8; | ||
1497 | ictx->tstorm_st_context.tcp.flags2 |= | ||
1498 | TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; | ||
1499 | |||
1500 | ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; | ||
1501 | |||
1502 | ictx->ustorm_st_context.ring.rq.pbl_base.lo = | ||
1503 | req2->rq_page_table_addr_lo & 0xffffffff; | ||
1504 | ictx->ustorm_st_context.ring.rq.pbl_base.hi = | ||
1505 | (u64) req2->rq_page_table_addr_hi >> 32; | ||
1506 | ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi; | ||
1507 | ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo; | ||
1508 | ictx->ustorm_st_context.ring.r2tq.pbl_base.lo = | ||
1509 | iscsi->r2tq_info.pgtbl_map & 0xffffffff; | ||
1510 | ictx->ustorm_st_context.ring.r2tq.pbl_base.hi = | ||
1511 | (u64) iscsi->r2tq_info.pgtbl_map >> 32; | ||
1512 | ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo = | ||
1513 | iscsi->r2tq_info.pgtbl[0]; | ||
1514 | ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi = | ||
1515 | iscsi->r2tq_info.pgtbl[1]; | ||
1516 | ictx->ustorm_st_context.ring.cq_pbl_base.lo = | ||
1517 | req1->cq_page_table_addr_lo; | ||
1518 | ictx->ustorm_st_context.ring.cq_pbl_base.hi = | ||
1519 | req1->cq_page_table_addr_hi; | ||
1520 | ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN; | ||
1521 | ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi; | ||
1522 | ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo; | ||
1523 | ictx->ustorm_st_context.task_pbe_cache_index = | ||
1524 | BNX2X_ISCSI_PBL_NOT_CACHED; | ||
1525 | ictx->ustorm_st_context.task_pdu_cache_index = | ||
1526 | BNX2X_ISCSI_PDU_HEADER_NOT_CACHED; | ||
1527 | |||
1528 | for (i = 1, j = 1; i < cp->num_cqs; i++, j++) { | ||
1529 | if (j == 3) { | ||
1530 | if (n >= n_max) | ||
1531 | break; | ||
1532 | req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; | ||
1533 | j = 0; | ||
1534 | } | ||
1535 | ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN; | ||
1536 | ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo = | ||
1537 | req3->qp_first_pte[j].hi; | ||
1538 | ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi = | ||
1539 | req3->qp_first_pte[j].lo; | ||
1540 | } | ||
1541 | |||
1542 | ictx->ustorm_st_context.task_pbl_base.lo = | ||
1543 | iscsi->task_array_info.pgtbl_map & 0xffffffff; | ||
1544 | ictx->ustorm_st_context.task_pbl_base.hi = | ||
1545 | (u64) iscsi->task_array_info.pgtbl_map >> 32; | ||
1546 | ictx->ustorm_st_context.tce_phy_addr.lo = | ||
1547 | iscsi->task_array_info.pgtbl[0]; | ||
1548 | ictx->ustorm_st_context.tce_phy_addr.hi = | ||
1549 | iscsi->task_array_info.pgtbl[1]; | ||
1550 | ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; | ||
1551 | ictx->ustorm_st_context.num_cqs = cp->num_cqs; | ||
1552 | ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN; | ||
1553 | ictx->ustorm_st_context.negotiated_rx_and_flags |= | ||
1554 | ISCSI_DEF_MAX_BURST_LEN; | ||
1555 | ictx->ustorm_st_context.negotiated_rx |= | ||
1556 | ISCSI_DEFAULT_MAX_OUTSTANDING_R2T << | ||
1557 | USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT; | ||
1558 | |||
1559 | ictx->cstorm_st_context.hq_pbl_base.lo = | ||
1560 | iscsi->hq_info.pgtbl_map & 0xffffffff; | ||
1561 | ictx->cstorm_st_context.hq_pbl_base.hi = | ||
1562 | (u64) iscsi->hq_info.pgtbl_map >> 32; | ||
1563 | ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0]; | ||
1564 | ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1]; | ||
1565 | ictx->cstorm_st_context.task_pbl_base.lo = | ||
1566 | iscsi->task_array_info.pgtbl_map & 0xffffffff; | ||
1567 | ictx->cstorm_st_context.task_pbl_base.hi = | ||
1568 | (u64) iscsi->task_array_info.pgtbl_map >> 32; | ||
1569 | /* CSTORM and USTORM initialization is different, CSTORM requires | ||
1570 | * CQ DB base & not PTE addr */ | ||
1571 | ictx->cstorm_st_context.cq_db_base.lo = | ||
1572 | req1->cq_page_table_addr_lo & PAGE_MASK; | ||
1573 | ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; | ||
1574 | ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; | ||
1575 | ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; | ||
1576 | for (i = 0; i < cp->num_cqs; i++) { | ||
1577 | ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] = | ||
1578 | ISCSI_INITIAL_SN; | ||
1579 | ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] = | ||
1580 | ISCSI_INITIAL_SN; | ||
1581 | } | ||
1582 | |||
1583 | ictx->xstorm_ag_context.cdu_reserved = | ||
1584 | CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, | ||
1585 | ISCSI_CONNECTION_TYPE); | ||
1586 | ictx->ustorm_ag_context.cdu_usage = | ||
1587 | CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, | ||
1588 | ISCSI_CONNECTION_TYPE); | ||
1589 | return 0; | ||
1590 | |||
1591 | } | ||
1592 | |||
1593 | static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], | ||
1594 | u32 num, int *work) | ||
1595 | { | ||
1596 | struct iscsi_kwqe_conn_offload1 *req1; | ||
1597 | struct iscsi_kwqe_conn_offload2 *req2; | ||
1598 | struct cnic_local *cp = dev->cnic_priv; | ||
1599 | struct iscsi_kcqe kcqe; | ||
1600 | struct kcqe *cqes[1]; | ||
1601 | u32 l5_cid; | ||
1602 | int ret; | ||
1603 | |||
1604 | if (num < 2) { | ||
1605 | *work = num; | ||
1606 | return -EINVAL; | ||
1607 | } | ||
1608 | |||
1609 | req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; | ||
1610 | req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1]; | ||
1611 | if ((num - 2) < req2->num_additional_wqes) { | ||
1612 | *work = num; | ||
1613 | return -EINVAL; | ||
1614 | } | ||
1615 | *work = 2 + req2->num_additional_wqes; | ||
1616 | |||
1617 | l5_cid = req1->iscsi_conn_id; | ||
1618 | if (l5_cid >= MAX_ISCSI_TBL_SZ) | ||
1619 | return -EINVAL; | ||
1620 | |||
1621 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1622 | kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN; | ||
1623 | kcqe.iscsi_conn_id = l5_cid; | ||
1624 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; | ||
1625 | |||
1626 | if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { | ||
1627 | atomic_dec(&cp->iscsi_conn); | ||
1628 | ret = 0; | ||
1629 | goto done; | ||
1630 | } | ||
1631 | ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); | ||
1632 | if (ret) { | ||
1633 | atomic_dec(&cp->iscsi_conn); | ||
1634 | ret = 0; | ||
1635 | goto done; | ||
1636 | } | ||
1637 | ret = cnic_setup_bnx2x_ctx(dev, wqes, num); | ||
1638 | if (ret < 0) { | ||
1639 | cnic_free_bnx2x_conn_resc(dev, l5_cid); | ||
1640 | atomic_dec(&cp->iscsi_conn); | ||
1641 | goto done; | ||
1642 | } | ||
1643 | |||
1644 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; | ||
1645 | kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid, | ||
1646 | cp->func); | ||
1647 | |||
1648 | done: | ||
1649 | cqes[0] = (struct kcqe *) &kcqe; | ||
1650 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); | ||
1651 | return ret; | ||
1652 | } | ||
1653 | |||
1654 | |||
1655 | static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1656 | { | ||
1657 | struct cnic_local *cp = dev->cnic_priv; | ||
1658 | struct iscsi_kwqe_conn_update *req = | ||
1659 | (struct iscsi_kwqe_conn_update *) kwqe; | ||
1660 | void *data; | ||
1661 | union l5cm_specific_data l5_data; | ||
1662 | u32 l5_cid, cid = BNX2X_SW_CID(req->context_id); | ||
1663 | int ret; | ||
1664 | |||
1665 | if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0) | ||
1666 | return -EINVAL; | ||
1667 | |||
1668 | data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); | ||
1669 | if (!data) | ||
1670 | return -ENOMEM; | ||
1671 | |||
1672 | memcpy(data, kwqe, sizeof(struct kwqe)); | ||
1673 | |||
1674 | ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN, | ||
1675 | req->context_id, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1676 | return ret; | ||
1677 | } | ||
1678 | |||
1679 | static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1680 | { | ||
1681 | struct cnic_local *cp = dev->cnic_priv; | ||
1682 | struct iscsi_kwqe_conn_destroy *req = | ||
1683 | (struct iscsi_kwqe_conn_destroy *) kwqe; | ||
1684 | union l5cm_specific_data l5_data; | ||
1685 | u32 l5_cid = req->reserved0; | ||
1686 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1687 | int ret = 0; | ||
1688 | struct iscsi_kcqe kcqe; | ||
1689 | struct kcqe *cqes[1]; | ||
1690 | |||
1691 | if (!(ctx->ctx_flags & CTX_FL_OFFLD_START)) | ||
1692 | goto skip_cfc_delete; | ||
1693 | |||
1694 | while (!time_after(jiffies, ctx->timestamp + (2 * HZ))) | ||
1695 | msleep(250); | ||
1696 | |||
1697 | init_waitqueue_head(&ctx->waitq); | ||
1698 | ctx->wait_cond = 0; | ||
1699 | memset(&l5_data, 0, sizeof(l5_data)); | ||
1700 | ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, | ||
1701 | req->context_id, | ||
1702 | ETH_CONNECTION_TYPE | | ||
1703 | (1 << SPE_HDR_COMMON_RAMROD_SHIFT), | ||
1704 | &l5_data); | ||
1705 | if (ret == 0) | ||
1706 | wait_event(ctx->waitq, ctx->wait_cond); | ||
1707 | |||
1708 | skip_cfc_delete: | ||
1709 | cnic_free_bnx2x_conn_resc(dev, l5_cid); | ||
1710 | |||
1711 | atomic_dec(&cp->iscsi_conn); | ||
1712 | |||
1713 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1714 | kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; | ||
1715 | kcqe.iscsi_conn_id = l5_cid; | ||
1716 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; | ||
1717 | kcqe.iscsi_conn_context_id = req->context_id; | ||
1718 | |||
1719 | cqes[0] = (struct kcqe *) &kcqe; | ||
1720 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); | ||
1721 | |||
1722 | return ret; | ||
1723 | } | ||
1724 | |||
1725 | static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, | ||
1726 | struct l4_kwq_connect_req1 *kwqe1, | ||
1727 | struct l4_kwq_connect_req3 *kwqe3, | ||
1728 | struct l5cm_active_conn_buffer *conn_buf) | ||
1729 | { | ||
1730 | struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf; | ||
1731 | struct l5cm_xstorm_conn_buffer *xstorm_buf = | ||
1732 | &conn_buf->xstorm_conn_buffer; | ||
1733 | struct l5cm_tstorm_conn_buffer *tstorm_buf = | ||
1734 | &conn_buf->tstorm_conn_buffer; | ||
1735 | struct regpair context_addr; | ||
1736 | u32 cid = BNX2X_SW_CID(kwqe1->cid); | ||
1737 | struct in6_addr src_ip, dst_ip; | ||
1738 | int i; | ||
1739 | u32 *addrp; | ||
1740 | |||
1741 | addrp = (u32 *) &conn_addr->local_ip_addr; | ||
1742 | for (i = 0; i < 4; i++, addrp++) | ||
1743 | src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); | ||
1744 | |||
1745 | addrp = (u32 *) &conn_addr->remote_ip_addr; | ||
1746 | for (i = 0; i < 4; i++, addrp++) | ||
1747 | dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); | ||
1748 | |||
1749 | cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr); | ||
1750 | |||
1751 | xstorm_buf->context_addr.hi = context_addr.hi; | ||
1752 | xstorm_buf->context_addr.lo = context_addr.lo; | ||
1753 | xstorm_buf->mss = 0xffff; | ||
1754 | xstorm_buf->rcv_buf = kwqe3->rcv_buf; | ||
1755 | if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE) | ||
1756 | xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE; | ||
1757 | xstorm_buf->pseudo_header_checksum = | ||
1758 | swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); | ||
1759 | |||
1760 | if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK)) | ||
1761 | tstorm_buf->params |= | ||
1762 | L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE; | ||
1763 | if (kwqe3->ka_timeout) { | ||
1764 | tstorm_buf->ka_enable = 1; | ||
1765 | tstorm_buf->ka_timeout = kwqe3->ka_timeout; | ||
1766 | tstorm_buf->ka_interval = kwqe3->ka_interval; | ||
1767 | tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; | ||
1768 | } | ||
1769 | tstorm_buf->rcv_buf = kwqe3->rcv_buf; | ||
1770 | tstorm_buf->snd_buf = kwqe3->snd_buf; | ||
1771 | tstorm_buf->max_rt_time = 0xffffffff; | ||
1772 | } | ||
1773 | |||
1774 | static void cnic_init_bnx2x_mac(struct cnic_dev *dev) | ||
1775 | { | ||
1776 | struct cnic_local *cp = dev->cnic_priv; | ||
1777 | int func = CNIC_FUNC(cp); | ||
1778 | u8 *mac = dev->mac_addr; | ||
1779 | |||
1780 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1781 | XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]); | ||
1782 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1783 | XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]); | ||
1784 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1785 | XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]); | ||
1786 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1787 | XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]); | ||
1788 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1789 | XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]); | ||
1790 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1791 | XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]); | ||
1792 | |||
1793 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1794 | TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]); | ||
1795 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1796 | TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1, | ||
1797 | mac[4]); | ||
1798 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1799 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]); | ||
1800 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1801 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1, | ||
1802 | mac[2]); | ||
1803 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1804 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2, | ||
1805 | mac[1]); | ||
1806 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1807 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3, | ||
1808 | mac[0]); | ||
1809 | } | ||
1810 | |||
1811 | static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) | ||
1812 | { | ||
1813 | struct cnic_local *cp = dev->cnic_priv; | ||
1814 | u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; | ||
1815 | u16 tstorm_flags = 0; | ||
1816 | |||
1817 | if (tcp_ts) { | ||
1818 | xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; | ||
1819 | tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; | ||
1820 | } | ||
1821 | |||
1822 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1823 | XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags); | ||
1824 | |||
1825 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + | ||
1826 | TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags); | ||
1827 | } | ||
1828 | |||
1829 | static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], | ||
1830 | u32 num, int *work) | ||
1831 | { | ||
1832 | struct cnic_local *cp = dev->cnic_priv; | ||
1833 | struct l4_kwq_connect_req1 *kwqe1 = | ||
1834 | (struct l4_kwq_connect_req1 *) wqes[0]; | ||
1835 | struct l4_kwq_connect_req3 *kwqe3; | ||
1836 | struct l5cm_active_conn_buffer *conn_buf; | ||
1837 | struct l5cm_conn_addr_params *conn_addr; | ||
1838 | union l5cm_specific_data l5_data; | ||
1839 | u32 l5_cid = kwqe1->pg_cid; | ||
1840 | struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; | ||
1841 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1842 | int ret; | ||
1843 | |||
1844 | if (num < 2) { | ||
1845 | *work = num; | ||
1846 | return -EINVAL; | ||
1847 | } | ||
1848 | |||
1849 | if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) | ||
1850 | *work = 3; | ||
1851 | else | ||
1852 | *work = 2; | ||
1853 | |||
1854 | if (num < *work) { | ||
1855 | *work = num; | ||
1856 | return -EINVAL; | ||
1857 | } | ||
1858 | |||
1859 | if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { | ||
1860 | printk(KERN_ERR PFX "%s: conn_buf size too big\n", | ||
1861 | dev->netdev->name); | ||
1862 | return -ENOMEM; | ||
1863 | } | ||
1864 | conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); | ||
1865 | if (!conn_buf) | ||
1866 | return -ENOMEM; | ||
1867 | |||
1868 | memset(conn_buf, 0, sizeof(*conn_buf)); | ||
1869 | |||
1870 | conn_addr = &conn_buf->conn_addr_buf; | ||
1871 | conn_addr->remote_addr_0 = csk->ha[0]; | ||
1872 | conn_addr->remote_addr_1 = csk->ha[1]; | ||
1873 | conn_addr->remote_addr_2 = csk->ha[2]; | ||
1874 | conn_addr->remote_addr_3 = csk->ha[3]; | ||
1875 | conn_addr->remote_addr_4 = csk->ha[4]; | ||
1876 | conn_addr->remote_addr_5 = csk->ha[5]; | ||
1877 | |||
1878 | if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { | ||
1879 | struct l4_kwq_connect_req2 *kwqe2 = | ||
1880 | (struct l4_kwq_connect_req2 *) wqes[1]; | ||
1881 | |||
1882 | conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; | ||
1883 | conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; | ||
1884 | conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; | ||
1885 | |||
1886 | conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; | ||
1887 | conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; | ||
1888 | conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; | ||
1889 | conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; | ||
1890 | } | ||
1891 | kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; | ||
1892 | |||
1893 | conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; | ||
1894 | conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; | ||
1895 | conn_addr->local_tcp_port = kwqe1->src_port; | ||
1896 | conn_addr->remote_tcp_port = kwqe1->dst_port; | ||
1897 | |||
1898 | conn_addr->pmtu = kwqe3->pmtu; | ||
1899 | cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); | ||
1900 | |||
1901 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + | ||
1902 | XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id); | ||
1903 | |||
1904 | cnic_bnx2x_set_tcp_timestamp(dev, | ||
1905 | kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); | ||
1906 | |||
1907 | ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, | ||
1908 | kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1909 | if (!ret) | ||
1910 | ctx->ctx_flags |= CTX_FL_OFFLD_START; | ||
1911 | |||
1912 | return ret; | ||
1913 | } | ||
1914 | |||
1915 | static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1916 | { | ||
1917 | struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; | ||
1918 | union l5cm_specific_data l5_data; | ||
1919 | int ret; | ||
1920 | |||
1921 | memset(&l5_data, 0, sizeof(l5_data)); | ||
1922 | ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, | ||
1923 | req->cid, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1924 | return ret; | ||
1925 | } | ||
1926 | |||
1927 | static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1928 | { | ||
1929 | struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; | ||
1930 | union l5cm_specific_data l5_data; | ||
1931 | int ret; | ||
1932 | |||
1933 | memset(&l5_data, 0, sizeof(l5_data)); | ||
1934 | ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, | ||
1935 | req->cid, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1936 | return ret; | ||
1937 | } | ||
1938 | static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1939 | { | ||
1940 | struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; | ||
1941 | struct l4_kcq kcqe; | ||
1942 | struct kcqe *cqes[1]; | ||
1943 | |||
1944 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1945 | kcqe.pg_host_opaque = req->host_opaque; | ||
1946 | kcqe.pg_cid = req->host_opaque; | ||
1947 | kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; | ||
1948 | cqes[0] = (struct kcqe *) &kcqe; | ||
1949 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); | ||
1950 | return 0; | ||
1951 | } | ||
1952 | |||
1953 | static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1954 | { | ||
1955 | struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; | ||
1956 | struct l4_kcq kcqe; | ||
1957 | struct kcqe *cqes[1]; | ||
1958 | |||
1959 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1960 | kcqe.pg_host_opaque = req->pg_host_opaque; | ||
1961 | kcqe.pg_cid = req->pg_cid; | ||
1962 | kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; | ||
1963 | cqes[0] = (struct kcqe *) &kcqe; | ||
1964 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); | ||
1965 | return 0; | ||
1966 | } | ||
1967 | |||
1968 | static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], | ||
1969 | u32 num_wqes) | ||
1970 | { | ||
1971 | int i, work, ret; | ||
1972 | u32 opcode; | ||
1973 | struct kwqe *kwqe; | ||
1974 | |||
1975 | if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) | ||
1976 | return -EAGAIN; /* bnx2x is down */ | ||
1977 | |||
1978 | for (i = 0; i < num_wqes; ) { | ||
1979 | kwqe = wqes[i]; | ||
1980 | opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); | ||
1981 | work = 1; | ||
1982 | |||
1983 | switch (opcode) { | ||
1984 | case ISCSI_KWQE_OPCODE_INIT1: | ||
1985 | ret = cnic_bnx2x_iscsi_init1(dev, kwqe); | ||
1986 | break; | ||
1987 | case ISCSI_KWQE_OPCODE_INIT2: | ||
1988 | ret = cnic_bnx2x_iscsi_init2(dev, kwqe); | ||
1989 | break; | ||
1990 | case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: | ||
1991 | ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], | ||
1992 | num_wqes - i, &work); | ||
1993 | break; | ||
1994 | case ISCSI_KWQE_OPCODE_UPDATE_CONN: | ||
1995 | ret = cnic_bnx2x_iscsi_update(dev, kwqe); | ||
1996 | break; | ||
1997 | case ISCSI_KWQE_OPCODE_DESTROY_CONN: | ||
1998 | ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); | ||
1999 | break; | ||
2000 | case L4_KWQE_OPCODE_VALUE_CONNECT1: | ||
2001 | ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, | ||
2002 | &work); | ||
2003 | break; | ||
2004 | case L4_KWQE_OPCODE_VALUE_CLOSE: | ||
2005 | ret = cnic_bnx2x_close(dev, kwqe); | ||
2006 | break; | ||
2007 | case L4_KWQE_OPCODE_VALUE_RESET: | ||
2008 | ret = cnic_bnx2x_reset(dev, kwqe); | ||
2009 | break; | ||
2010 | case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: | ||
2011 | ret = cnic_bnx2x_offload_pg(dev, kwqe); | ||
2012 | break; | ||
2013 | case L4_KWQE_OPCODE_VALUE_UPDATE_PG: | ||
2014 | ret = cnic_bnx2x_update_pg(dev, kwqe); | ||
2015 | break; | ||
2016 | case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: | ||
2017 | ret = 0; | ||
2018 | break; | ||
2019 | default: | ||
2020 | ret = 0; | ||
2021 | printk(KERN_ERR PFX "%s: Unknown type of KWQE(0x%x)\n", | ||
2022 | dev->netdev->name, opcode); | ||
2023 | break; | ||
2024 | } | ||
2025 | if (ret < 0) | ||
2026 | printk(KERN_ERR PFX "%s: KWQE(0x%x) failed\n", | ||
2027 | dev->netdev->name, opcode); | ||
2028 | i += work; | ||
2029 | } | ||
2030 | return 0; | ||
2031 | } | ||
2032 | |||
924 | static void service_kcqes(struct cnic_dev *dev, int num_cqes) | 2033 | static void service_kcqes(struct cnic_dev *dev, int num_cqes) |
925 | { | 2034 | { |
926 | struct cnic_local *cp = dev->cnic_priv; | 2035 | struct cnic_local *cp = dev->cnic_priv; |
@@ -987,6 +2096,22 @@ static u16 cnic_bnx2_hw_idx(u16 idx) | |||
987 | return idx; | 2096 | return idx; |
988 | } | 2097 | } |
989 | 2098 | ||
2099 | static u16 cnic_bnx2x_next_idx(u16 idx) | ||
2100 | { | ||
2101 | idx++; | ||
2102 | if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) | ||
2103 | idx++; | ||
2104 | |||
2105 | return idx; | ||
2106 | } | ||
2107 | |||
2108 | static u16 cnic_bnx2x_hw_idx(u16 idx) | ||
2109 | { | ||
2110 | if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) | ||
2111 | idx++; | ||
2112 | return idx; | ||
2113 | } | ||
2114 | |||
990 | static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) | 2115 | static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) |
991 | { | 2116 | { |
992 | struct cnic_local *cp = dev->cnic_priv; | 2117 | struct cnic_local *cp = dev->cnic_priv; |
@@ -1012,7 +2137,7 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) | |||
1012 | return last_cnt; | 2137 | return last_cnt; |
1013 | } | 2138 | } |
1014 | 2139 | ||
1015 | static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp) | 2140 | static void cnic_chk_pkt_rings(struct cnic_local *cp) |
1016 | { | 2141 | { |
1017 | u16 rx_cons = *cp->rx_cons_ptr; | 2142 | u16 rx_cons = *cp->rx_cons_ptr; |
1018 | u16 tx_cons = *cp->tx_cons_ptr; | 2143 | u16 tx_cons = *cp->tx_cons_ptr; |
@@ -1020,6 +2145,7 @@ static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp) | |||
1020 | if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { | 2145 | if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { |
1021 | cp->tx_cons = tx_cons; | 2146 | cp->tx_cons = tx_cons; |
1022 | cp->rx_cons = rx_cons; | 2147 | cp->rx_cons = rx_cons; |
2148 | |||
1023 | uio_event_notify(cp->cnic_uinfo); | 2149 | uio_event_notify(cp->cnic_uinfo); |
1024 | } | 2150 | } |
1025 | } | 2151 | } |
@@ -1062,7 +2188,7 @@ done: | |||
1062 | 2188 | ||
1063 | cp->kcq_prod_idx = sw_prod; | 2189 | cp->kcq_prod_idx = sw_prod; |
1064 | 2190 | ||
1065 | cnic_chk_bnx2_pkt_rings(cp); | 2191 | cnic_chk_pkt_rings(cp); |
1066 | return status_idx; | 2192 | return status_idx; |
1067 | } | 2193 | } |
1068 | 2194 | ||
@@ -1100,7 +2226,7 @@ done: | |||
1100 | CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); | 2226 | CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); |
1101 | cp->kcq_prod_idx = sw_prod; | 2227 | cp->kcq_prod_idx = sw_prod; |
1102 | 2228 | ||
1103 | cnic_chk_bnx2_pkt_rings(cp); | 2229 | cnic_chk_pkt_rings(cp); |
1104 | 2230 | ||
1105 | cp->last_status_idx = status_idx; | 2231 | cp->last_status_idx = status_idx; |
1106 | CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | | 2232 | CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | |
@@ -1125,6 +2251,91 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance) | |||
1125 | return IRQ_HANDLED; | 2251 | return IRQ_HANDLED; |
1126 | } | 2252 | } |
1127 | 2253 | ||
2254 | static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, | ||
2255 | u16 index, u8 op, u8 update) | ||
2256 | { | ||
2257 | struct cnic_local *cp = dev->cnic_priv; | ||
2258 | u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + | ||
2259 | COMMAND_REG_INT_ACK); | ||
2260 | struct igu_ack_register igu_ack; | ||
2261 | |||
2262 | igu_ack.status_block_index = index; | ||
2263 | igu_ack.sb_id_and_flags = | ||
2264 | ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | | ||
2265 | (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | | ||
2266 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | | ||
2267 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); | ||
2268 | |||
2269 | CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); | ||
2270 | } | ||
2271 | |||
2272 | static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) | ||
2273 | { | ||
2274 | struct cnic_local *cp = dev->cnic_priv; | ||
2275 | |||
2276 | cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0, | ||
2277 | IGU_INT_DISABLE, 0); | ||
2278 | } | ||
2279 | |||
2280 | static void cnic_service_bnx2x_bh(unsigned long data) | ||
2281 | { | ||
2282 | struct cnic_dev *dev = (struct cnic_dev *) data; | ||
2283 | struct cnic_local *cp = dev->cnic_priv; | ||
2284 | u16 hw_prod, sw_prod; | ||
2285 | struct cstorm_status_block_c *sblk = | ||
2286 | &cp->bnx2x_status_blk->c_status_block; | ||
2287 | u32 status_idx = sblk->status_block_index; | ||
2288 | int kcqe_cnt; | ||
2289 | |||
2290 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) | ||
2291 | return; | ||
2292 | |||
2293 | hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; | ||
2294 | hw_prod = cp->hw_idx(hw_prod); | ||
2295 | sw_prod = cp->kcq_prod_idx; | ||
2296 | while (sw_prod != hw_prod) { | ||
2297 | kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); | ||
2298 | if (kcqe_cnt == 0) | ||
2299 | goto done; | ||
2300 | |||
2301 | service_kcqes(dev, kcqe_cnt); | ||
2302 | |||
2303 | /* Tell compiler that sblk fields can change. */ | ||
2304 | barrier(); | ||
2305 | if (status_idx == sblk->status_block_index) | ||
2306 | break; | ||
2307 | |||
2308 | status_idx = sblk->status_block_index; | ||
2309 | hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; | ||
2310 | hw_prod = cp->hw_idx(hw_prod); | ||
2311 | } | ||
2312 | |||
2313 | done: | ||
2314 | CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX); | ||
2315 | cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, | ||
2316 | status_idx, IGU_INT_ENABLE, 1); | ||
2317 | |||
2318 | cp->kcq_prod_idx = sw_prod; | ||
2319 | return; | ||
2320 | } | ||
2321 | |||
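[Editor's note] cnic_service_bnx2x_bh() above drains the iSCSI event queue until the software producer catches the hardware one, re-reading the status block index after each batch so events that arrive mid-service are not lost before the interrupt is re-armed. A rough user-space sketch of that drain-until-stable pattern, with stand-in types rather than the driver's status-block structures:

```c
#include <stdio.h>
#include <stdint.h>

/* Stand-in for the cstorm status block: prod is the hardware producer,
 * block_index changes whenever the block is rewritten. */
struct demo_sblk {
        uint16_t prod;
        uint32_t block_index;
};

static void demo_service(struct demo_sblk *sblk, uint16_t *sw_prod)
{
        uint32_t snap = sblk->block_index;
        uint16_t hw = sblk->prod;

        while (*sw_prod != hw) {
                printf("handle kcqe %u\n", (*sw_prod)++);

                if (*sw_prod != hw)
                        continue;               /* batch not drained yet    */
                if (snap == sblk->block_index)
                        break;                  /* nothing new arrived      */
                snap = sblk->block_index;       /* more work was posted:    */
                hw = sblk->prod;                /* pick up the new producer */
        }
}

int main(void)
{
        struct demo_sblk sblk = { .prod = 3, .block_index = 7 };
        uint16_t sw_prod = 0;

        demo_service(&sblk, &sw_prod);          /* handles kcqe 0, 1, 2 */
        return 0;
}
```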
2322 | static int cnic_service_bnx2x(void *data, void *status_blk) | ||
2323 | { | ||
2324 | struct cnic_dev *dev = data; | ||
2325 | struct cnic_local *cp = dev->cnic_priv; | ||
2326 | u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; | ||
2327 | |||
2328 | prefetch(cp->status_blk); | ||
2329 | prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); | ||
2330 | |||
2331 | if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) | ||
2332 | tasklet_schedule(&cp->cnic_irq_task); | ||
2333 | |||
2334 | cnic_chk_pkt_rings(cp); | ||
2335 | |||
2336 | return 0; | ||
2337 | } | ||
2338 | |||
1128 | static void cnic_ulp_stop(struct cnic_dev *dev) | 2339 | static void cnic_ulp_stop(struct cnic_dev *dev) |
1129 | { | 2340 | { |
1130 | struct cnic_local *cp = dev->cnic_priv; | 2341 | struct cnic_local *cp = dev->cnic_priv; |
@@ -1197,6 +2408,19 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info) | |||
1197 | 2408 | ||
1198 | cnic_put(dev); | 2409 | cnic_put(dev); |
1199 | break; | 2410 | break; |
2411 | case CNIC_CTL_COMPLETION_CMD: { | ||
2412 | u32 cid = BNX2X_SW_CID(info->data.comp.cid); | ||
2413 | u32 l5_cid; | ||
2414 | struct cnic_local *cp = dev->cnic_priv; | ||
2415 | |||
2416 | if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { | ||
2417 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
2418 | |||
2419 | ctx->wait_cond = 1; | ||
2420 | wake_up(&ctx->waitq); | ||
2421 | } | ||
2422 | break; | ||
2423 | } | ||
1200 | default: | 2424 | default: |
1201 | return -EINVAL; | 2425 | return -EINVAL; |
1202 | } | 2426 | } |
@@ -1872,6 +3096,8 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) | |||
1872 | /* fall through */ | 3096 | /* fall through */ |
1873 | case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: | 3097 | case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: |
1874 | case L4_KCQE_OPCODE_VALUE_RESET_COMP: | 3098 | case L4_KCQE_OPCODE_VALUE_RESET_COMP: |
3099 | case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: | ||
3100 | case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: | ||
1875 | cp->close_conn(csk, opcode); | 3101 | cp->close_conn(csk, opcode); |
1876 | break; | 3102 | break; |
1877 | 3103 | ||
@@ -1957,6 +3183,76 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) | |||
1957 | return 0; | 3183 | return 0; |
1958 | } | 3184 | } |
1959 | 3185 | ||
3186 | static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) | ||
3187 | { | ||
3188 | struct cnic_dev *dev = csk->dev; | ||
3189 | struct cnic_local *cp = dev->cnic_priv; | ||
3190 | struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; | ||
3191 | union l5cm_specific_data l5_data; | ||
3192 | u32 cmd = 0; | ||
3193 | int close_complete = 0; | ||
3194 | |||
3195 | switch (opcode) { | ||
3196 | case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: | ||
3197 | case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: | ||
3198 | case L4_KCQE_OPCODE_VALUE_RESET_COMP: | ||
3199 | if (cnic_ready_to_close(csk, opcode)) | ||
3200 | cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; | ||
3201 | break; | ||
3202 | case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: | ||
3203 | cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; | ||
3204 | break; | ||
3205 | case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: | ||
3206 | close_complete = 1; | ||
3207 | break; | ||
3208 | } | ||
3209 | if (cmd) { | ||
3210 | memset(&l5_data, 0, sizeof(l5_data)); | ||
3211 | |||
3212 | cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, | ||
3213 | &l5_data); | ||
3214 | } else if (close_complete) { | ||
3215 | ctx->timestamp = jiffies; | ||
3216 | cnic_close_conn(csk); | ||
3217 | cnic_cm_upcall(cp, csk, csk->state); | ||
3218 | } | ||
3219 | } | ||
3220 | |||
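[Editor's note] cnic_close_bnx2x_conn() above chains three hardware steps: a close/reset completion triggers a SEARCHER_DELETE ramrod, its completion triggers TERMINATE_OFFLOAD, and only that final completion releases the socket. A toy walk through that sequence, using placeholder names rather than the real L4/L5CM opcodes:

```c
#include <stdio.h>

/* Placeholder events standing in for the KCQE opcodes handled above. */
enum demo_event {
        EV_CLOSE_OR_RESET_COMP,
        EV_SEARCHER_DELETE_COMP,
        EV_TERMINATE_COMP,
};

static const char *demo_next_step(enum demo_event ev)
{
        switch (ev) {
        case EV_CLOSE_OR_RESET_COMP:
                return "submit SEARCHER_DELETE ramrod";
        case EV_SEARCHER_DELETE_COMP:
                return "submit TERMINATE_OFFLOAD ramrod";
        case EV_TERMINATE_COMP:
                return "mark connection closed and upcall";
        }
        return "ignore";
}

int main(void)
{
        enum demo_event seq[] = {
                EV_CLOSE_OR_RESET_COMP,
                EV_SEARCHER_DELETE_COMP,
                EV_TERMINATE_COMP,
        };
        unsigned int i;

        for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
                printf("completion %u -> %s\n", i, demo_next_step(seq[i]));
        return 0;
}
```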
3221 | static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) | ||
3222 | { | ||
3223 | } | ||
3224 | |||
3225 | static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) | ||
3226 | { | ||
3227 | struct cnic_local *cp = dev->cnic_priv; | ||
3228 | int func = CNIC_FUNC(cp); | ||
3229 | |||
3230 | cnic_init_bnx2x_mac(dev); | ||
3231 | cnic_bnx2x_set_tcp_timestamp(dev, 1); | ||
3232 | |||
3233 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + | ||
3234 | XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0); | ||
3235 | |||
3236 | CNIC_WR(dev, BAR_XSTRORM_INTMEM + | ||
3237 | XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1); | ||
3238 | CNIC_WR(dev, BAR_XSTRORM_INTMEM + | ||
3239 | XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func), | ||
3240 | DEF_MAX_DA_COUNT); | ||
3241 | |||
3242 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
3243 | XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL); | ||
3244 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
3245 | XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS); | ||
3246 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
3247 | XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2); | ||
3248 | CNIC_WR(dev, BAR_XSTRORM_INTMEM + | ||
3249 | XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER); | ||
3250 | |||
3251 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func), | ||
3252 | DEF_MAX_CWND); | ||
3253 | return 0; | ||
3254 | } | ||
3255 | |||
1960 | static int cnic_cm_open(struct cnic_dev *dev) | 3256 | static int cnic_cm_open(struct cnic_dev *dev) |
1961 | { | 3257 | { |
1962 | struct cnic_local *cp = dev->cnic_priv; | 3258 | struct cnic_local *cp = dev->cnic_priv; |
@@ -2091,7 +3387,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev) | |||
2091 | 3387 | ||
2092 | cp->bnx2_status_blk = cp->status_blk; | 3388 | cp->bnx2_status_blk = cp->status_blk; |
2093 | cp->last_status_idx = cp->bnx2_status_blk->status_idx; | 3389 | cp->last_status_idx = cp->bnx2_status_blk->status_idx; |
2094 | tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix, | 3390 | tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, |
2095 | (unsigned long) dev); | 3391 | (unsigned long) dev); |
2096 | err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, | 3392 | err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, |
2097 | "cnic", dev); | 3393 | "cnic", dev); |
@@ -2464,6 +3760,417 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) | |||
2464 | return 0; | 3760 | return 0; |
2465 | } | 3761 | } |
2466 | 3762 | ||
3763 | static void cnic_setup_bnx2x_context(struct cnic_dev *dev) | ||
3764 | { | ||
3765 | struct cnic_local *cp = dev->cnic_priv; | ||
3766 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
3767 | u32 start_offset = ethdev->ctx_tbl_offset; | ||
3768 | int i; | ||
3769 | |||
3770 | for (i = 0; i < cp->ctx_blks; i++) { | ||
3771 | struct cnic_ctx *ctx = &cp->ctx_arr[i]; | ||
3772 | dma_addr_t map = ctx->mapping; | ||
3773 | |||
3774 | if (cp->ctx_align) { | ||
3775 | unsigned long mask = cp->ctx_align - 1; | ||
3776 | |||
3777 | map = (map + mask) & ~mask; | ||
3778 | } | ||
3779 | |||
3780 | cnic_ctx_tbl_wr(dev, start_offset + i, map); | ||
3781 | } | ||
3782 | } | ||
3783 | |||
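[Editor's note] cnic_setup_bnx2x_context() above rounds each context page's DMA address up to cp->ctx_align (a power of two) before writing it into the chip's context table, using the usual (addr + mask) & ~mask idiom. A quick standalone check with a made-up address and 4 KiB alignment:

```c
#include <stdio.h>
#include <stdint.h>

/* align must be a power of two, as cp->ctx_align is in the driver. */
static uint64_t demo_align_up(uint64_t addr, uint64_t align)
{
        uint64_t mask = align - 1;

        return (addr + mask) & ~mask;
}

int main(void)
{
        unsigned long long a = demo_align_up(0x12345678ULL, 0x1000);

        printf("0x%llx\n", a);  /* 0x12346000: next 4 KiB boundary */
        return 0;
}
```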
3784 | static int cnic_init_bnx2x_irq(struct cnic_dev *dev) | ||
3785 | { | ||
3786 | struct cnic_local *cp = dev->cnic_priv; | ||
3787 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
3788 | int err = 0; | ||
3789 | |||
3790 | tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, | ||
3791 | (unsigned long) dev); | ||
3792 | if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { | ||
3793 | err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, | ||
3794 | "cnic", dev); | ||
3795 | if (err) | ||
3796 | tasklet_disable(&cp->cnic_irq_task); | ||
3797 | } | ||
3798 | return err; | ||
3799 | } | ||
3800 | |||
3801 | static void cnic_enable_bnx2x_int(struct cnic_dev *dev) | ||
3802 | { | ||
3803 | struct cnic_local *cp = dev->cnic_priv; | ||
3804 | u8 sb_id = cp->status_blk_num; | ||
3805 | int port = CNIC_PORT(cp); | ||
3806 | |||
3807 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
3808 | CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, | ||
3809 | HC_INDEX_C_ISCSI_EQ_CONS), | ||
3810 | 64 / 12); | ||
3811 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
3812 | CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, | ||
3813 | HC_INDEX_C_ISCSI_EQ_CONS), 0); | ||
3814 | } | ||
3815 | |||
3816 | static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) | ||
3817 | { | ||
3818 | } | ||
3819 | |||
3820 | static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev) | ||
3821 | { | ||
3822 | struct cnic_local *cp = dev->cnic_priv; | ||
3823 | union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring; | ||
3824 | struct eth_context *context; | ||
3825 | struct regpair context_addr; | ||
3826 | dma_addr_t buf_map; | ||
3827 | int func = CNIC_FUNC(cp); | ||
3828 | int port = CNIC_PORT(cp); | ||
3829 | int i; | ||
3830 | int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
3831 | u32 val; | ||
3832 | |||
3833 | memset(txbd, 0, BCM_PAGE_SIZE); | ||
3834 | |||
3835 | buf_map = cp->l2_buf_map; | ||
3836 | for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { | ||
3837 | struct eth_tx_start_bd *start_bd = &txbd->start_bd; | ||
3838 | struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); | ||
3839 | |||
3840 | start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); | ||
3841 | start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); | ||
3842 | reg_bd->addr_hi = start_bd->addr_hi; | ||
3843 | reg_bd->addr_lo = start_bd->addr_lo + 0x10; | ||
3844 | start_bd->nbytes = cpu_to_le16(0x10); | ||
3845 | start_bd->nbd = cpu_to_le16(3); | ||
3846 | start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | ||
3847 | start_bd->general_data = (UNICAST_ADDRESS << | ||
3848 | ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); | ||
3849 | start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); | ||
3850 | |||
3851 | } | ||
3852 | context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr); | ||
3853 | |||
3854 | val = (u64) cp->l2_ring_map >> 32; | ||
3855 | txbd->next_bd.addr_hi = cpu_to_le32(val); | ||
3856 | |||
3857 | context->xstorm_st_context.tx_bd_page_base_hi = val; | ||
3858 | |||
3859 | val = (u64) cp->l2_ring_map & 0xffffffff; | ||
3860 | txbd->next_bd.addr_lo = cpu_to_le32(val); | ||
3861 | |||
3862 | context->xstorm_st_context.tx_bd_page_base_lo = val; | ||
3863 | |||
3864 | context->cstorm_st_context.sb_index_number = | ||
3865 | HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS; | ||
3866 | context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID; | ||
3867 | |||
3868 | context->xstorm_st_context.statistics_data = (cli | | ||
3869 | XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); | ||
3870 | |||
3871 | context->xstorm_ag_context.cdu_reserved = | ||
3872 | CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func), | ||
3873 | CDU_REGION_NUMBER_XCM_AG, | ||
3874 | ETH_CONNECTION_TYPE); | ||
3875 | |||
3876 | /* reset xstorm per client statistics */ | ||
3877 | val = BAR_XSTRORM_INTMEM + | ||
3878 | XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
3879 | for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) | ||
3880 | CNIC_WR(dev, val + i * 4, 0); | ||
3881 | |||
3882 | cp->tx_cons_ptr = | ||
3883 | &cp->bnx2x_def_status_blk->c_def_status_block.index_values[ | ||
3884 | HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS]; | ||
3885 | } | ||
3886 | |||
3887 | static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev) | ||
3888 | { | ||
3889 | struct cnic_local *cp = dev->cnic_priv; | ||
3890 | struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring + | ||
3891 | BCM_PAGE_SIZE); | ||
3892 | struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) | ||
3893 | (cp->l2_ring + (2 * BCM_PAGE_SIZE)); | ||
3894 | struct eth_context *context; | ||
3895 | struct regpair context_addr; | ||
3896 | int i; | ||
3897 | int port = CNIC_PORT(cp); | ||
3898 | int func = CNIC_FUNC(cp); | ||
3899 | int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
3900 | u32 val; | ||
3901 | struct tstorm_eth_client_config tstorm_client = {0}; | ||
3902 | |||
3903 | for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { | ||
3904 | dma_addr_t buf_map; | ||
3905 | int n = (i % cp->l2_rx_ring_size) + 1; | ||
3906 | |||
3907 | buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); | ||
3908 | rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); | ||
3909 | rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); | ||
3910 | } | ||
3911 | context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr); | ||
3912 | |||
3913 | val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; | ||
3914 | rxbd->addr_hi = cpu_to_le32(val); | ||
3915 | |||
3916 | context->ustorm_st_context.common.bd_page_base_hi = val; | ||
3917 | |||
3918 | val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; | ||
3919 | rxbd->addr_lo = cpu_to_le32(val); | ||
3920 | |||
3921 | context->ustorm_st_context.common.bd_page_base_lo = val; | ||
3922 | |||
3923 | context->ustorm_st_context.common.sb_index_numbers = | ||
3924 | BNX2X_ISCSI_RX_SB_INDEX_NUM; | ||
3925 | context->ustorm_st_context.common.clientId = cli; | ||
3926 | context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID; | ||
3927 | context->ustorm_st_context.common.flags = | ||
3928 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS; | ||
3929 | context->ustorm_st_context.common.statistics_counter_id = cli; | ||
3930 | context->ustorm_st_context.common.mc_alignment_log_size = 0; | ||
3931 | context->ustorm_st_context.common.bd_buff_size = | ||
3932 | cp->l2_single_buf_size; | ||
3933 | |||
3934 | context->ustorm_ag_context.cdu_usage = | ||
3935 | CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func), | ||
3936 | CDU_REGION_NUMBER_UCM_AG, | ||
3937 | ETH_CONNECTION_TYPE); | ||
3938 | |||
3939 | rxcqe += BNX2X_MAX_RCQ_DESC_CNT; | ||
3940 | val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32; | ||
3941 | rxcqe->addr_hi = cpu_to_le32(val); | ||
3942 | |||
3943 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3944 | USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val); | ||
3945 | |||
3946 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3947 | USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val); | ||
3948 | |||
3949 | val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; | ||
3950 | rxcqe->addr_lo = cpu_to_le32(val); | ||
3951 | |||
3952 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3953 | USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val); | ||
3954 | |||
3955 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3956 | USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val); | ||
3957 | |||
3958 | /* client tstorm info */ | ||
3959 | tstorm_client.mtu = cp->l2_single_buf_size - 14; | ||
3960 | tstorm_client.config_flags = | ||
3961 | (TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE | | ||
3962 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE); | ||
3963 | tstorm_client.statistics_counter_id = cli; | ||
3964 | |||
3965 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
3966 | TSTORM_CLIENT_CONFIG_OFFSET(port, cli), | ||
3967 | ((u32 *)&tstorm_client)[0]); | ||
3968 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
3969 | TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4, | ||
3970 | ((u32 *)&tstorm_client)[1]); | ||
3971 | |||
3972 | /* reset tstorm per client statistics */ | ||
3973 | val = BAR_TSTRORM_INTMEM + | ||
3974 | TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
3975 | for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) | ||
3976 | CNIC_WR(dev, val + i * 4, 0); | ||
3977 | |||
3978 | /* reset ustorm per client statistics */ | ||
3979 | val = BAR_USTRORM_INTMEM + | ||
3980 | USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
3981 | for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++) | ||
3982 | CNIC_WR(dev, val + i * 4, 0); | ||
3983 | |||
3984 | cp->rx_cons_ptr = | ||
3985 | &cp->bnx2x_def_status_blk->u_def_status_block.index_values[ | ||
3986 | HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS]; | ||
3987 | } | ||
3988 | |||
3989 | static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) | ||
3990 | { | ||
3991 | struct cnic_local *cp = dev->cnic_priv; | ||
3992 | u32 base, addr, val; | ||
3993 | int port = CNIC_PORT(cp); | ||
3994 | |||
3995 | dev->max_iscsi_conn = 0; | ||
3996 | base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR); | ||
3997 | if (base < 0xa0000 || base >= 0xc0000) | ||
3998 | return; | ||
3999 | |||
4000 | val = BNX2X_SHMEM_ADDR(base, | ||
4001 | dev_info.port_hw_config[port].iscsi_mac_upper); | ||
4002 | |||
4003 | dev->mac_addr[0] = (u8) (val >> 8); | ||
4004 | dev->mac_addr[1] = (u8) val; | ||
4005 | |||
4006 | val = BNX2X_SHMEM_ADDR(base, | ||
4007 | dev_info.port_hw_config[port].iscsi_mac_lower); | ||
4008 | |||
4009 | dev->mac_addr[2] = (u8) (val >> 24); | ||
4010 | dev->mac_addr[3] = (u8) (val >> 16); | ||
4011 | dev->mac_addr[4] = (u8) (val >> 8); | ||
4012 | dev->mac_addr[5] = (u8) val; | ||
4013 | |||
4014 | addr = BNX2X_SHMEM_ADDR(base, validity_map[port]); | ||
4015 | val = CNIC_RD(dev, addr); | ||
4016 | |||
4017 | if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) { | ||
4018 | u16 val16; | ||
4019 | |||
4020 | addr = BNX2X_SHMEM_ADDR(base, | ||
4021 | drv_lic_key[port].max_iscsi_init_conn); | ||
4022 | val16 = CNIC_RD16(dev, addr); | ||
4023 | |||
4024 | if (val16) | ||
4025 | val16 ^= 0x1e1e; | ||
4026 | dev->max_iscsi_conn = val16; | ||
4027 | } | ||
4028 | if (BNX2X_CHIP_IS_E1H(cp->chip_id)) { | ||
4029 | int func = CNIC_FUNC(cp); | ||
4030 | |||
4031 | addr = BNX2X_SHMEM_ADDR(base, | ||
4032 | mf_cfg.func_mf_config[func].e1hov_tag); | ||
4033 | val = CNIC_RD(dev, addr); | ||
4034 | val &= FUNC_MF_CFG_E1HOV_TAG_MASK; | ||
4035 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { | ||
4036 | addr = BNX2X_SHMEM_ADDR(base, | ||
4037 | mf_cfg.func_mf_config[func].config); | ||
4038 | val = CNIC_RD(dev, addr); | ||
4039 | val &= FUNC_MF_CFG_PROTOCOL_MASK; | ||
4040 | if (val != FUNC_MF_CFG_PROTOCOL_ISCSI) | ||
4041 | dev->max_iscsi_conn = 0; | ||
4042 | } | ||
4043 | } | ||
4044 | } | ||
4045 | |||
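[Editor's note] cnic_get_bnx2x_iscsi_info() above rebuilds the 6-byte iSCSI MAC from two 32-bit shared-memory words: the upper word carries bytes 0-1 in its low 16 bits, the lower word carries bytes 2-5. A small sketch of that unpacking with invented register contents:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t mac_upper = 0x00000010;        /* invented shmem words */
        uint32_t mac_lower = 0x18d40e9c;
        uint8_t mac[6];

        mac[0] = (uint8_t)(mac_upper >> 8);
        mac[1] = (uint8_t)mac_upper;
        mac[2] = (uint8_t)(mac_lower >> 24);
        mac[3] = (uint8_t)(mac_lower >> 16);
        mac[4] = (uint8_t)(mac_lower >> 8);
        mac[5] = (uint8_t)mac_lower;

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        /* prints 00:10:18:d4:0e:9c */
        return 0;
}
```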
4046 | static int cnic_start_bnx2x_hw(struct cnic_dev *dev) | ||
4047 | { | ||
4048 | struct cnic_local *cp = dev->cnic_priv; | ||
4049 | int func = CNIC_FUNC(cp), ret, i; | ||
4050 | int port = CNIC_PORT(cp); | ||
4051 | u16 eq_idx; | ||
4052 | u8 sb_id = cp->status_blk_num; | ||
4053 | |||
4054 | ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, | ||
4055 | BNX2X_ISCSI_START_CID); | ||
4056 | |||
4057 | if (ret) | ||
4058 | return -ENOMEM; | ||
4059 | |||
4060 | cp->kcq_io_addr = BAR_CSTRORM_INTMEM + | ||
4061 | CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0); | ||
4062 | cp->kcq_prod_idx = 0; | ||
4063 | |||
4064 | cnic_get_bnx2x_iscsi_info(dev); | ||
4065 | |||
4066 | /* Only 1 EQ */ | ||
4067 | CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX); | ||
4068 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4069 | CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0); | ||
4070 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4071 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0), | ||
4072 | cp->kcq_info.pg_map_arr[1] & 0xffffffff); | ||
4073 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4074 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4, | ||
4075 | (u64) cp->kcq_info.pg_map_arr[1] >> 32); | ||
4076 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4077 | CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0), | ||
4078 | cp->kcq_info.pg_map_arr[0] & 0xffffffff); | ||
4079 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4080 | CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4, | ||
4081 | (u64) cp->kcq_info.pg_map_arr[0] >> 32); | ||
4082 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
4083 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1); | ||
4084 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
4085 | CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num); | ||
4086 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
4087 | CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0), | ||
4088 | HC_INDEX_C_ISCSI_EQ_CONS); | ||
4089 | |||
4090 | for (i = 0; i < cp->conn_buf_info.num_pages; i++) { | ||
4091 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
4092 | TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i), | ||
4093 | cp->conn_buf_info.pgtbl[2 * i]); | ||
4094 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
4095 | TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4, | ||
4096 | cp->conn_buf_info.pgtbl[(2 * i) + 1]); | ||
4097 | } | ||
4098 | |||
4099 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
4100 | USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func), | ||
4101 | cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); | ||
4102 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
4103 | USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4, | ||
4104 | (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); | ||
4105 | |||
4106 | cnic_setup_bnx2x_context(dev); | ||
4107 | |||
4108 | eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM + | ||
4109 | CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) + | ||
4110 | offsetof(struct cstorm_status_block_c, | ||
4111 | index_values[HC_INDEX_C_ISCSI_EQ_CONS])); | ||
4112 | if (eq_idx != 0) { | ||
4113 | printk(KERN_ERR PFX "%s: EQ cons index %x != 0\n", | ||
4114 | dev->netdev->name, eq_idx); | ||
4115 | return -EBUSY; | ||
4116 | } | ||
4117 | ret = cnic_init_bnx2x_irq(dev); | ||
4118 | if (ret) | ||
4119 | return ret; | ||
4120 | |||
4121 | cnic_init_bnx2x_tx_ring(dev); | ||
4122 | cnic_init_bnx2x_rx_ring(dev); | ||
4123 | |||
4124 | return 0; | ||
4125 | } | ||
4126 | |||
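[Editor's note] Several of the writes in cnic_start_bnx2x_hw() above program a 64-bit DMA page address as two 32-bit stores, low word at the base offset and high word at offset + 4. A trivial check of that split, with a hypothetical address:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t pg_map = 0x00000001f2e4c000ULL;   /* hypothetical DMA address */
        uint32_t lo = (uint32_t)(pg_map & 0xffffffff);
        uint32_t hi = (uint32_t)(pg_map >> 32);

        printf("lo=0x%08x hi=0x%08x\n", lo, hi);
        /* the driver writes lo at the offset and hi at offset + 4 */
        return 0;
}
```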
4127 | static void cnic_init_rings(struct cnic_dev *dev) | ||
4128 | { | ||
4129 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { | ||
4130 | cnic_init_bnx2_tx_ring(dev); | ||
4131 | cnic_init_bnx2_rx_ring(dev); | ||
4132 | } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { | ||
4133 | struct cnic_local *cp = dev->cnic_priv; | ||
4134 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
4135 | u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
4136 | union l5cm_specific_data l5_data; | ||
4137 | struct ustorm_eth_rx_producers rx_prods = {0}; | ||
4138 | void __iomem *doorbell; | ||
4139 | int i; | ||
4140 | |||
4141 | rx_prods.bd_prod = 0; | ||
4142 | rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; | ||
4143 | barrier(); | ||
4144 | |||
4145 | doorbell = ethdev->io_base2 + BAR_USTRORM_INTMEM + | ||
4146 | USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli); | ||
4147 | |||
4148 | for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) | ||
4149 | writel(((u32 *) &rx_prods)[i], doorbell + i * 4); | ||
4150 | |||
4151 | cnic_init_bnx2x_tx_ring(dev); | ||
4152 | cnic_init_bnx2x_rx_ring(dev); | ||
4153 | |||
4154 | l5_data.phy_address.lo = cli; | ||
4155 | l5_data.phy_address.hi = 0; | ||
4156 | cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, | ||
4157 | BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); | ||
4158 | cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1); | ||
4159 | } | ||
4160 | } | ||
4161 | |||
4162 | static void cnic_shutdown_rings(struct cnic_dev *dev) | ||
4163 | { | ||
4164 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { | ||
4165 | cnic_shutdown_bnx2_rx_ring(dev); | ||
4166 | } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { | ||
4167 | struct cnic_local *cp = dev->cnic_priv; | ||
4168 | u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
4169 | |||
4170 | cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); | ||
4171 | } | ||
4172 | } | ||
4173 | |||
2467 | static int cnic_register_netdev(struct cnic_dev *dev) | 4174 | static int cnic_register_netdev(struct cnic_dev *dev) |
2468 | { | 4175 | { |
2469 | struct cnic_local *cp = dev->cnic_priv; | 4176 | struct cnic_local *cp = dev->cnic_priv; |
@@ -2554,6 +4261,22 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev) | |||
2554 | cnic_free_resc(dev); | 4261 | cnic_free_resc(dev); |
2555 | } | 4262 | } |
2556 | 4263 | ||
4264 | |||
4265 | static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) | ||
4266 | { | ||
4267 | struct cnic_local *cp = dev->cnic_priv; | ||
4268 | u8 sb_id = cp->status_blk_num; | ||
4269 | int port = CNIC_PORT(cp); | ||
4270 | |||
4271 | cnic_free_irq(dev); | ||
4272 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
4273 | CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) + | ||
4274 | offsetof(struct cstorm_status_block_c, | ||
4275 | index_values[HC_INDEX_C_ISCSI_EQ_CONS]), | ||
4276 | 0); | ||
4277 | cnic_free_resc(dev); | ||
4278 | } | ||
4279 | |||
2557 | static void cnic_stop_hw(struct cnic_dev *dev) | 4280 | static void cnic_stop_hw(struct cnic_dev *dev) |
2558 | { | 4281 | { |
2559 | if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { | 4282 | if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { |
@@ -2685,6 +4408,57 @@ cnic_err: | |||
2685 | return NULL; | 4408 | return NULL; |
2686 | } | 4409 | } |
2687 | 4410 | ||
4411 | static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) | ||
4412 | { | ||
4413 | struct pci_dev *pdev; | ||
4414 | struct cnic_dev *cdev; | ||
4415 | struct cnic_local *cp; | ||
4416 | struct cnic_eth_dev *ethdev = NULL; | ||
4417 | struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; | ||
4418 | |||
4419 | probe = symbol_get(bnx2x_cnic_probe); | ||
4420 | if (probe) { | ||
4421 | ethdev = (*probe)(dev); | ||
4422 | symbol_put(bnx2x_cnic_probe); | ||
4423 | } | ||
4424 | if (!ethdev) | ||
4425 | return NULL; | ||
4426 | |||
4427 | pdev = ethdev->pdev; | ||
4428 | if (!pdev) | ||
4429 | return NULL; | ||
4430 | |||
4431 | dev_hold(dev); | ||
4432 | cdev = cnic_alloc_dev(dev, pdev); | ||
4433 | if (cdev == NULL) { | ||
4434 | dev_put(dev); | ||
4435 | return NULL; | ||
4436 | } | ||
4437 | |||
4438 | set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); | ||
4439 | cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; | ||
4440 | |||
4441 | cp = cdev->cnic_priv; | ||
4442 | cp->ethdev = ethdev; | ||
4443 | cdev->pcidev = pdev; | ||
4444 | |||
4445 | cp->cnic_ops = &cnic_bnx2x_ops; | ||
4446 | cp->start_hw = cnic_start_bnx2x_hw; | ||
4447 | cp->stop_hw = cnic_stop_bnx2x_hw; | ||
4448 | cp->setup_pgtbl = cnic_setup_page_tbl_le; | ||
4449 | cp->alloc_resc = cnic_alloc_bnx2x_resc; | ||
4450 | cp->free_resc = cnic_free_resc; | ||
4451 | cp->start_cm = cnic_cm_init_bnx2x_hw; | ||
4452 | cp->stop_cm = cnic_cm_stop_bnx2x_hw; | ||
4453 | cp->enable_int = cnic_enable_bnx2x_int; | ||
4454 | cp->disable_int_sync = cnic_disable_bnx2x_int_sync; | ||
4455 | cp->ack_int = cnic_ack_bnx2x_msix; | ||
4456 | cp->close_conn = cnic_close_bnx2x_conn; | ||
4457 | cp->next_idx = cnic_bnx2x_next_idx; | ||
4458 | cp->hw_idx = cnic_bnx2x_hw_idx; | ||
4459 | return cdev; | ||
4460 | } | ||
4461 | |||
2688 | static struct cnic_dev *is_cnic_dev(struct net_device *dev) | 4462 | static struct cnic_dev *is_cnic_dev(struct net_device *dev) |
2689 | { | 4463 | { |
2690 | struct ethtool_drvinfo drvinfo; | 4464 | struct ethtool_drvinfo drvinfo; |
@@ -2696,6 +4470,8 @@ static struct cnic_dev *is_cnic_dev(struct net_device *dev) | |||
2696 | 4470 | ||
2697 | if (!strcmp(drvinfo.driver, "bnx2")) | 4471 | if (!strcmp(drvinfo.driver, "bnx2")) |
2698 | cdev = init_bnx2_cnic(dev); | 4472 | cdev = init_bnx2_cnic(dev); |
4473 | if (!strcmp(drvinfo.driver, "bnx2x")) | ||
4474 | cdev = init_bnx2x_cnic(dev); | ||
2699 | if (cdev) { | 4475 | if (cdev) { |
2700 | write_lock(&cnic_dev_lock); | 4476 | write_lock(&cnic_dev_lock); |
2701 | list_add(&cdev->list, &cnic_dev_list); | 4477 | list_add(&cdev->list, &cnic_dev_list); |