author     Michael Chan <mchan@broadcom.com>      2009-10-10 09:46:59 -0400
committer  David S. Miller <davem@davemloft.net>  2009-10-12 02:30:18 -0400
commit     71034ba845c9ff219373066f904286c0b7506922 (patch)
tree       14090de1317f142fc5f7b218a21aa5e1c4812a17 /drivers/net/cnic.c
parent     e251306556d30c9c678feca60204acaaa0784cbd (diff)
cnic: Add main functions to support bnx2x devices.
Add iSCSI support for bnx2x devices.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Shmulik Ravid - Rabinovitz <shmulikr@broadcom.com>
Acked-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/cnic.c')
-rw-r--r--  drivers/net/cnic.c | 1742
1 file changed, 1742 insertions, 0 deletions
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 1fd10584bada..6e7af7bb4855 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -64,6 +64,7 @@ static DEFINE_MUTEX(cnic_lock);
64 | static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; | 64 | static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; |
65 | 65 | ||
66 | static int cnic_service_bnx2(void *, void *); | 66 | static int cnic_service_bnx2(void *, void *); |
67 | static int cnic_service_bnx2x(void *, void *); | ||
67 | static int cnic_ctl(void *, struct cnic_ctl_info *); | 68 | static int cnic_ctl(void *, struct cnic_ctl_info *); |
68 | 69 | ||
69 | static struct cnic_ops cnic_bnx2_ops = { | 70 | static struct cnic_ops cnic_bnx2_ops = { |
@@ -72,6 +73,12 @@ static struct cnic_ops cnic_bnx2_ops = {
72 | .cnic_ctl = cnic_ctl, | 73 | .cnic_ctl = cnic_ctl, |
73 | }; | 74 | }; |
74 | 75 | ||
76 | static struct cnic_ops cnic_bnx2x_ops = { | ||
77 | .cnic_owner = THIS_MODULE, | ||
78 | .cnic_handler = cnic_service_bnx2x, | ||
79 | .cnic_ctl = cnic_ctl, | ||
80 | }; | ||
81 | |||
75 | static void cnic_shutdown_rings(struct cnic_dev *); | 82 | static void cnic_shutdown_rings(struct cnic_dev *); |
76 | static void cnic_init_rings(struct cnic_dev *); | 83 | static void cnic_init_rings(struct cnic_dev *); |
77 | static int cnic_cm_set_pg(struct cnic_sock *); | 84 | static int cnic_cm_set_pg(struct cnic_sock *); |
@@ -172,6 +179,36 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
172 | ethdev->drv_ctl(dev->netdev, &info); | 179 | ethdev->drv_ctl(dev->netdev, &info); |
173 | } | 180 | } |
174 | 181 | ||
182 | static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr) | ||
183 | { | ||
184 | struct cnic_local *cp = dev->cnic_priv; | ||
185 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
186 | struct drv_ctl_info info; | ||
187 | struct drv_ctl_io *io = &info.data.io; | ||
188 | |||
189 | info.cmd = DRV_CTL_CTXTBL_WR_CMD; | ||
190 | io->offset = off; | ||
191 | io->dma_addr = addr; | ||
192 | ethdev->drv_ctl(dev->netdev, &info); | ||
193 | } | ||
194 | |||
195 | static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start) | ||
196 | { | ||
197 | struct cnic_local *cp = dev->cnic_priv; | ||
198 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
199 | struct drv_ctl_info info; | ||
200 | struct drv_ctl_l2_ring *ring = &info.data.ring; | ||
201 | |||
202 | if (start) | ||
203 | info.cmd = DRV_CTL_START_L2_CMD; | ||
204 | else | ||
205 | info.cmd = DRV_CTL_STOP_L2_CMD; | ||
206 | |||
207 | ring->cid = cid; | ||
208 | ring->client_id = cl_id; | ||
209 | ethdev->drv_ctl(dev->netdev, &info); | ||
210 | } | ||
211 | |||
175 | static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) | 212 | static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) |
176 | { | 213 | { |
177 | struct cnic_local *cp = dev->cnic_priv; | 214 | struct cnic_local *cp = dev->cnic_priv; |
@@ -214,6 +251,19 @@ static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
214 | ethdev->drv_ctl(dev->netdev, &info); | 251 | ethdev->drv_ctl(dev->netdev, &info); |
215 | } | 252 | } |
216 | 253 | ||
254 | static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid) | ||
255 | { | ||
256 | u32 i; | ||
257 | |||
258 | for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { | ||
259 | if (cp->ctx_tbl[i].cid == cid) { | ||
260 | *l5_cid = i; | ||
261 | return 0; | ||
262 | } | ||
263 | } | ||
264 | return -EINVAL; | ||
265 | } | ||
266 | |||
217 | static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, | 267 | static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, |
218 | struct cnic_sock *csk) | 268 | struct cnic_sock *csk) |
219 | { | 269 | { |
@@ -645,6 +695,20 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
645 | } | 695 | } |
646 | } | 696 | } |
647 | 697 | ||
698 | static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) | ||
699 | { | ||
700 | int i; | ||
701 | u32 *page_table = dma->pgtbl; | ||
702 | |||
703 | for (i = 0; i < dma->num_pages; i++) { | ||
704 | /* Each entry needs to be in little endian format. */ | ||
705 | *page_table = dma->pg_map_arr[i] & 0xffffffff; | ||
706 | page_table++; | ||
707 | *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); | ||
708 | page_table++; | ||
709 | } | ||
710 | } | ||
711 | |||
648 | static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, | 712 | static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, |
649 | int pages, int use_pg_tbl) | 713 | int pages, int use_pg_tbl) |
650 | { | 714 | { |
@@ -735,6 +799,7 @@ static void cnic_free_resc(struct cnic_dev *dev)
735 | cnic_free_dma(dev, &cp->gbl_buf_info); | 799 | cnic_free_dma(dev, &cp->gbl_buf_info); |
736 | cnic_free_dma(dev, &cp->conn_buf_info); | 800 | cnic_free_dma(dev, &cp->conn_buf_info); |
737 | cnic_free_dma(dev, &cp->kwq_info); | 801 | cnic_free_dma(dev, &cp->kwq_info); |
802 | cnic_free_dma(dev, &cp->kwq_16_data_info); | ||
738 | cnic_free_dma(dev, &cp->kcq_info); | 803 | cnic_free_dma(dev, &cp->kcq_info); |
739 | kfree(cp->iscsi_tbl); | 804 | kfree(cp->iscsi_tbl); |
740 | cp->iscsi_tbl = NULL; | 805 | cp->iscsi_tbl = NULL; |
@@ -834,6 +899,12 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
834 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; | 899 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; |
835 | 900 | ||
836 | uinfo->name = "bnx2_cnic"; | 901 | uinfo->name = "bnx2_cnic"; |
902 | } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { | ||
903 | uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & | ||
904 | PAGE_MASK; | ||
905 | uinfo->mem[1].size = sizeof(struct host_def_status_block); | ||
906 | |||
907 | uinfo->name = "bnx2x_cnic"; | ||
837 | } | 908 | } |
838 | 909 | ||
839 | uinfo->mem[1].memtype = UIO_MEM_LOGICAL; | 910 | uinfo->mem[1].memtype = UIO_MEM_LOGICAL; |
@@ -898,6 +969,151 @@ error:
898 | return ret; | 969 | return ret; |
899 | } | 970 | } |
900 | 971 | ||
972 | static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) | ||
973 | { | ||
974 | struct cnic_local *cp = dev->cnic_priv; | ||
975 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
976 | int ctx_blk_size = cp->ethdev->ctx_blk_size; | ||
977 | int total_mem, blks, i, cid_space; | ||
978 | |||
979 | if (BNX2X_ISCSI_START_CID < ethdev->starting_cid) | ||
980 | return -EINVAL; | ||
981 | |||
982 | cid_space = MAX_ISCSI_TBL_SZ + | ||
983 | (BNX2X_ISCSI_START_CID - ethdev->starting_cid); | ||
984 | |||
985 | total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space; | ||
986 | blks = total_mem / ctx_blk_size; | ||
987 | if (total_mem % ctx_blk_size) | ||
988 | blks++; | ||
989 | |||
990 | if (blks > cp->ethdev->ctx_tbl_len) | ||
991 | return -ENOMEM; | ||
992 | |||
993 | cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL); | ||
994 | if (cp->ctx_arr == NULL) | ||
995 | return -ENOMEM; | ||
996 | |||
997 | cp->ctx_blks = blks; | ||
998 | cp->ctx_blk_size = ctx_blk_size; | ||
999 | if (BNX2X_CHIP_IS_E1H(cp->chip_id)) | ||
1000 | cp->ctx_align = 0; | ||
1001 | else | ||
1002 | cp->ctx_align = ctx_blk_size; | ||
1003 | |||
1004 | cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE; | ||
1005 | |||
1006 | for (i = 0; i < blks; i++) { | ||
1007 | cp->ctx_arr[i].ctx = | ||
1008 | pci_alloc_consistent(dev->pcidev, cp->ctx_blk_size, | ||
1009 | &cp->ctx_arr[i].mapping); | ||
1010 | if (cp->ctx_arr[i].ctx == NULL) | ||
1011 | return -ENOMEM; | ||
1012 | |||
1013 | if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) { | ||
1014 | if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { | ||
1015 | cnic_free_context(dev); | ||
1016 | cp->ctx_blk_size += cp->ctx_align; | ||
1017 | i = -1; | ||
1018 | continue; | ||
1019 | } | ||
1020 | } | ||
1021 | } | ||
1022 | return 0; | ||
1023 | } | ||
1024 | |||
1025 | static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | ||
1026 | { | ||
1027 | struct cnic_local *cp = dev->cnic_priv; | ||
1028 | int i, j, n, ret, pages; | ||
1029 | struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; | ||
1030 | |||
1031 | cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, | ||
1032 | GFP_KERNEL); | ||
1033 | if (!cp->iscsi_tbl) | ||
1034 | goto error; | ||
1035 | |||
1036 | cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * | ||
1037 | MAX_CNIC_L5_CONTEXT, GFP_KERNEL); | ||
1038 | if (!cp->ctx_tbl) | ||
1039 | goto error; | ||
1040 | |||
1041 | for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { | ||
1042 | cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i]; | ||
1043 | cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; | ||
1044 | } | ||
1045 | |||
1046 | pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) / | ||
1047 | PAGE_SIZE; | ||
1048 | |||
1049 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); | ||
1050 | if (ret) | ||
1051 | return -ENOMEM; | ||
1052 | |||
1053 | n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; | ||
1054 | for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) { | ||
1055 | long off = CNIC_KWQ16_DATA_SIZE * (i % n); | ||
1056 | |||
1057 | cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; | ||
1058 | cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] + | ||
1059 | off; | ||
1060 | |||
1061 | if ((i % n) == (n - 1)) | ||
1062 | j++; | ||
1063 | } | ||
1064 | |||
1065 | ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0); | ||
1066 | if (ret) | ||
1067 | goto error; | ||
1068 | cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr; | ||
1069 | |||
1070 | for (i = 0; i < KCQ_PAGE_CNT; i++) { | ||
1071 | struct bnx2x_bd_chain_next *next = | ||
1072 | (struct bnx2x_bd_chain_next *) | ||
1073 | &cp->kcq[i][MAX_KCQE_CNT]; | ||
1074 | int j = i + 1; | ||
1075 | |||
1076 | if (j >= KCQ_PAGE_CNT) | ||
1077 | j = 0; | ||
1078 | next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32; | ||
1079 | next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff; | ||
1080 | } | ||
1081 | |||
1082 | pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS * | ||
1083 | BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE; | ||
1084 | ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1); | ||
1085 | if (ret) | ||
1086 | goto error; | ||
1087 | |||
1088 | pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; | ||
1089 | ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); | ||
1090 | if (ret) | ||
1091 | goto error; | ||
1092 | |||
1093 | ret = cnic_alloc_bnx2x_context(dev); | ||
1094 | if (ret) | ||
1095 | goto error; | ||
1096 | |||
1097 | cp->bnx2x_status_blk = cp->status_blk; | ||
1098 | cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; | ||
1099 | |||
1100 | cp->l2_rx_ring_size = 15; | ||
1101 | |||
1102 | ret = cnic_alloc_l2_rings(dev, 4); | ||
1103 | if (ret) | ||
1104 | goto error; | ||
1105 | |||
1106 | ret = cnic_alloc_uio(dev); | ||
1107 | if (ret) | ||
1108 | goto error; | ||
1109 | |||
1110 | return 0; | ||
1111 | |||
1112 | error: | ||
1113 | cnic_free_resc(dev); | ||
1114 | return -ENOMEM; | ||
1115 | } | ||
1116 | |||
901 | static inline u32 cnic_kwq_avail(struct cnic_local *cp) | 1117 | static inline u32 cnic_kwq_avail(struct cnic_local *cp) |
902 | { | 1118 | { |
903 | return cp->max_kwq_idx - | 1119 | return cp->max_kwq_idx - |
@@ -939,6 +1155,880 @@ static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
939 | return 0; | 1155 | return 0; |
940 | } | 1156 | } |
941 | 1157 | ||
1158 | static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid, | ||
1159 | union l5cm_specific_data *l5_data) | ||
1160 | { | ||
1161 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1162 | dma_addr_t map; | ||
1163 | |||
1164 | map = ctx->kwqe_data_mapping; | ||
1165 | l5_data->phy_address.lo = (u64) map & 0xffffffff; | ||
1166 | l5_data->phy_address.hi = (u64) map >> 32; | ||
1167 | return ctx->kwqe_data; | ||
1168 | } | ||
1169 | |||
1170 | static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, | ||
1171 | u32 type, union l5cm_specific_data *l5_data) | ||
1172 | { | ||
1173 | struct cnic_local *cp = dev->cnic_priv; | ||
1174 | struct l5cm_spe kwqe; | ||
1175 | struct kwqe_16 *kwq[1]; | ||
1176 | int ret; | ||
1177 | |||
1178 | kwqe.hdr.conn_and_cmd_data = | ||
1179 | cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | | ||
1180 | BNX2X_HW_CID(cid, cp->func))); | ||
1181 | kwqe.hdr.type = cpu_to_le16(type); | ||
1182 | kwqe.hdr.reserved = 0; | ||
1183 | kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); | ||
1184 | kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); | ||
1185 | |||
1186 | kwq[0] = (struct kwqe_16 *) &kwqe; | ||
1187 | |||
1188 | spin_lock_bh(&cp->cnic_ulp_lock); | ||
1189 | ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1); | ||
1190 | spin_unlock_bh(&cp->cnic_ulp_lock); | ||
1191 | |||
1192 | if (ret == 1) | ||
1193 | return 0; | ||
1194 | |||
1195 | return -EBUSY; | ||
1196 | } | ||
1197 | |||
1198 | static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, | ||
1199 | struct kcqe *cqes[], u32 num_cqes) | ||
1200 | { | ||
1201 | struct cnic_local *cp = dev->cnic_priv; | ||
1202 | struct cnic_ulp_ops *ulp_ops; | ||
1203 | |||
1204 | rcu_read_lock(); | ||
1205 | ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); | ||
1206 | if (likely(ulp_ops)) { | ||
1207 | ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], | ||
1208 | cqes, num_cqes); | ||
1209 | } | ||
1210 | rcu_read_unlock(); | ||
1211 | } | ||
1212 | |||
1213 | static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1214 | { | ||
1215 | struct cnic_local *cp = dev->cnic_priv; | ||
1216 | struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; | ||
1217 | int func = cp->func, pages; | ||
1218 | int hq_bds; | ||
1219 | |||
1220 | cp->num_iscsi_tasks = req1->num_tasks_per_conn; | ||
1221 | cp->num_ccells = req1->num_ccells_per_conn; | ||
1222 | cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE * | ||
1223 | cp->num_iscsi_tasks; | ||
1224 | cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * | ||
1225 | BNX2X_ISCSI_R2TQE_SIZE; | ||
1226 | cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; | ||
1227 | pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; | ||
1228 | hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); | ||
1229 | cp->num_cqs = req1->num_cqs; | ||
1230 | |||
1231 | if (!dev->max_iscsi_conn) | ||
1232 | return 0; | ||
1233 | |||
1234 | /* init Tstorm RAM */ | ||
1235 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func), | ||
1236 | req1->rq_num_wqes); | ||
1237 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1238 | PAGE_SIZE); | ||
1239 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1240 | TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1241 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + | ||
1242 | TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1243 | req1->num_tasks_per_conn); | ||
1244 | |||
1245 | /* init Ustorm RAM */ | ||
1246 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | ||
1247 | USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func), | ||
1248 | req1->rq_buffer_size); | ||
1249 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1250 | PAGE_SIZE); | ||
1251 | CNIC_WR8(dev, BAR_USTRORM_INTMEM + | ||
1252 | USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1253 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | ||
1254 | USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1255 | req1->num_tasks_per_conn); | ||
1256 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func), | ||
1257 | req1->rq_num_wqes); | ||
1258 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func), | ||
1259 | req1->cq_num_wqes); | ||
1260 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func), | ||
1261 | cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); | ||
1262 | |||
1263 | /* init Xstorm RAM */ | ||
1264 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1265 | PAGE_SIZE); | ||
1266 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1267 | XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1268 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + | ||
1269 | XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1270 | req1->num_tasks_per_conn); | ||
1271 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func), | ||
1272 | hq_bds); | ||
1273 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func), | ||
1274 | req1->num_tasks_per_conn); | ||
1275 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func), | ||
1276 | cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); | ||
1277 | |||
1278 | /* init Cstorm RAM */ | ||
1279 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func), | ||
1280 | PAGE_SIZE); | ||
1281 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
1282 | CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); | ||
1283 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
1284 | CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), | ||
1285 | req1->num_tasks_per_conn); | ||
1286 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func), | ||
1287 | req1->cq_num_wqes); | ||
1288 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func), | ||
1289 | hq_bds); | ||
1290 | |||
1291 | return 0; | ||
1292 | } | ||
1293 | |||
1294 | static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1295 | { | ||
1296 | struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; | ||
1297 | struct cnic_local *cp = dev->cnic_priv; | ||
1298 | int func = cp->func; | ||
1299 | struct iscsi_kcqe kcqe; | ||
1300 | struct kcqe *cqes[1]; | ||
1301 | |||
1302 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1303 | if (!dev->max_iscsi_conn) { | ||
1304 | kcqe.completion_status = | ||
1305 | ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED; | ||
1306 | goto done; | ||
1307 | } | ||
1308 | |||
1309 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
1310 | TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]); | ||
1311 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
1312 | TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4, | ||
1313 | req2->error_bit_map[1]); | ||
1314 | |||
1315 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | ||
1316 | USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn); | ||
1317 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
1318 | USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]); | ||
1319 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
1320 | USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4, | ||
1321 | req2->error_bit_map[1]); | ||
1322 | |||
1323 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
1324 | CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn); | ||
1325 | |||
1326 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; | ||
1327 | |||
1328 | done: | ||
1329 | kcqe.op_code = ISCSI_KCQE_OPCODE_INIT; | ||
1330 | cqes[0] = (struct kcqe *) &kcqe; | ||
1331 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); | ||
1332 | |||
1333 | return 0; | ||
1334 | } | ||
1335 | |||
1336 | static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) | ||
1337 | { | ||
1338 | struct cnic_local *cp = dev->cnic_priv; | ||
1339 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1340 | |||
1341 | if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) { | ||
1342 | struct cnic_iscsi *iscsi = ctx->proto.iscsi; | ||
1343 | |||
1344 | cnic_free_dma(dev, &iscsi->hq_info); | ||
1345 | cnic_free_dma(dev, &iscsi->r2tq_info); | ||
1346 | cnic_free_dma(dev, &iscsi->task_array_info); | ||
1347 | } | ||
1348 | cnic_free_id(&cp->cid_tbl, ctx->cid); | ||
1349 | ctx->cid = 0; | ||
1350 | } | ||
1351 | |||
1352 | static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) | ||
1353 | { | ||
1354 | u32 cid; | ||
1355 | int ret, pages; | ||
1356 | struct cnic_local *cp = dev->cnic_priv; | ||
1357 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1358 | struct cnic_iscsi *iscsi = ctx->proto.iscsi; | ||
1359 | |||
1360 | cid = cnic_alloc_new_id(&cp->cid_tbl); | ||
1361 | if (cid == -1) { | ||
1362 | ret = -ENOMEM; | ||
1363 | goto error; | ||
1364 | } | ||
1365 | |||
1366 | ctx->cid = cid; | ||
1367 | pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; | ||
1368 | |||
1369 | ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); | ||
1370 | if (ret) | ||
1371 | goto error; | ||
1372 | |||
1373 | pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; | ||
1374 | ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); | ||
1375 | if (ret) | ||
1376 | goto error; | ||
1377 | |||
1378 | pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; | ||
1379 | ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); | ||
1380 | if (ret) | ||
1381 | goto error; | ||
1382 | |||
1383 | return 0; | ||
1384 | |||
1385 | error: | ||
1386 | cnic_free_bnx2x_conn_resc(dev, l5_cid); | ||
1387 | return ret; | ||
1388 | } | ||
1389 | |||
1390 | static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init, | ||
1391 | struct regpair *ctx_addr) | ||
1392 | { | ||
1393 | struct cnic_local *cp = dev->cnic_priv; | ||
1394 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
1395 | int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk; | ||
1396 | int off = (cid - ethdev->starting_cid) % cp->cids_per_blk; | ||
1397 | unsigned long align_off = 0; | ||
1398 | dma_addr_t ctx_map; | ||
1399 | void *ctx; | ||
1400 | |||
1401 | if (cp->ctx_align) { | ||
1402 | unsigned long mask = cp->ctx_align - 1; | ||
1403 | |||
1404 | if (cp->ctx_arr[blk].mapping & mask) | ||
1405 | align_off = cp->ctx_align - | ||
1406 | (cp->ctx_arr[blk].mapping & mask); | ||
1407 | } | ||
1408 | ctx_map = cp->ctx_arr[blk].mapping + align_off + | ||
1409 | (off * BNX2X_CONTEXT_MEM_SIZE); | ||
1410 | ctx = cp->ctx_arr[blk].ctx + align_off + | ||
1411 | (off * BNX2X_CONTEXT_MEM_SIZE); | ||
1412 | if (init) | ||
1413 | memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE); | ||
1414 | |||
1415 | ctx_addr->lo = ctx_map & 0xffffffff; | ||
1416 | ctx_addr->hi = (u64) ctx_map >> 32; | ||
1417 | return ctx; | ||
1418 | } | ||
1419 | |||
1420 | static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], | ||
1421 | u32 num) | ||
1422 | { | ||
1423 | struct cnic_local *cp = dev->cnic_priv; | ||
1424 | struct iscsi_kwqe_conn_offload1 *req1 = | ||
1425 | (struct iscsi_kwqe_conn_offload1 *) wqes[0]; | ||
1426 | struct iscsi_kwqe_conn_offload2 *req2 = | ||
1427 | (struct iscsi_kwqe_conn_offload2 *) wqes[1]; | ||
1428 | struct iscsi_kwqe_conn_offload3 *req3; | ||
1429 | struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; | ||
1430 | struct cnic_iscsi *iscsi = ctx->proto.iscsi; | ||
1431 | u32 cid = ctx->cid; | ||
1432 | u32 hw_cid = BNX2X_HW_CID(cid, cp->func); | ||
1433 | struct iscsi_context *ictx; | ||
1434 | struct regpair context_addr; | ||
1435 | int i, j, n = 2, n_max; | ||
1436 | |||
1437 | ctx->ctx_flags = 0; | ||
1438 | if (!req2->num_additional_wqes) | ||
1439 | return -EINVAL; | ||
1440 | |||
1441 | n_max = req2->num_additional_wqes + 2; | ||
1442 | |||
1443 | ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr); | ||
1444 | if (ictx == NULL) | ||
1445 | return -ENOMEM; | ||
1446 | |||
1447 | req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; | ||
1448 | |||
1449 | ictx->xstorm_ag_context.hq_prod = 1; | ||
1450 | |||
1451 | ictx->xstorm_st_context.iscsi.first_burst_length = | ||
1452 | ISCSI_DEF_FIRST_BURST_LEN; | ||
1453 | ictx->xstorm_st_context.iscsi.max_send_pdu_length = | ||
1454 | ISCSI_DEF_MAX_RECV_SEG_LEN; | ||
1455 | ictx->xstorm_st_context.iscsi.sq_pbl_base.lo = | ||
1456 | req1->sq_page_table_addr_lo; | ||
1457 | ictx->xstorm_st_context.iscsi.sq_pbl_base.hi = | ||
1458 | req1->sq_page_table_addr_hi; | ||
1459 | ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi; | ||
1460 | ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo; | ||
1461 | ictx->xstorm_st_context.iscsi.hq_pbl_base.lo = | ||
1462 | iscsi->hq_info.pgtbl_map & 0xffffffff; | ||
1463 | ictx->xstorm_st_context.iscsi.hq_pbl_base.hi = | ||
1464 | (u64) iscsi->hq_info.pgtbl_map >> 32; | ||
1465 | ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo = | ||
1466 | iscsi->hq_info.pgtbl[0]; | ||
1467 | ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi = | ||
1468 | iscsi->hq_info.pgtbl[1]; | ||
1469 | ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo = | ||
1470 | iscsi->r2tq_info.pgtbl_map & 0xffffffff; | ||
1471 | ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi = | ||
1472 | (u64) iscsi->r2tq_info.pgtbl_map >> 32; | ||
1473 | ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo = | ||
1474 | iscsi->r2tq_info.pgtbl[0]; | ||
1475 | ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi = | ||
1476 | iscsi->r2tq_info.pgtbl[1]; | ||
1477 | ictx->xstorm_st_context.iscsi.task_pbl_base.lo = | ||
1478 | iscsi->task_array_info.pgtbl_map & 0xffffffff; | ||
1479 | ictx->xstorm_st_context.iscsi.task_pbl_base.hi = | ||
1480 | (u64) iscsi->task_array_info.pgtbl_map >> 32; | ||
1481 | ictx->xstorm_st_context.iscsi.task_pbl_cache_idx = | ||
1482 | BNX2X_ISCSI_PBL_NOT_CACHED; | ||
1483 | ictx->xstorm_st_context.iscsi.flags.flags |= | ||
1484 | XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA; | ||
1485 | ictx->xstorm_st_context.iscsi.flags.flags |= | ||
1486 | XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; | ||
1487 | |||
1488 | ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; | ||
1489 | /* TSTORM requires the base address of RQ DB & not PTE */ | ||
1490 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = | ||
1491 | req2->rq_page_table_addr_lo & PAGE_MASK; | ||
1492 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = | ||
1493 | req2->rq_page_table_addr_hi; | ||
1494 | ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; | ||
1495 | ictx->tstorm_st_context.tcp.cwnd = 0x5A8; | ||
1496 | ictx->tstorm_st_context.tcp.flags2 |= | ||
1497 | TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; | ||
1498 | |||
1499 | ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; | ||
1500 | |||
1501 | ictx->ustorm_st_context.ring.rq.pbl_base.lo = | ||
1502 | req2->rq_page_table_addr_lo & 0xffffffff; | ||
1503 | ictx->ustorm_st_context.ring.rq.pbl_base.hi = | ||
1504 | (u64) req2->rq_page_table_addr_hi >> 32; | ||
1505 | ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi; | ||
1506 | ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo; | ||
1507 | ictx->ustorm_st_context.ring.r2tq.pbl_base.lo = | ||
1508 | iscsi->r2tq_info.pgtbl_map & 0xffffffff; | ||
1509 | ictx->ustorm_st_context.ring.r2tq.pbl_base.hi = | ||
1510 | (u64) iscsi->r2tq_info.pgtbl_map >> 32; | ||
1511 | ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo = | ||
1512 | iscsi->r2tq_info.pgtbl[0]; | ||
1513 | ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi = | ||
1514 | iscsi->r2tq_info.pgtbl[1]; | ||
1515 | ictx->ustorm_st_context.ring.cq_pbl_base.lo = | ||
1516 | req1->cq_page_table_addr_lo; | ||
1517 | ictx->ustorm_st_context.ring.cq_pbl_base.hi = | ||
1518 | req1->cq_page_table_addr_hi; | ||
1519 | ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN; | ||
1520 | ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi; | ||
1521 | ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo; | ||
1522 | ictx->ustorm_st_context.task_pbe_cache_index = | ||
1523 | BNX2X_ISCSI_PBL_NOT_CACHED; | ||
1524 | ictx->ustorm_st_context.task_pdu_cache_index = | ||
1525 | BNX2X_ISCSI_PDU_HEADER_NOT_CACHED; | ||
1526 | |||
1527 | for (i = 1, j = 1; i < cp->num_cqs; i++, j++) { | ||
1528 | if (j == 3) { | ||
1529 | if (n >= n_max) | ||
1530 | break; | ||
1531 | req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; | ||
1532 | j = 0; | ||
1533 | } | ||
1534 | ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN; | ||
1535 | ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo = | ||
1536 | req3->qp_first_pte[j].hi; | ||
1537 | ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi = | ||
1538 | req3->qp_first_pte[j].lo; | ||
1539 | } | ||
1540 | |||
1541 | ictx->ustorm_st_context.task_pbl_base.lo = | ||
1542 | iscsi->task_array_info.pgtbl_map & 0xffffffff; | ||
1543 | ictx->ustorm_st_context.task_pbl_base.hi = | ||
1544 | (u64) iscsi->task_array_info.pgtbl_map >> 32; | ||
1545 | ictx->ustorm_st_context.tce_phy_addr.lo = | ||
1546 | iscsi->task_array_info.pgtbl[0]; | ||
1547 | ictx->ustorm_st_context.tce_phy_addr.hi = | ||
1548 | iscsi->task_array_info.pgtbl[1]; | ||
1549 | ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; | ||
1550 | ictx->ustorm_st_context.num_cqs = cp->num_cqs; | ||
1551 | ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN; | ||
1552 | ictx->ustorm_st_context.negotiated_rx_and_flags |= | ||
1553 | ISCSI_DEF_MAX_BURST_LEN; | ||
1554 | ictx->ustorm_st_context.negotiated_rx |= | ||
1555 | ISCSI_DEFAULT_MAX_OUTSTANDING_R2T << | ||
1556 | USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT; | ||
1557 | |||
1558 | ictx->cstorm_st_context.hq_pbl_base.lo = | ||
1559 | iscsi->hq_info.pgtbl_map & 0xffffffff; | ||
1560 | ictx->cstorm_st_context.hq_pbl_base.hi = | ||
1561 | (u64) iscsi->hq_info.pgtbl_map >> 32; | ||
1562 | ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0]; | ||
1563 | ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1]; | ||
1564 | ictx->cstorm_st_context.task_pbl_base.lo = | ||
1565 | iscsi->task_array_info.pgtbl_map & 0xffffffff; | ||
1566 | ictx->cstorm_st_context.task_pbl_base.hi = | ||
1567 | (u64) iscsi->task_array_info.pgtbl_map >> 32; | ||
1568 | /* CSTORM and USTORM initialization is different, CSTORM requires | ||
1569 | * CQ DB base & not PTE addr */ | ||
1570 | ictx->cstorm_st_context.cq_db_base.lo = | ||
1571 | req1->cq_page_table_addr_lo & PAGE_MASK; | ||
1572 | ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; | ||
1573 | ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; | ||
1574 | ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; | ||
1575 | for (i = 0; i < cp->num_cqs; i++) { | ||
1576 | ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] = | ||
1577 | ISCSI_INITIAL_SN; | ||
1578 | ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] = | ||
1579 | ISCSI_INITIAL_SN; | ||
1580 | } | ||
1581 | |||
1582 | ictx->xstorm_ag_context.cdu_reserved = | ||
1583 | CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, | ||
1584 | ISCSI_CONNECTION_TYPE); | ||
1585 | ictx->ustorm_ag_context.cdu_usage = | ||
1586 | CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, | ||
1587 | ISCSI_CONNECTION_TYPE); | ||
1588 | return 0; | ||
1589 | |||
1590 | } | ||
1591 | |||
1592 | static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], | ||
1593 | u32 num, int *work) | ||
1594 | { | ||
1595 | struct iscsi_kwqe_conn_offload1 *req1; | ||
1596 | struct iscsi_kwqe_conn_offload2 *req2; | ||
1597 | struct cnic_local *cp = dev->cnic_priv; | ||
1598 | struct iscsi_kcqe kcqe; | ||
1599 | struct kcqe *cqes[1]; | ||
1600 | u32 l5_cid; | ||
1601 | int ret; | ||
1602 | |||
1603 | if (num < 2) { | ||
1604 | *work = num; | ||
1605 | return -EINVAL; | ||
1606 | } | ||
1607 | |||
1608 | req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; | ||
1609 | req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1]; | ||
1610 | if ((num - 2) < req2->num_additional_wqes) { | ||
1611 | *work = num; | ||
1612 | return -EINVAL; | ||
1613 | } | ||
1614 | *work = 2 + req2->num_additional_wqes; ||
1615 | |||
1616 | l5_cid = req1->iscsi_conn_id; | ||
1617 | if (l5_cid >= MAX_ISCSI_TBL_SZ) | ||
1618 | return -EINVAL; | ||
1619 | |||
1620 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1621 | kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN; | ||
1622 | kcqe.iscsi_conn_id = l5_cid; | ||
1623 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; | ||
1624 | |||
1625 | if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { | ||
1626 | atomic_dec(&cp->iscsi_conn); | ||
1627 | ret = 0; | ||
1628 | goto done; | ||
1629 | } | ||
1630 | ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); | ||
1631 | if (ret) { | ||
1632 | atomic_dec(&cp->iscsi_conn); | ||
1633 | ret = 0; | ||
1634 | goto done; | ||
1635 | } | ||
1636 | ret = cnic_setup_bnx2x_ctx(dev, wqes, num); | ||
1637 | if (ret < 0) { | ||
1638 | cnic_free_bnx2x_conn_resc(dev, l5_cid); | ||
1639 | atomic_dec(&cp->iscsi_conn); | ||
1640 | goto done; | ||
1641 | } | ||
1642 | |||
1643 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; | ||
1644 | kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid, | ||
1645 | cp->func); | ||
1646 | |||
1647 | done: | ||
1648 | cqes[0] = (struct kcqe *) &kcqe; | ||
1649 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); | ||
1650 | return ret; | ||
1651 | } | ||
1652 | |||
1653 | |||
1654 | static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1655 | { | ||
1656 | struct cnic_local *cp = dev->cnic_priv; | ||
1657 | struct iscsi_kwqe_conn_update *req = | ||
1658 | (struct iscsi_kwqe_conn_update *) kwqe; | ||
1659 | void *data; | ||
1660 | union l5cm_specific_data l5_data; | ||
1661 | u32 l5_cid, cid = BNX2X_SW_CID(req->context_id); | ||
1662 | int ret; | ||
1663 | |||
1664 | if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0) | ||
1665 | return -EINVAL; | ||
1666 | |||
1667 | data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); | ||
1668 | if (!data) | ||
1669 | return -ENOMEM; | ||
1670 | |||
1671 | memcpy(data, kwqe, sizeof(struct kwqe)); | ||
1672 | |||
1673 | ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN, | ||
1674 | req->context_id, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1675 | return ret; | ||
1676 | } | ||
1677 | |||
1678 | static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1679 | { | ||
1680 | struct cnic_local *cp = dev->cnic_priv; | ||
1681 | struct iscsi_kwqe_conn_destroy *req = | ||
1682 | (struct iscsi_kwqe_conn_destroy *) kwqe; | ||
1683 | union l5cm_specific_data l5_data; | ||
1684 | u32 l5_cid = req->reserved0; | ||
1685 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1686 | int ret = 0; | ||
1687 | struct iscsi_kcqe kcqe; | ||
1688 | struct kcqe *cqes[1]; | ||
1689 | |||
1690 | if (!(ctx->ctx_flags & CTX_FL_OFFLD_START)) | ||
1691 | goto skip_cfc_delete; | ||
1692 | |||
1693 | while (!time_after(jiffies, ctx->timestamp + (2 * HZ))) | ||
1694 | msleep(250); | ||
1695 | |||
1696 | init_waitqueue_head(&ctx->waitq); | ||
1697 | ctx->wait_cond = 0; | ||
1698 | memset(&l5_data, 0, sizeof(l5_data)); | ||
1699 | ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, | ||
1700 | req->context_id, | ||
1701 | ETH_CONNECTION_TYPE | | ||
1702 | (1 << SPE_HDR_COMMON_RAMROD_SHIFT), | ||
1703 | &l5_data); | ||
1704 | if (ret == 0) | ||
1705 | wait_event(ctx->waitq, ctx->wait_cond); | ||
1706 | |||
1707 | skip_cfc_delete: | ||
1708 | cnic_free_bnx2x_conn_resc(dev, l5_cid); | ||
1709 | |||
1710 | atomic_dec(&cp->iscsi_conn); | ||
1711 | |||
1712 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1713 | kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; | ||
1714 | kcqe.iscsi_conn_id = l5_cid; | ||
1715 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; | ||
1716 | kcqe.iscsi_conn_context_id = req->context_id; | ||
1717 | |||
1718 | cqes[0] = (struct kcqe *) &kcqe; | ||
1719 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); | ||
1720 | |||
1721 | return ret; | ||
1722 | } | ||
1723 | |||
1724 | static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, | ||
1725 | struct l4_kwq_connect_req1 *kwqe1, | ||
1726 | struct l4_kwq_connect_req3 *kwqe3, | ||
1727 | struct l5cm_active_conn_buffer *conn_buf) | ||
1728 | { | ||
1729 | struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf; | ||
1730 | struct l5cm_xstorm_conn_buffer *xstorm_buf = | ||
1731 | &conn_buf->xstorm_conn_buffer; | ||
1732 | struct l5cm_tstorm_conn_buffer *tstorm_buf = | ||
1733 | &conn_buf->tstorm_conn_buffer; | ||
1734 | struct regpair context_addr; | ||
1735 | u32 cid = BNX2X_SW_CID(kwqe1->cid); | ||
1736 | struct in6_addr src_ip, dst_ip; | ||
1737 | int i; | ||
1738 | u32 *addrp; | ||
1739 | |||
1740 | addrp = (u32 *) &conn_addr->local_ip_addr; | ||
1741 | for (i = 0; i < 4; i++, addrp++) | ||
1742 | src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); | ||
1743 | |||
1744 | addrp = (u32 *) &conn_addr->remote_ip_addr; | ||
1745 | for (i = 0; i < 4; i++, addrp++) | ||
1746 | dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); | ||
1747 | |||
1748 | cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr); | ||
1749 | |||
1750 | xstorm_buf->context_addr.hi = context_addr.hi; | ||
1751 | xstorm_buf->context_addr.lo = context_addr.lo; | ||
1752 | xstorm_buf->mss = 0xffff; | ||
1753 | xstorm_buf->rcv_buf = kwqe3->rcv_buf; | ||
1754 | if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE) | ||
1755 | xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE; | ||
1756 | xstorm_buf->pseudo_header_checksum = | ||
1757 | swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); | ||
1758 | |||
1759 | if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK)) | ||
1760 | tstorm_buf->params |= | ||
1761 | L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE; | ||
1762 | if (kwqe3->ka_timeout) { | ||
1763 | tstorm_buf->ka_enable = 1; | ||
1764 | tstorm_buf->ka_timeout = kwqe3->ka_timeout; | ||
1765 | tstorm_buf->ka_interval = kwqe3->ka_interval; | ||
1766 | tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; | ||
1767 | } | ||
1768 | tstorm_buf->rcv_buf = kwqe3->rcv_buf; | ||
1769 | tstorm_buf->snd_buf = kwqe3->snd_buf; | ||
1770 | tstorm_buf->max_rt_time = 0xffffffff; | ||
1771 | } | ||
1772 | |||
1773 | static void cnic_init_bnx2x_mac(struct cnic_dev *dev) | ||
1774 | { | ||
1775 | struct cnic_local *cp = dev->cnic_priv; | ||
1776 | int func = CNIC_FUNC(cp); | ||
1777 | u8 *mac = dev->mac_addr; | ||
1778 | |||
1779 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1780 | XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]); | ||
1781 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1782 | XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]); | ||
1783 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1784 | XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]); | ||
1785 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1786 | XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]); | ||
1787 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1788 | XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]); | ||
1789 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1790 | XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]); | ||
1791 | |||
1792 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1793 | TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]); | ||
1794 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1795 | TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1, | ||
1796 | mac[4]); | ||
1797 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1798 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]); | ||
1799 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1800 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1, | ||
1801 | mac[2]); | ||
1802 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1803 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2, | ||
1804 | mac[1]); | ||
1805 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | ||
1806 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3, | ||
1807 | mac[0]); | ||
1808 | } | ||
1809 | |||
1810 | static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) | ||
1811 | { | ||
1812 | struct cnic_local *cp = dev->cnic_priv; | ||
1813 | u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; | ||
1814 | u16 tstorm_flags = 0; | ||
1815 | |||
1816 | if (tcp_ts) { | ||
1817 | xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; | ||
1818 | tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; | ||
1819 | } | ||
1820 | |||
1821 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
1822 | XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags); | ||
1823 | |||
1824 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + | ||
1825 | TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags); | ||
1826 | } | ||
1827 | |||
1828 | static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], | ||
1829 | u32 num, int *work) | ||
1830 | { | ||
1831 | struct cnic_local *cp = dev->cnic_priv; | ||
1832 | struct l4_kwq_connect_req1 *kwqe1 = | ||
1833 | (struct l4_kwq_connect_req1 *) wqes[0]; | ||
1834 | struct l4_kwq_connect_req3 *kwqe3; | ||
1835 | struct l5cm_active_conn_buffer *conn_buf; | ||
1836 | struct l5cm_conn_addr_params *conn_addr; | ||
1837 | union l5cm_specific_data l5_data; | ||
1838 | u32 l5_cid = kwqe1->pg_cid; | ||
1839 | struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; | ||
1840 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1841 | int ret; | ||
1842 | |||
1843 | if (num < 2) { | ||
1844 | *work = num; | ||
1845 | return -EINVAL; | ||
1846 | } | ||
1847 | |||
1848 | if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) | ||
1849 | *work = 3; | ||
1850 | else | ||
1851 | *work = 2; | ||
1852 | |||
1853 | if (num < *work) { | ||
1854 | *work = num; | ||
1855 | return -EINVAL; | ||
1856 | } | ||
1857 | |||
1858 | if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { | ||
1859 | printk(KERN_ERR PFX "%s: conn_buf size too big\n", | ||
1860 | dev->netdev->name); | ||
1861 | return -ENOMEM; | ||
1862 | } | ||
1863 | conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); | ||
1864 | if (!conn_buf) | ||
1865 | return -ENOMEM; | ||
1866 | |||
1867 | memset(conn_buf, 0, sizeof(*conn_buf)); | ||
1868 | |||
1869 | conn_addr = &conn_buf->conn_addr_buf; | ||
1870 | conn_addr->remote_addr_0 = csk->ha[0]; | ||
1871 | conn_addr->remote_addr_1 = csk->ha[1]; | ||
1872 | conn_addr->remote_addr_2 = csk->ha[2]; | ||
1873 | conn_addr->remote_addr_3 = csk->ha[3]; | ||
1874 | conn_addr->remote_addr_4 = csk->ha[4]; | ||
1875 | conn_addr->remote_addr_5 = csk->ha[5]; | ||
1876 | |||
1877 | if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { | ||
1878 | struct l4_kwq_connect_req2 *kwqe2 = | ||
1879 | (struct l4_kwq_connect_req2 *) wqes[1]; | ||
1880 | |||
1881 | conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; | ||
1882 | conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; | ||
1883 | conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; | ||
1884 | |||
1885 | conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; | ||
1886 | conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; | ||
1887 | conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; | ||
1888 | conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; | ||
1889 | } | ||
1890 | kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; | ||
1891 | |||
1892 | conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; | ||
1893 | conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; | ||
1894 | conn_addr->local_tcp_port = kwqe1->src_port; | ||
1895 | conn_addr->remote_tcp_port = kwqe1->dst_port; | ||
1896 | |||
1897 | conn_addr->pmtu = kwqe3->pmtu; | ||
1898 | cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); | ||
1899 | |||
1900 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + | ||
1901 | XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id); | ||
1902 | |||
1903 | cnic_bnx2x_set_tcp_timestamp(dev, | ||
1904 | kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); | ||
1905 | |||
1906 | ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, | ||
1907 | kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1908 | if (!ret) | ||
1909 | ctx->ctx_flags |= CTX_FL_OFFLD_START; | ||
1910 | |||
1911 | return ret; | ||
1912 | } | ||
1913 | |||
1914 | static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1915 | { | ||
1916 | struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; | ||
1917 | union l5cm_specific_data l5_data; | ||
1918 | int ret; | ||
1919 | |||
1920 | memset(&l5_data, 0, sizeof(l5_data)); | ||
1921 | ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, | ||
1922 | req->cid, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1923 | return ret; | ||
1924 | } | ||
1925 | |||
1926 | static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1927 | { | ||
1928 | struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; | ||
1929 | union l5cm_specific_data l5_data; | ||
1930 | int ret; | ||
1931 | |||
1932 | memset(&l5_data, 0, sizeof(l5_data)); | ||
1933 | ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, | ||
1934 | req->cid, ISCSI_CONNECTION_TYPE, &l5_data); | ||
1935 | return ret; | ||
1936 | } | ||
1937 | static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1938 | { | ||
1939 | struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; | ||
1940 | struct l4_kcq kcqe; | ||
1941 | struct kcqe *cqes[1]; | ||
1942 | |||
1943 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1944 | kcqe.pg_host_opaque = req->host_opaque; | ||
1945 | kcqe.pg_cid = req->host_opaque; | ||
1946 | kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; | ||
1947 | cqes[0] = (struct kcqe *) &kcqe; | ||
1948 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); | ||
1949 | return 0; | ||
1950 | } | ||
1951 | |||
1952 | static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1953 | { | ||
1954 | struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; | ||
1955 | struct l4_kcq kcqe; | ||
1956 | struct kcqe *cqes[1]; | ||
1957 | |||
1958 | memset(&kcqe, 0, sizeof(kcqe)); | ||
1959 | kcqe.pg_host_opaque = req->pg_host_opaque; | ||
1960 | kcqe.pg_cid = req->pg_cid; | ||
1961 | kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; | ||
1962 | cqes[0] = (struct kcqe *) &kcqe; | ||
1963 | cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); | ||
1964 | return 0; | ||
1965 | } | ||
1966 | |||
1967 | static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], | ||
1968 | u32 num_wqes) | ||
1969 | { | ||
1970 | int i, work, ret; | ||
1971 | u32 opcode; | ||
1972 | struct kwqe *kwqe; | ||
1973 | |||
1974 | if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) | ||
1975 | return -EAGAIN; /* bnx2 is down */ | ||
1976 | |||
1977 | for (i = 0; i < num_wqes; ) { | ||
1978 | kwqe = wqes[i]; | ||
1979 | opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); | ||
1980 | work = 1; | ||
1981 | |||
1982 | switch (opcode) { | ||
1983 | case ISCSI_KWQE_OPCODE_INIT1: | ||
1984 | ret = cnic_bnx2x_iscsi_init1(dev, kwqe); | ||
1985 | break; | ||
1986 | case ISCSI_KWQE_OPCODE_INIT2: | ||
1987 | ret = cnic_bnx2x_iscsi_init2(dev, kwqe); | ||
1988 | break; | ||
1989 | case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: | ||
1990 | ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], | ||
1991 | num_wqes - i, &work); | ||
1992 | break; | ||
1993 | case ISCSI_KWQE_OPCODE_UPDATE_CONN: | ||
1994 | ret = cnic_bnx2x_iscsi_update(dev, kwqe); | ||
1995 | break; | ||
1996 | case ISCSI_KWQE_OPCODE_DESTROY_CONN: | ||
1997 | ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); | ||
1998 | break; | ||
1999 | case L4_KWQE_OPCODE_VALUE_CONNECT1: | ||
2000 | ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, | ||
2001 | &work); | ||
2002 | break; | ||
2003 | case L4_KWQE_OPCODE_VALUE_CLOSE: | ||
2004 | ret = cnic_bnx2x_close(dev, kwqe); | ||
2005 | break; | ||
2006 | case L4_KWQE_OPCODE_VALUE_RESET: | ||
2007 | ret = cnic_bnx2x_reset(dev, kwqe); | ||
2008 | break; | ||
2009 | case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: | ||
2010 | ret = cnic_bnx2x_offload_pg(dev, kwqe); | ||
2011 | break; | ||
2012 | case L4_KWQE_OPCODE_VALUE_UPDATE_PG: | ||
2013 | ret = cnic_bnx2x_update_pg(dev, kwqe); | ||
2014 | break; | ||
2015 | case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: | ||
2016 | ret = 0; | ||
2017 | break; | ||
2018 | default: | ||
2019 | ret = 0; | ||
2020 | printk(KERN_ERR PFX "%s: Unknown type of KWQE(0x%x)\n", | ||
2021 | dev->netdev->name, opcode); | ||
2022 | break; | ||
2023 | } | ||
2024 | if (ret < 0) | ||
2025 | printk(KERN_ERR PFX "%s: KWQE(0x%x) failed\n", | ||
2026 | dev->netdev->name, opcode); | ||
2027 | i += work; | ||
2028 | } | ||
2029 | return 0; | ||
2030 | } | ||
2031 | |||
942 | static void service_kcqes(struct cnic_dev *dev, int num_cqes) | 2032 | static void service_kcqes(struct cnic_dev *dev, int num_cqes) |
943 | { | 2033 | { |
944 | struct cnic_local *cp = dev->cnic_priv; | 2034 | struct cnic_local *cp = dev->cnic_priv; |
@@ -1005,6 +2095,22 @@ static u16 cnic_bnx2_hw_idx(u16 idx)
1005 | return idx; | 2095 | return idx; |
1006 | } | 2096 | } |
1007 | 2097 | ||
2098 | static u16 cnic_bnx2x_next_idx(u16 idx) | ||
2099 | { | ||
2100 | idx++; | ||
2101 | if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) | ||
2102 | idx++; | ||
2103 | |||
2104 | return idx; | ||
2105 | } | ||
2106 | |||
2107 | static u16 cnic_bnx2x_hw_idx(u16 idx) | ||
2108 | { | ||
2109 | if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) | ||
2110 | idx++; | ||
2111 | return idx; | ||
2112 | } | ||
2113 | |||
1008 | static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) | 2114 | static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) |
1009 | { | 2115 | { |
1010 | struct cnic_local *cp = dev->cnic_priv; | 2116 | struct cnic_local *cp = dev->cnic_priv; |
@@ -1038,6 +2144,7 @@ static void cnic_chk_pkt_rings(struct cnic_local *cp)
1038 | if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { | 2144 | if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { |
1039 | cp->tx_cons = tx_cons; | 2145 | cp->tx_cons = tx_cons; |
1040 | cp->rx_cons = rx_cons; | 2146 | cp->rx_cons = rx_cons; |
2147 | |||
1041 | uio_event_notify(cp->cnic_uinfo); | 2148 | uio_event_notify(cp->cnic_uinfo); |
1042 | } | 2149 | } |
1043 | } | 2150 | } |
@@ -1143,6 +2250,91 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
1143 | return IRQ_HANDLED; | 2250 | return IRQ_HANDLED; |
1144 | } | 2251 | } |
1145 | 2252 | ||
2253 | static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, | ||
2254 | u16 index, u8 op, u8 update) | ||
2255 | { | ||
2256 | struct cnic_local *cp = dev->cnic_priv; | ||
2257 | u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + | ||
2258 | COMMAND_REG_INT_ACK); | ||
2259 | struct igu_ack_register igu_ack; | ||
2260 | |||
2261 | igu_ack.status_block_index = index; | ||
2262 | igu_ack.sb_id_and_flags = | ||
2263 | ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | | ||
2264 | (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | | ||
2265 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | | ||
2266 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); | ||
2267 | |||
2268 | CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); | ||
2269 | } | ||
2270 | |||
2271 | static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) | ||
2272 | { | ||
2273 | struct cnic_local *cp = dev->cnic_priv; | ||
2274 | |||
2275 | cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0, | ||
2276 | IGU_INT_DISABLE, 0); | ||
2277 | } | ||
2278 | |||
2279 | static void cnic_service_bnx2x_bh(unsigned long data) | ||
2280 | { | ||
2281 | struct cnic_dev *dev = (struct cnic_dev *) data; | ||
2282 | struct cnic_local *cp = dev->cnic_priv; | ||
2283 | u16 hw_prod, sw_prod; | ||
2284 | struct cstorm_status_block_c *sblk = | ||
2285 | &cp->bnx2x_status_blk->c_status_block; | ||
2286 | u32 status_idx = sblk->status_block_index; | ||
2287 | int kcqe_cnt; | ||
2288 | |||
2289 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) | ||
2290 | return; | ||
2291 | |||
2292 | hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; | ||
2293 | hw_prod = cp->hw_idx(hw_prod); | ||
2294 | sw_prod = cp->kcq_prod_idx; | ||
2295 | while (sw_prod != hw_prod) { | ||
2296 | kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); | ||
2297 | if (kcqe_cnt == 0) | ||
2298 | goto done; | ||
2299 | |||
2300 | service_kcqes(dev, kcqe_cnt); | ||
2301 | |||
2302 | /* Tell compiler that sblk fields can change. */ | ||
2303 | barrier(); | ||
2304 | if (status_idx == sblk->status_block_index) | ||
2305 | break; | ||
2306 | |||
2307 | status_idx = sblk->status_block_index; | ||
2308 | hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; | ||
2309 | hw_prod = cp->hw_idx(hw_prod); | ||
2310 | } | ||
2311 | |||
2312 | done: | ||
2313 | CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX); | ||
2314 | cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, | ||
2315 | status_idx, IGU_INT_ENABLE, 1); | ||
2316 | |||
2317 | cp->kcq_prod_idx = sw_prod; | ||
2318 | return; | ||
2319 | } | ||
2320 | |||
2321 | static int cnic_service_bnx2x(void *data, void *status_blk) | ||
2322 | { | ||
2323 | struct cnic_dev *dev = data; | ||
2324 | struct cnic_local *cp = dev->cnic_priv; | ||
2325 | u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; | ||
2326 | |||
2327 | prefetch(cp->status_blk); | ||
2328 | prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); | ||
2329 | |||
2330 | if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) | ||
2331 | tasklet_schedule(&cp->cnic_irq_task); | ||
2332 | |||
2333 | cnic_chk_pkt_rings(cp); | ||
2334 | |||
2335 | return 0; | ||
2336 | } | ||
2337 | |||
1146 | static void cnic_ulp_stop(struct cnic_dev *dev) | 2338 | static void cnic_ulp_stop(struct cnic_dev *dev) |
1147 | { | 2339 | { |
1148 | struct cnic_local *cp = dev->cnic_priv; | 2340 | struct cnic_local *cp = dev->cnic_priv; |
@@ -1215,6 +2407,19 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
1215 | 2407 | ||
1216 | cnic_put(dev); | 2408 | cnic_put(dev); |
1217 | break; | 2409 | break; |
2410 | case CNIC_CTL_COMPLETION_CMD: { | ||
2411 | u32 cid = BNX2X_SW_CID(info->data.comp.cid); | ||
2412 | u32 l5_cid; | ||
2413 | struct cnic_local *cp = dev->cnic_priv; | ||
2414 | |||
2415 | if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { | ||
2416 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
2417 | |||
2418 | ctx->wait_cond = 1; | ||
2419 | wake_up(&ctx->waitq); | ||
2420 | } | ||
2421 | break; | ||
2422 | } | ||
1218 | default: | 2423 | default: |
1219 | return -EINVAL; | 2424 | return -EINVAL; |
1220 | } | 2425 | } |
@@ -1890,6 +3095,8 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
1890 | /* fall through */ | 3095 | /* fall through */ |
1891 | case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: | 3096 | case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: |
1892 | case L4_KCQE_OPCODE_VALUE_RESET_COMP: | 3097 | case L4_KCQE_OPCODE_VALUE_RESET_COMP: |
3098 | case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: | ||
3099 | case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: | ||
1893 | cp->close_conn(csk, opcode); | 3100 | cp->close_conn(csk, opcode); |
1894 | break; | 3101 | break; |
1895 | 3102 | ||
@@ -1975,6 +3182,76 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
1975 | return 0; | 3182 | return 0; |
1976 | } | 3183 | } |
1977 | 3184 | ||
3185 | static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) | ||
3186 | { | ||
3187 | struct cnic_dev *dev = csk->dev; | ||
3188 | struct cnic_local *cp = dev->cnic_priv; | ||
3189 | struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; | ||
3190 | union l5cm_specific_data l5_data; | ||
3191 | u32 cmd = 0; | ||
3192 | int close_complete = 0; | ||
3193 | |||
3194 | switch (opcode) { | ||
3195 | case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: | ||
3196 | case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: | ||
3197 | case L4_KCQE_OPCODE_VALUE_RESET_COMP: | ||
3198 | if (cnic_ready_to_close(csk, opcode)) | ||
3199 | cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; | ||
3200 | break; | ||
3201 | case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: | ||
3202 | cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; | ||
3203 | break; | ||
3204 | case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: | ||
3205 | close_complete = 1; | ||
3206 | break; | ||
3207 | } | ||
3208 | if (cmd) { | ||
3209 | memset(&l5_data, 0, sizeof(l5_data)); | ||
3210 | |||
3211 | cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, | ||
3212 | &l5_data); | ||
3213 | } else if (close_complete) { | ||
3214 | ctx->timestamp = jiffies; | ||
3215 | cnic_close_conn(csk); | ||
3216 | cnic_cm_upcall(cp, csk, csk->state); | ||
3217 | } | ||
3218 | } | ||
3219 | |||
3220 | static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) | ||
3221 | { | ||
3222 | } | ||
3223 | |||
3224 | static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) | ||
3225 | { | ||
3226 | struct cnic_local *cp = dev->cnic_priv; | ||
3227 | int func = CNIC_FUNC(cp); | ||
3228 | |||
3229 | cnic_init_bnx2x_mac(dev); | ||
3230 | cnic_bnx2x_set_tcp_timestamp(dev, 1); | ||
3231 | |||
3232 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + | ||
3233 | XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0); | ||
3234 | |||
3235 | CNIC_WR(dev, BAR_XSTRORM_INTMEM + | ||
3236 | XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1); | ||
3237 | CNIC_WR(dev, BAR_XSTRORM_INTMEM + | ||
3238 | XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func), | ||
3239 | DEF_MAX_DA_COUNT); | ||
3240 | |||
3241 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
3242 | XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL); | ||
3243 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
3244 | XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS); | ||
3245 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | ||
3246 | XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2); | ||
3247 | CNIC_WR(dev, BAR_XSTRORM_INTMEM + | ||
3248 | XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER); | ||
3249 | |||
3250 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func), | ||
3251 | DEF_MAX_CWND); | ||
3252 | return 0; | ||
3253 | } | ||
3254 | |||
1978 | static int cnic_cm_open(struct cnic_dev *dev) | 3255 | static int cnic_cm_open(struct cnic_dev *dev) |
1979 | { | 3256 | { |
1980 | struct cnic_local *cp = dev->cnic_priv; | 3257 | struct cnic_local *cp = dev->cnic_priv; |
@@ -2482,11 +3759,402 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) | |||
2482 | return 0; | 3759 | return 0; |
2483 | } | 3760 | } |
2484 | 3761 | ||
3762 | static void cnic_setup_bnx2x_context(struct cnic_dev *dev) | ||
3763 | { | ||
3764 | struct cnic_local *cp = dev->cnic_priv; | ||
3765 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
3766 | u32 start_offset = ethdev->ctx_tbl_offset; | ||
3767 | int i; | ||
3768 | |||
3769 | for (i = 0; i < cp->ctx_blks; i++) { | ||
3770 | struct cnic_ctx *ctx = &cp->ctx_arr[i]; | ||
3771 | dma_addr_t map = ctx->mapping; | ||
3772 | |||
3773 | if (cp->ctx_align) { | ||
3774 | unsigned long mask = cp->ctx_align - 1; | ||
3775 | |||
3776 | map = (map + mask) & ~mask; | ||
3777 | } | ||
3778 | |||
3779 | cnic_ctx_tbl_wr(dev, start_offset + i, map); | ||
3780 | } | ||
3781 | } | ||
3782 | |||
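The loop in cnic_setup_bnx2x_context above rounds each context block's DMA address up to cp->ctx_align before programming it into the chip's context table; the `(map + mask) & ~mask` trick only works when the alignment is a power of two. A minimal standalone sketch of that round-up (names are illustrative, not from this patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round addr up to the next multiple of align (align must be a power of two). */
static uint64_t align_up(uint64_t addr, uint64_t align)
{
	uint64_t mask = align - 1;

	assert(align && (align & mask) == 0);	/* power-of-two check */
	return (addr + mask) & ~mask;
}

int main(void)
{
	/* e.g. an unaligned DMA address rounded up to a 4 KiB boundary */
	printf("0x%llx\n", (unsigned long long)align_up(0x80001234ULL, 0x1000));
	return 0;
}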
3783 | static int cnic_init_bnx2x_irq(struct cnic_dev *dev) | ||
3784 | { | ||
3785 | struct cnic_local *cp = dev->cnic_priv; | ||
3786 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
3787 | int err = 0; | ||
3788 | |||
3789 | tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2x_bh, | ||
3790 | (unsigned long) dev); | ||
3791 | if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { | ||
3792 | err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, | ||
3793 | "cnic", dev); | ||
3794 | if (err) | ||
3795 | tasklet_disable(&cp->cnic_irq_task); | ||
3796 | } | ||
3797 | return err; | ||
3798 | } | ||
3799 | |||
3800 | static void cnic_enable_bnx2x_int(struct cnic_dev *dev) | ||
3801 | { | ||
3802 | struct cnic_local *cp = dev->cnic_priv; | ||
3803 | u8 sb_id = cp->status_blk_num; | ||
3804 | int port = CNIC_PORT(cp); | ||
3805 | |||
3806 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
3807 | CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, | ||
3808 | HC_INDEX_C_ISCSI_EQ_CONS), | ||
3809 | 64 / 12); | ||
3810 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
3811 | CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, | ||
3812 | HC_INDEX_C_ISCSI_EQ_CONS), 0); | ||
3813 | } | ||
3814 | |||
3815 | static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) | ||
3816 | { | ||
3817 | } | ||
3818 | |||
3819 | static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev) | ||
3820 | { | ||
3821 | struct cnic_local *cp = dev->cnic_priv; | ||
3822 | union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring; | ||
3823 | struct eth_context *context; | ||
3824 | struct regpair context_addr; | ||
3825 | dma_addr_t buf_map; | ||
3826 | int func = CNIC_FUNC(cp); | ||
3827 | int port = CNIC_PORT(cp); | ||
3828 | int i; | ||
3829 | int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
3830 | u32 val; | ||
3831 | |||
3832 | memset(txbd, 0, BCM_PAGE_SIZE); | ||
3833 | |||
3834 | buf_map = cp->l2_buf_map; | ||
3835 | for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { | ||
3836 | struct eth_tx_start_bd *start_bd = &txbd->start_bd; | ||
3837 | struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); | ||
3838 | |||
3839 | start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); | ||
3840 | start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); | ||
3841 | reg_bd->addr_hi = start_bd->addr_hi; | ||
3842 | reg_bd->addr_lo = start_bd->addr_lo + 0x10; | ||
3843 | start_bd->nbytes = cpu_to_le16(0x10); | ||
3844 | start_bd->nbd = cpu_to_le16(3); | ||
3845 | start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | ||
3846 | start_bd->general_data = (UNICAST_ADDRESS << | ||
3847 | ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); | ||
3848 | start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); | ||
3849 | |||
3850 | } | ||
3851 | context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr); | ||
3852 | |||
3853 | val = (u64) cp->l2_ring_map >> 32; | ||
3854 | txbd->next_bd.addr_hi = cpu_to_le32(val); | ||
3855 | |||
3856 | context->xstorm_st_context.tx_bd_page_base_hi = val; | ||
3857 | |||
3858 | val = (u64) cp->l2_ring_map & 0xffffffff; | ||
3859 | txbd->next_bd.addr_lo = cpu_to_le32(val); | ||
3860 | |||
3861 | context->xstorm_st_context.tx_bd_page_base_lo = val; | ||
3862 | |||
3863 | context->cstorm_st_context.sb_index_number = | ||
3864 | HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS; | ||
3865 | context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID; | ||
3866 | |||
3867 | context->xstorm_st_context.statistics_data = (cli | | ||
3868 | XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); | ||
3869 | |||
3870 | context->xstorm_ag_context.cdu_reserved = | ||
3871 | CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func), | ||
3872 | CDU_REGION_NUMBER_XCM_AG, | ||
3873 | ETH_CONNECTION_TYPE); | ||
3874 | |||
3875 | /* reset xstorm per client statistics */ | ||
3876 | val = BAR_XSTRORM_INTMEM + | ||
3877 | XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
3878 | for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) | ||
3879 | CNIC_WR(dev, val + i * 4, 0); | ||
3880 | |||
3881 | cp->tx_cons_ptr = | ||
3882 | &cp->bnx2x_def_status_blk->c_def_status_block.index_values[ | ||
3883 | HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS]; | ||
3884 | } | ||
3885 | |||
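Both ring-setup routines above hand a 64-bit bus address to the hardware as two 32-bit halves (addr_hi/addr_lo), converted to little-endian with cpu_to_le32 before they are stored in the descriptors. A standalone sketch of just the split, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit bus address into the hi/lo 32-bit halves the BDs expect. */
static void split_dma_addr(uint64_t addr, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(addr >> 32);
	*lo = (uint32_t)(addr & 0xffffffff);
}

int main(void)
{
	uint32_t hi, lo;

	split_dma_addr(0x0000000123456789ULL, &hi, &lo);
	printf("hi=0x%08x lo=0x%08x\n", hi, lo);	/* hi=0x00000001 lo=0x23456789 */
	return 0;
}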
3886 | static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev) | ||
3887 | { | ||
3888 | struct cnic_local *cp = dev->cnic_priv; | ||
3889 | struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring + | ||
3890 | BCM_PAGE_SIZE); | ||
3891 | struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) | ||
3892 | (cp->l2_ring + (2 * BCM_PAGE_SIZE)); | ||
3893 | struct eth_context *context; | ||
3894 | struct regpair context_addr; | ||
3895 | int i; | ||
3896 | int port = CNIC_PORT(cp); | ||
3897 | int func = CNIC_FUNC(cp); | ||
3898 | int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
3899 | u32 val; | ||
3900 | struct tstorm_eth_client_config tstorm_client = {0}; | ||
3901 | |||
3902 | for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { | ||
3903 | dma_addr_t buf_map; | ||
3904 | int n = (i % cp->l2_rx_ring_size) + 1; | ||
3905 | |||
3906 | buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); | ||
3907 | rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); | ||
3908 | rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); | ||
3909 | } | ||
3910 | context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr); | ||
3911 | |||
3912 | val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; | ||
3913 | rxbd->addr_hi = cpu_to_le32(val); | ||
3914 | |||
3915 | context->ustorm_st_context.common.bd_page_base_hi = val; | ||
3916 | |||
3917 | val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; | ||
3918 | rxbd->addr_lo = cpu_to_le32(val); | ||
3919 | |||
3920 | context->ustorm_st_context.common.bd_page_base_lo = val; | ||
3921 | |||
3922 | context->ustorm_st_context.common.sb_index_numbers = | ||
3923 | BNX2X_ISCSI_RX_SB_INDEX_NUM; | ||
3924 | context->ustorm_st_context.common.clientId = cli; | ||
3925 | context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID; | ||
3926 | context->ustorm_st_context.common.flags = | ||
3927 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS; | ||
3928 | context->ustorm_st_context.common.statistics_counter_id = cli; | ||
3929 | context->ustorm_st_context.common.mc_alignment_log_size = 0; | ||
3930 | context->ustorm_st_context.common.bd_buff_size = | ||
3931 | cp->l2_single_buf_size; | ||
3932 | |||
3933 | context->ustorm_ag_context.cdu_usage = | ||
3934 | CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func), | ||
3935 | CDU_REGION_NUMBER_UCM_AG, | ||
3936 | ETH_CONNECTION_TYPE); | ||
3937 | |||
3938 | rxcqe += BNX2X_MAX_RCQ_DESC_CNT; | ||
3939 | val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32; | ||
3940 | rxcqe->addr_hi = cpu_to_le32(val); | ||
3941 | |||
3942 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3943 | USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val); | ||
3944 | |||
3945 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3946 | USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val); | ||
3947 | |||
3948 | val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; | ||
3949 | rxcqe->addr_lo = cpu_to_le32(val); | ||
3950 | |||
3951 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3952 | USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val); | ||
3953 | |||
3954 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
3955 | USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val); | ||
3956 | |||
3957 | /* client tstorm info */ | ||
3958 | tstorm_client.mtu = cp->l2_single_buf_size - 14; | ||
3959 | tstorm_client.config_flags = | ||
3960 | (TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE | | ||
3961 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE); | ||
3962 | tstorm_client.statistics_counter_id = cli; | ||
3963 | |||
3964 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
3965 | TSTORM_CLIENT_CONFIG_OFFSET(port, cli), | ||
3966 | ((u32 *)&tstorm_client)[0]); | ||
3967 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
3968 | TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4, | ||
3969 | ((u32 *)&tstorm_client)[1]); | ||
3970 | |||
3971 | /* reset tstorm per client statistics */ | ||
3972 | val = BAR_TSTRORM_INTMEM + | ||
3973 | TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
3974 | for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) | ||
3975 | CNIC_WR(dev, val + i * 4, 0); | ||
3976 | |||
3977 | /* reset ustorm per client statistics */ | ||
3978 | val = BAR_USTRORM_INTMEM + | ||
3979 | USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
3980 | for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++) | ||
3981 | CNIC_WR(dev, val + i * 4, 0); | ||
3982 | |||
3983 | cp->rx_cons_ptr = | ||
3984 | &cp->bnx2x_def_status_blk->u_def_status_block.index_values[ | ||
3985 | HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS]; | ||
3986 | } | ||
3987 | |||
3988 | static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) | ||
3989 | { | ||
3990 | struct cnic_local *cp = dev->cnic_priv; | ||
3991 | u32 base, addr, val; | ||
3992 | int port = CNIC_PORT(cp); | ||
3993 | |||
3994 | dev->max_iscsi_conn = 0; | ||
3995 | base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR); | ||
3996 | if (base < 0xa0000 || base >= 0xc0000) | ||
3997 | return; | ||
3998 | |||
3999 | val = BNX2X_SHMEM_ADDR(base, | ||
4000 | dev_info.port_hw_config[port].iscsi_mac_upper); | ||
4001 | |||
4002 | dev->mac_addr[0] = (u8) (val >> 8); | ||
4003 | dev->mac_addr[1] = (u8) val; | ||
4004 | |||
4005 | val = BNX2X_SHMEM_ADDR(base, | ||
4006 | dev_info.port_hw_config[port].iscsi_mac_lower); | ||
4007 | |||
4008 | dev->mac_addr[2] = (u8) (val >> 24); | ||
4009 | dev->mac_addr[3] = (u8) (val >> 16); | ||
4010 | dev->mac_addr[4] = (u8) (val >> 8); | ||
4011 | dev->mac_addr[5] = (u8) val; | ||
4012 | |||
4013 | addr = BNX2X_SHMEM_ADDR(base, validity_map[port]); | ||
4014 | val = CNIC_RD(dev, addr); | ||
4015 | |||
4016 | if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) { | ||
4017 | u16 val16; | ||
4018 | |||
4019 | addr = BNX2X_SHMEM_ADDR(base, | ||
4020 | drv_lic_key[port].max_iscsi_init_conn); | ||
4021 | val16 = CNIC_RD16(dev, addr); | ||
4022 | |||
4023 | if (val16) | ||
4024 | val16 ^= 0x1e1e; | ||
4025 | dev->max_iscsi_conn = val16; | ||
4026 | } | ||
4027 | if (BNX2X_CHIP_IS_E1H(cp->chip_id)) { | ||
4028 | int func = CNIC_FUNC(cp); | ||
4029 | |||
4030 | addr = BNX2X_SHMEM_ADDR(base, | ||
4031 | mf_cfg.func_mf_config[func].e1hov_tag); | ||
4032 | val = CNIC_RD(dev, addr); | ||
4033 | val &= FUNC_MF_CFG_E1HOV_TAG_MASK; | ||
4034 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { | ||
4035 | addr = BNX2X_SHMEM_ADDR(base, | ||
4036 | mf_cfg.func_mf_config[func].config); | ||
4037 | val = CNIC_RD(dev, addr); | ||
4038 | val &= FUNC_MF_CFG_PROTOCOL_MASK; | ||
4039 | if (val != FUNC_MF_CFG_PROTOCOL_ISCSI) | ||
4040 | dev->max_iscsi_conn = 0; | ||
4041 | } | ||
4042 | } | ||
4043 | } | ||
4044 | |||
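cnic_get_bnx2x_iscsi_info above rebuilds the 6-byte iSCSI MAC from two 32-bit shared-memory words: the "upper" word carries bytes 0-1 in its low 16 bits, the "lower" word carries bytes 2-5. A standalone sketch of the same unpacking (the register values below are made up):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 6-byte MAC from the upper/lower shmem words, using the same
 * byte ordering as the driver code above. */
static void unpack_mac(uint32_t upper, uint32_t lower, uint8_t mac[6])
{
	mac[0] = (uint8_t)(upper >> 8);
	mac[1] = (uint8_t)upper;
	mac[2] = (uint8_t)(lower >> 24);
	mac[3] = (uint8_t)(lower >> 16);
	mac[4] = (uint8_t)(lower >> 8);
	mac[5] = (uint8_t)lower;
}

int main(void)
{
	uint8_t mac[6];

	unpack_mac(0x00000010, 0x18a7b2c3, mac);	/* hypothetical shmem contents */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}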
4045 | static int cnic_start_bnx2x_hw(struct cnic_dev *dev) | ||
4046 | { | ||
4047 | struct cnic_local *cp = dev->cnic_priv; | ||
4048 | int func = CNIC_FUNC(cp), ret, i; | ||
4049 | int port = CNIC_PORT(cp); | ||
4050 | u16 eq_idx; | ||
4051 | u8 sb_id = cp->status_blk_num; | ||
4052 | |||
4053 | ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, | ||
4054 | BNX2X_ISCSI_START_CID); | ||
4055 | |||
4056 | if (ret) | ||
4057 | return -ENOMEM; | ||
4058 | |||
4059 | cp->kcq_io_addr = BAR_CSTRORM_INTMEM + | ||
4060 | CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0); | ||
4061 | cp->kcq_prod_idx = 0; | ||
4062 | |||
4063 | cnic_get_bnx2x_iscsi_info(dev); | ||
4064 | |||
4065 | /* Only 1 EQ */ | ||
4066 | CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX); | ||
4067 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4068 | CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0); | ||
4069 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4070 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0), | ||
4071 | cp->kcq_info.pg_map_arr[1] & 0xffffffff); | ||
4072 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4073 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4, | ||
4074 | (u64) cp->kcq_info.pg_map_arr[1] >> 32); | ||
4075 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4076 | CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0), | ||
4077 | cp->kcq_info.pg_map_arr[0] & 0xffffffff); | ||
4078 | CNIC_WR(dev, BAR_CSTRORM_INTMEM + | ||
4079 | CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4, | ||
4080 | (u64) cp->kcq_info.pg_map_arr[0] >> 32); | ||
4081 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
4082 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1); | ||
4083 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
4084 | CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num); | ||
4085 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | ||
4086 | CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0), | ||
4087 | HC_INDEX_C_ISCSI_EQ_CONS); | ||
4088 | |||
4089 | for (i = 0; i < cp->conn_buf_info.num_pages; i++) { | ||
4090 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
4091 | TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i), | ||
4092 | cp->conn_buf_info.pgtbl[2 * i]); | ||
4093 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
4094 | TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4, | ||
4095 | cp->conn_buf_info.pgtbl[(2 * i) + 1]); | ||
4096 | } | ||
4097 | |||
4098 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
4099 | USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func), | ||
4100 | cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); | ||
4101 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | ||
4102 | USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4, | ||
4103 | (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); | ||
4104 | |||
4105 | cnic_setup_bnx2x_context(dev); | ||
4106 | |||
4107 | eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM + | ||
4108 | CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) + | ||
4109 | offsetof(struct cstorm_status_block_c, | ||
4110 | index_values[HC_INDEX_C_ISCSI_EQ_CONS])); | ||
4111 | if (eq_idx != 0) { | ||
4112 | printk(KERN_ERR PFX "%s: EQ cons index %x != 0\n", | ||
4113 | dev->netdev->name, eq_idx); | ||
4114 | return -EBUSY; | ||
4115 | } | ||
4116 | ret = cnic_init_bnx2x_irq(dev); | ||
4117 | if (ret) | ||
4118 | return ret; | ||
4119 | |||
4120 | cnic_init_bnx2x_tx_ring(dev); | ||
4121 | cnic_init_bnx2x_rx_ring(dev); | ||
4122 | |||
4123 | return 0; | ||
4124 | } | ||
4125 | |||
2485 | static void cnic_init_rings(struct cnic_dev *dev) | 4126 | static void cnic_init_rings(struct cnic_dev *dev) |
2486 | { | 4127 | { |
2487 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { | 4128 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { |
2488 | cnic_init_bnx2_tx_ring(dev); | 4129 | cnic_init_bnx2_tx_ring(dev); |
2489 | cnic_init_bnx2_rx_ring(dev); | 4130 | cnic_init_bnx2_rx_ring(dev); |
4131 | } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { | ||
4132 | struct cnic_local *cp = dev->cnic_priv; | ||
4133 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
4134 | u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
4135 | union l5cm_specific_data l5_data; | ||
4136 | struct ustorm_eth_rx_producers rx_prods = {0}; | ||
4137 | void __iomem *doorbell; | ||
4138 | int i; | ||
4139 | |||
4140 | rx_prods.bd_prod = 0; | ||
4141 | rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; | ||
4142 | barrier(); | ||
4143 | |||
4144 | doorbell = ethdev->io_base2 + BAR_USTRORM_INTMEM + | ||
4145 | USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli); | ||
4146 | |||
4147 | for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) | ||
4148 | writel(((u32 *) &rx_prods)[i], doorbell + i * 4); | ||
4149 | |||
4150 | cnic_init_bnx2x_tx_ring(dev); | ||
4151 | cnic_init_bnx2x_rx_ring(dev); | ||
4152 | |||
4153 | l5_data.phy_address.lo = cli; | ||
4154 | l5_data.phy_address.hi = 0; | ||
4155 | cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, | ||
4156 | BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); | ||
4157 | cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1); | ||
2490 | } | 4158 | } |
2491 | } | 4159 | } |
2492 | 4160 | ||
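In the bnx2x branch of cnic_init_rings above, the rx producer structure is pushed into the USTORM doorbell window one 32-bit word at a time with writel. Outside the kernel the same "copy a small struct into a device window word by word" pattern looks roughly like the sketch below; the struct layout and names are stand-ins, and a plain volatile pointer stands in for the MMIO accessors:

#include <stdint.h>
#include <stdio.h>

struct rx_producers {			/* stand-in for the firmware-defined struct */
	uint32_t bd_prod;
	uint32_t cqe_prod;
};

/* Copy the struct into a (memory-mapped) window one 32-bit word at a time. */
static void push_producers(volatile uint32_t *win, const struct rx_producers *p)
{
	const uint32_t *src = (const uint32_t *)p;
	size_t i;

	for (i = 0; i < sizeof(*p) / 4; i++)
		win[i] = src[i];
}

int main(void)
{
	uint32_t fake_window[2] = {0};		/* pretend BAR mapping */
	struct rx_producers prods = { .bd_prod = 0, .cqe_prod = 127 };

	push_producers(fake_window, &prods);
	printf("bd=%u cqe=%u\n", fake_window[0], fake_window[1]);
	return 0;
}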
@@ -2494,6 +4162,11 @@ static void cnic_shutdown_rings(struct cnic_dev *dev) | |||
2494 | { | 4162 | { |
2495 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { | 4163 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { |
2496 | cnic_shutdown_bnx2_rx_ring(dev); | 4164 | cnic_shutdown_bnx2_rx_ring(dev); |
4165 | } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { | ||
4166 | struct cnic_local *cp = dev->cnic_priv; | ||
4167 | u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | ||
4168 | |||
4169 | cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); | ||
2497 | } | 4170 | } |
2498 | } | 4171 | } |
2499 | 4172 | ||
@@ -2587,6 +4260,22 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev) | |||
2587 | cnic_free_resc(dev); | 4260 | cnic_free_resc(dev); |
2588 | } | 4261 | } |
2589 | 4262 | ||
4263 | |||
4264 | static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) | ||
4265 | { | ||
4266 | struct cnic_local *cp = dev->cnic_priv; | ||
4267 | u8 sb_id = cp->status_blk_num; | ||
4268 | int port = CNIC_PORT(cp); | ||
4269 | |||
4270 | cnic_free_irq(dev); | ||
4271 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | ||
4272 | CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) + | ||
4273 | offsetof(struct cstorm_status_block_c, | ||
4274 | index_values[HC_INDEX_C_ISCSI_EQ_CONS]), | ||
4275 | 0); | ||
4276 | cnic_free_resc(dev); | ||
4277 | } | ||
4278 | |||
2590 | static void cnic_stop_hw(struct cnic_dev *dev) | 4279 | static void cnic_stop_hw(struct cnic_dev *dev) |
2591 | { | 4280 | { |
2592 | if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { | 4281 | if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { |
@@ -2718,6 +4407,57 @@ cnic_err: | |||
2718 | return NULL; | 4407 | return NULL; |
2719 | } | 4408 | } |
2720 | 4409 | ||
4410 | static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) | ||
4411 | { | ||
4412 | struct pci_dev *pdev; | ||
4413 | struct cnic_dev *cdev; | ||
4414 | struct cnic_local *cp; | ||
4415 | struct cnic_eth_dev *ethdev = NULL; | ||
4416 | struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; | ||
4417 | |||
4418 | probe = symbol_get(bnx2x_cnic_probe); | ||
4419 | if (probe) { | ||
4420 | ethdev = (*probe)(dev); | ||
4421 | symbol_put(bnx2x_cnic_probe); | ||
4422 | } | ||
4423 | if (!ethdev) | ||
4424 | return NULL; | ||
4425 | |||
4426 | pdev = ethdev->pdev; | ||
4427 | if (!pdev) | ||
4428 | return NULL; | ||
4429 | |||
4430 | dev_hold(dev); | ||
4431 | cdev = cnic_alloc_dev(dev, pdev); | ||
4432 | if (cdev == NULL) { | ||
4433 | dev_put(dev); | ||
4434 | return NULL; | ||
4435 | } | ||
4436 | |||
4437 | set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); | ||
4438 | cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; | ||
4439 | |||
4440 | cp = cdev->cnic_priv; | ||
4441 | cp->ethdev = ethdev; | ||
4442 | cdev->pcidev = pdev; | ||
4443 | |||
4444 | cp->cnic_ops = &cnic_bnx2x_ops; | ||
4445 | cp->start_hw = cnic_start_bnx2x_hw; | ||
4446 | cp->stop_hw = cnic_stop_bnx2x_hw; | ||
4447 | cp->setup_pgtbl = cnic_setup_page_tbl_le; | ||
4448 | cp->alloc_resc = cnic_alloc_bnx2x_resc; | ||
4449 | cp->free_resc = cnic_free_resc; | ||
4450 | cp->start_cm = cnic_cm_init_bnx2x_hw; | ||
4451 | cp->stop_cm = cnic_cm_stop_bnx2x_hw; | ||
4452 | cp->enable_int = cnic_enable_bnx2x_int; | ||
4453 | cp->disable_int_sync = cnic_disable_bnx2x_int_sync; | ||
4454 | cp->ack_int = cnic_ack_bnx2x_msix; | ||
4455 | cp->close_conn = cnic_close_bnx2x_conn; | ||
4456 | cp->next_idx = cnic_bnx2x_next_idx; | ||
4457 | cp->hw_idx = cnic_bnx2x_hw_idx; | ||
4458 | return cdev; | ||
4459 | } | ||
4460 | |||
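init_bnx2x_cnic above wires all of the chip-specific entry points (start_hw, stop_hw, enable_int, close_conn, ...) into cnic_local so the common paths dispatch through function pointers instead of re-testing the chip class. A minimal standalone sketch of that ops-table pattern (names are illustrative, not the driver's):

#include <stdio.h>

struct hw_ops {				/* per-chip entry points */
	int  (*start_hw)(void *priv);
	void (*stop_hw)(void *priv);
};

static int demo_start(void *priv)
{
	(void)priv;
	puts("start chip-specific hw");
	return 0;
}

static void demo_stop(void *priv)
{
	(void)priv;
	puts("stop chip-specific hw");
}

static const struct hw_ops demo_ops = {
	.start_hw = demo_start,
	.stop_hw  = demo_stop,
};

/* Common code only ever calls through the ops table. */
static int bring_up(const struct hw_ops *ops, void *priv)
{
	return ops->start_hw(priv);
}

int main(void)
{
	bring_up(&demo_ops, NULL);
	demo_ops.stop_hw(NULL);
	return 0;
}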
2721 | static struct cnic_dev *is_cnic_dev(struct net_device *dev) | 4461 | static struct cnic_dev *is_cnic_dev(struct net_device *dev) |
2722 | { | 4462 | { |
2723 | struct ethtool_drvinfo drvinfo; | 4463 | struct ethtool_drvinfo drvinfo; |
@@ -2729,6 +4469,8 @@ static struct cnic_dev *is_cnic_dev(struct net_device *dev) | |||
2729 | 4469 | ||
2730 | if (!strcmp(drvinfo.driver, "bnx2")) | 4470 | if (!strcmp(drvinfo.driver, "bnx2")) |
2731 | cdev = init_bnx2_cnic(dev); | 4471 | cdev = init_bnx2_cnic(dev); |
4472 | if (!strcmp(drvinfo.driver, "bnx2x")) | ||
4473 | cdev = init_bnx2x_cnic(dev); | ||
2732 | if (cdev) { | 4474 | if (cdev) { |
2733 | write_lock(&cnic_dev_lock); | 4475 | write_lock(&cnic_dev_lock); |
2734 | list_add(&cdev->list, &cnic_dev_list); | 4476 | list_add(&cdev->list, &cnic_dev_list); |