author		Karen Xie <kxie@chelsio.com>	2013-05-29 20:13:28 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2013-06-26 13:48:24 -0400
commit		3bd3e8bf6250f32c153d95f85ec9249ed305589d (patch)
tree		6f2f5c3a88b7c48946df58ccb7766b877994163e /drivers/scsi/cxgbi
parent		2ee3e26c673e75c05ef8b914f54fadee3d7b9c88 (diff)
[SCSI] cxgb4i: add support for T5 adapter
Signed-off-by: Karen Xie <kxie@chelsio.com>
Reviewed-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/cxgbi')
-rw-r--r--	drivers/scsi/cxgbi/cxgb4i/cxgb4i.c	159
1 file changed, 128 insertions(+), 31 deletions(-)
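The patch keys each CPL message on the adapter generation reported by the lower-level driver: T4 keeps cpl_act_open_req and the ULP_MEMIO_ORDER encoding, while T5 uses the larger cpl_t5_act_open_req, a different opt2 bit, and V_T5_ULP_MEMIO_IMM; both paths now carry an ntuple built by the new select_ntuple() in params. Below is a minimal sketch of that dispatch idiom, distilled from the alloc_wr() changes in the diff — the helper act_open_wr_size() and its name are illustrative only and not part of the patch; is_t4(), cxgbi_cdev_priv() and the CPL structs are taken from the diff itself.

	/* Illustrative sketch, not part of the commit: pick the active-open
	 * request size by adapter generation, mirroring the alloc_wr() calls
	 * changed in the patch below.
	 */
	static inline size_t act_open_wr_size(struct cxgbi_device *cdev)
	{
		struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);

		return is_t4(lldi->adapter_type) ?
			sizeof(struct cpl_act_open_req) :	/* T4 request */
			sizeof(struct cpl_t5_act_open_req);	/* T5 request */
	}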
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 3fecf35ba292..e659febaedcb 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -20,6 +20,7 @@
 #include <net/dst.h>
 #include <linux/netdevice.h>
 
+#include "t4_regs.h"
 #include "t4_msg.h"
 #include "cxgb4.h"
 #include "cxgb4_uld.h"
@@ -32,13 +33,12 @@ static unsigned int dbg_level;
 #include "../libcxgbi.h"
 
 #define	DRV_MODULE_NAME		"cxgb4i"
-#define	DRV_MODULE_DESC		"Chelsio T4 iSCSI Driver"
-#define	DRV_MODULE_VERSION	"0.9.1"
-#define	DRV_MODULE_RELDATE	"Aug. 2010"
+#define	DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
+#define	DRV_MODULE_VERSION	"0.9.4"
 
 static char version[] =
 	DRV_MODULE_DESC " " DRV_MODULE_NAME
-	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+	" v" DRV_MODULE_VERSION "\n";
 
 MODULE_AUTHOR("Chelsio Communications, Inc.");
 MODULE_DESCRIPTION(DRV_MODULE_DESC);
@@ -175,10 +175,56 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
 			sizeof(struct fw_ofld_tx_data_wr));
 }
 
+
+#define VLAN_NONE 0xfff
+#define FILTER_SEL_VLAN_NONE 0xffff
+#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
+#define FILTER_SEL_WIDTH_VIN_P_FC \
+	(6 + 7 + FILTER_SEL_WIDTH_P_FC)  /* 6 bits are unused, VF uses 7 bits*/
+#define FILTER_SEL_WIDTH_TAG_P_FC \
+	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
+#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
+
+static unsigned int select_ntuple(struct cxgbi_device *cdev,
+				struct l2t_entry *l2t)
+{
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+	unsigned int ntuple = 0;
+	u32 viid;
+
+	switch (lldi->filt_mode) {
+
+	/* default filter mode */
+	case HW_TPL_FR_MT_PR_IV_P_FC:
+		if (l2t->vlan == VLAN_NONE)
+			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
+		else {
+			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
+			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		}
+		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		break;
+	case HW_TPL_FR_MT_PR_OV_P_FC: {
+		viid = cxgb4_port_viid(l2t->neigh->dev);
+
+		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
+		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
+		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
+		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		break;
+	}
+	default:
+		break;
+	}
+	return ntuple;
+}
+
 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 				struct l2t_entry *e)
 {
-	struct cpl_act_open_req *req;
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
 	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
 	unsigned long long opt0;
 	unsigned int opt2;
@@ -195,29 +241,58 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
 	opt2 = RX_CHANNEL(0) |
 		RSS_QUEUE_VALID |
-		(1 << 20) | (1 << 22) |
+		(1 << 20) |
 		RSS_QUEUE(csk->rss_qid);
 
-	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
-	req = (struct cpl_act_open_req *)skb->head;
+	if (is_t4(lldi->adapter_type)) {
+		struct cpl_act_open_req *req =
+				(struct cpl_act_open_req *)skb->head;
 
-	INIT_TP_WR(req, 0);
-	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+		req = (struct cpl_act_open_req *)skb->head;
+
+		INIT_TP_WR(req, 0);
+		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
 					qid_atid));
-	req->local_port = csk->saddr.sin_port;
-	req->peer_port = csk->daddr.sin_port;
-	req->local_ip = csk->saddr.sin_addr.s_addr;
-	req->peer_ip = csk->daddr.sin_addr.s_addr;
-	req->opt0 = cpu_to_be64(opt0);
-	req->params = 0;
-	req->opt2 = cpu_to_be32(opt2);
+		req->local_port = csk->saddr.sin_port;
+		req->peer_port = csk->daddr.sin_port;
+		req->local_ip = csk->saddr.sin_addr.s_addr;
+		req->peer_ip = csk->daddr.sin_addr.s_addr;
+		req->opt0 = cpu_to_be64(opt0);
+		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+		opt2 |= 1 << 22;
+		req->opt2 = cpu_to_be32(opt2);
 
-	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-		"csk 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
-		csk, &req->local_ip, ntohs(req->local_port),
-		&req->peer_ip, ntohs(req->peer_port),
-		csk->atid, csk->rss_qid);
+		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+			"csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+			csk, &req->local_ip, ntohs(req->local_port),
+			&req->peer_ip, ntohs(req->peer_port),
+			csk->atid, csk->rss_qid);
+	} else {
+		struct cpl_t5_act_open_req *req =
+				(struct cpl_t5_act_open_req *)skb->head;
+
+		req = (struct cpl_t5_act_open_req *)skb->head;
+
+		INIT_TP_WR(req, 0);
+		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+					qid_atid));
+		req->local_port = csk->saddr.sin_port;
+		req->peer_port = csk->daddr.sin_port;
+		req->local_ip = csk->saddr.sin_addr.s_addr;
+		req->peer_ip = csk->daddr.sin_addr.s_addr;
+		req->opt0 = cpu_to_be64(opt0);
+		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+		opt2 |= 1 << 31;
+		req->opt2 = cpu_to_be32(opt2);
 
+		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+			"csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+			csk, &req->local_ip, ntohs(req->local_port),
+			&req->peer_ip, ntohs(req->peer_port),
+			csk->atid, csk->rss_qid);
+	}
+
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
 }
 
@@ -632,6 +707,7 @@ static void csk_act_open_retry_timer(unsigned long data)
 {
 	struct sk_buff *skb;
 	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p,%u,0x%lx,%u.\n",
@@ -639,7 +715,10 @@ static void csk_act_open_retry_timer(unsigned long data)
 
 	cxgbi_sock_get(csk);
 	spin_lock_bh(&csk->lock);
-	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
+	skb = alloc_wr(is_t4(lldi->adapter_type) ?
+				sizeof(struct cpl_act_open_req) :
+				sizeof(struct cpl_t5_act_open_req),
+			0, GFP_ATOMIC);
 	if (!skb)
 		cxgbi_sock_fail_act_open(csk, -ENOMEM);
 	else {
@@ -871,7 +950,7 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
 
 	if (!csk->skb_ulp_lhdr) {
 		unsigned char *bhs;
-		unsigned int hlen, dlen;
+		unsigned int hlen, dlen, plen;
 
 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
 			"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
@@ -890,11 +969,15 @@
 		hlen = ntohs(cpl->len);
 		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
 
-		if ((hlen + dlen) != ISCSI_PDU_LEN(pdu_len_ddp) - 40) {
+		plen = ISCSI_PDU_LEN(pdu_len_ddp);
+		if (is_t4(lldi->adapter_type))
+			plen -= 40;
+
+		if ((hlen + dlen) != plen) {
 			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
 				"mismatch %u != %u + %u, seq 0x%x.\n",
-				csk->tid, ISCSI_PDU_LEN(pdu_len_ddp) - 40,
-				hlen, dlen, cxgbi_skcb_tcp_seq(skb));
+				csk->tid, plen, hlen, dlen,
+				cxgbi_skcb_tcp_seq(skb));
 			goto abort_conn;
 		}
 
@@ -1154,7 +1237,10 @@ static int init_act_open(struct cxgbi_sock *csk)
 	}
 	cxgbi_sock_get(csk);
 
-	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
+	skb = alloc_wr(is_t4(lldi->adapter_type) ?
+				sizeof(struct cpl_act_open_req) :
+				sizeof(struct cpl_t5_act_open_req),
+			0, GFP_ATOMIC);
 	if (!skb)
 		goto rel_resource;
 	skb->sk = (struct sock *)csk;
@@ -1193,6 +1279,8 @@ rel_resource:
 	return -EINVAL;
 }
 
+#define CPL_ISCSI_DATA		0xB2
+#define CPL_RX_ISCSI_DDP	0x49
 cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
 	[CPL_ACT_ESTABLISH] = do_act_establish,
 	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
@@ -1202,8 +1290,10 @@ cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
 	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
 	[CPL_FW4_ACK] = do_fw4_ack,
 	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
+	[CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
 	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
 	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
+	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
 };
 
 int cxgb4i_ofld_init(struct cxgbi_device *cdev)
@@ -1234,14 +1324,20 @@ int cxgb4i_ofld_init(struct cxgbi_device *cdev)
  * functions to program the pagepod in h/w
  */
 #define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
-static inline void ulp_mem_io_set_hdr(struct ulp_mem_io *req,
+static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
+				struct ulp_mem_io *req,
 				unsigned int wr_len, unsigned int dlen,
 				unsigned int pm_addr)
 {
 	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
 
 	INIT_ULPTX_WR(req, wr_len, 0, 0);
-	req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1 << 23));
+	if (is_t4(lldi->adapter_type))
+		req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
+					(ULP_MEMIO_ORDER(1)));
+	else
+		req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
+					(V_T5_ULP_MEMIO_IMM(1)));
 	req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
 	req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
 	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
@@ -1257,6 +1353,7 @@ static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
 				unsigned int gl_pidx)
 {
 	struct cxgbi_ddp_info *ddp = cdev->ddp;
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
 	struct sk_buff *skb;
 	struct ulp_mem_io *req;
 	struct ulptx_idata *idata;
@@ -1276,7 +1373,7 @@ static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
 	req = (struct ulp_mem_io *)skb->head;
 	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);
 
-	ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr);
+	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
 	idata = (struct ulptx_idata *)(req + 1);
 	ppod = (struct cxgbi_pagepod *)(idata + 1);
 