author     kxie@chelsio.com <kxie@chelsio.com>          2010-09-23 19:43:23 -0400
committer  James Bottomley <James.Bottomley@suse.de>    2010-10-07 18:21:36 -0400
commit     e27d6169c79e3c75edc74a14424a7856e7ff487c
tree       4d06f26fa3dcd43c43e118d2602767f76bfda40f
parent     0b3d8947972bfd2dd6d55c8009427ad2941ef038
[SCSI] cxgb4i: connection and ddp setting update
Update the cxgb4i connection settings and pagepod programming: program the
PF/VF into the FLOWC work request, validate the atid on active-open
establishment, set the DDP page-size and iSCSI digest TCB fields directly,
and write pagepods as ULP_TX immediate data instead of through a DSGL.
Signed-off-by: Karen Xie <kxie@chelsio.com>
Reviewed-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
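
Editor's note (illustrative, not part of the commit): with this change, pagepods are carried as ULP_TX immediate data inside a ULP_TX_MEM_WRITE work request, so each request is sized as the ulp_mem_io header plus the ulptx_idata header plus the pod payload, rounded up to 16 bytes, and one request holds at most ULPMEM_IDATA_MAX_NPPODS (4) pods. The small user-space C sketch below walks through that arithmetic; the ulp_mem_io header size is an assumed stand-in, the rest follows the patch.

/* Illustrative sketch only: not kernel code and not part of this patch. */
#include <stdio.h>

#define PPOD_SIZE               64      /* bytes per pagepod, implied by the 256/PPOD_SIZE comment */
#define ULPMEM_IDATA_MAX_NPPODS 4       /* 256 / PPOD_SIZE, as defined in the patch */
#define ULP_MEM_IO_HDR          32      /* assumed stand-in for sizeof(struct ulp_mem_io) */
#define ULPTX_IDATA_HDR         8       /* sizeof(struct ulptx_idata): two __be32 fields */

static unsigned int roundup16(unsigned int v)
{
        return (v + 15) & ~15u;         /* roundup(v, 16) */
}

int main(void)
{
        unsigned int npods = 7;         /* pods needed for one DDP mapping */
        unsigned int i, cnt;

        /* mirrors the ddp_set_map() loop: split the mapping into chunks */
        for (i = 0; i < npods; i += cnt) {
                cnt = npods - i;
                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
                        cnt = ULPMEM_IDATA_MAX_NPPODS;

                /* mirrors ddp_ppod_write_idata(): pods travel as immediate data */
                unsigned int dlen = PPOD_SIZE * cnt;
                unsigned int wr_len = roundup16(ULP_MEM_IO_HDR +
                                                ULPTX_IDATA_HDR + dlen);

                printf("pods %u..%u: dlen %u bytes, wr_len %u bytes\n",
                       i, i + cnt - 1, dlen, wr_len);
        }
        return 0;
}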
Diffstat (limited to 'drivers/scsi')
 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 128
 drivers/scsi/cxgbi/cxgb4i/cxgb4i.h |   5
 drivers/scsi/cxgbi/libcxgbi.h      |  10
 3 files changed, 71 insertions(+), 72 deletions(-)
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 1056d97e686e..99f2b8c5dd63 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -34,8 +34,8 @@ static unsigned int dbg_level;
 
 #define DRV_MODULE_NAME         "cxgb4i"
 #define DRV_MODULE_DESC         "Chelsio T4 iSCSI Driver"
-#define DRV_MODULE_VERSION      "0.9.0"
-#define DRV_MODULE_RELDATE      "May 2010"
+#define DRV_MODULE_VERSION      "0.9.1"
+#define DRV_MODULE_RELDATE      "Aug. 2010"
 
 static char version[] =
         DRV_MODULE_DESC " " DRV_MODULE_NAME
@@ -396,7 +396,7 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
                         htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
                                 FW_WR_FLOWID(csk->tid));
         flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
-        flowc->mnemval[0].val = htonl(0);
+        flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
         flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
         flowc->mnemval[1].val = htonl(csk->tx_chan);
         flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
@@ -568,6 +568,12 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
                 goto rel_skb;
         }
 
+        if (csk->atid != atid) {
+                pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
+                        atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
+                goto rel_skb;
+        }
+
         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                 "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n",
                 csk, csk->state, csk->flags, tid, atid, rcv_isn);
@@ -681,9 +687,10 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
                 goto rel_skb;
         }
 
-        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-                "csk 0x%p,%u,0x%lx, status %u, atid %u, tid %u.\n",
-                csk, csk->state, csk->flags, status, atid, tid);
+        pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n",
+                &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
+                &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
+                atid, tid, status, csk, csk->state, csk->flags);
 
         if (status && status != CPL_ERR_TCAM_FULL &&
             status != CPL_ERR_CONN_EXIST &&
@@ -846,7 +853,6 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
         unsigned int tid = GET_TID(cpl);
         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
         struct tid_info *t = lldi->tids;
-        struct sk_buff *lskb;
 
         csk = lookup_tid(t, tid);
         if (unlikely(!csk)) {
@@ -872,6 +878,8 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
         }
 
         cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
+        cxgbi_skcb_flags(skb) = 0;
+
         skb_reset_transport_header(skb);
         __skb_pull(skb, sizeof(*cpl));
         __pskb_trim(skb, ntohs(cpl->len));
@@ -884,17 +892,16 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
                         "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
                         csk, csk->state, csk->flags, csk->tid, skb);
                 csk->skb_ulp_lhdr = skb;
-                lskb = csk->skb_ulp_lhdr;
-                cxgbi_skcb_set_flag(lskb, SKCBF_RX_HDR);
+                cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
 
-                if (cxgbi_skcb_tcp_seq(lskb) != csk->rcv_nxt) {
+                if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
                         pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
-                                csk->tid, cxgbi_skcb_tcp_seq(lskb),
+                                csk->tid, cxgbi_skcb_tcp_seq(skb),
                                 csk->rcv_nxt);
                         goto abort_conn;
                 }
 
-                bhs = lskb->data;
+                bhs = skb->data;
                 hlen = ntohs(cpl->len);
                 dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
 
@@ -918,9 +925,9 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
                         ntohl(*((unsigned int *)(bhs + 24))));
 
         } else {
-                lskb = csk->skb_ulp_lhdr;
-                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
+                struct sk_buff *lskb = csk->skb_ulp_lhdr;
 
+                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                         "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
                         csk, csk->state, csk->flags, skb, lskb);
@@ -979,7 +986,6 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
         lskb = csk->skb_ulp_lhdr;
         csk->skb_ulp_lhdr = NULL;
 
-        cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
         cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);
 
         if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
@@ -987,15 +993,13 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
                         csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
 
         if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
-                log_debug(1 << CXGBI_DBG_PDU_RX,
-                        "csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad.\n",
-                        csk, lskb, status);
+                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
+                        csk, lskb, status, cxgbi_skcb_flags(lskb));
                 cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
         }
         if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
-                log_debug(1 << CXGBI_DBG_PDU_RX,
-                        "csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad.\n",
-                        csk, lskb, status);
+                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
+                        csk, lskb, status, cxgbi_skcb_flags(lskb));
                 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
         }
         if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
@@ -1015,6 +1019,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
                 "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
                 csk, lskb, cxgbi_skcb_flags(lskb));
 
+        cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
         cxgbi_conn_pdu_ready(csk);
         spin_unlock_bh(&csk->lock);
         goto rel_skb;
@@ -1234,41 +1239,41 @@ int cxgb4i_ofld_init(struct cxgbi_device *cdev)
 /*
  * functions to program the pagepod in h/w
  */
+#define ULPMEM_IDATA_MAX_NPPODS 4       /* 256/PPOD_SIZE */
 static inline void ulp_mem_io_set_hdr(struct ulp_mem_io *req,
-                                unsigned int dlen, unsigned int pm_addr)
+                                unsigned int wr_len, unsigned int dlen,
+                                unsigned int pm_addr)
 {
-        struct ulptx_sgl *sgl;
-        unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
-                                sizeof(struct ulptx_sgl), 16);
+        struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
 
         INIT_ULPTX_WR(req, wr_len, 0, 0);
-        req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE));
+        req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1 << 23));
         req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
         req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
         req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
-        sgl = (struct ulptx_sgl *)(req + 1);
-        sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(1));
-        sgl->len0 = htonl(dlen);
+
+        idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
+        idata->len = htonl(dlen);
 }
 
-static int ddp_ppod_write_sgl(struct cxgbi_device *cdev, unsigned int port_id,
+static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
                                 struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
                                 unsigned int npods,
                                 struct cxgbi_gather_list *gl,
                                 unsigned int gl_pidx)
 {
         struct cxgbi_ddp_info *ddp = cdev->ddp;
-        unsigned int dlen, pm_addr;
         struct sk_buff *skb;
         struct ulp_mem_io *req;
-        struct ulptx_sgl *sgl;
+        struct ulptx_idata *idata;
         struct cxgbi_pagepod *ppod;
+        unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
+        unsigned int dlen = PPOD_SIZE * npods;
+        unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
+                                sizeof(struct ulptx_idata) + dlen, 16);
         unsigned int i;
 
-        dlen = PPOD_SIZE * npods;
-        pm_addr = idx * PPOD_SIZE + ddp->llimit;
-
-        skb = alloc_wr(sizeof(*req) + sizeof(*sgl), dlen, GFP_ATOMIC);
+        skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
         if (!skb) {
                 pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
                         cdev, idx, npods);
@@ -1277,10 +1282,9 @@ static int ddp_ppod_write_sgl(struct cxgbi_device *cdev, unsigned int port_id,
         req = (struct ulp_mem_io *)skb->head;
         set_queue(skb, CPL_PRIORITY_CONTROL, NULL);
 
-        ulp_mem_io_set_hdr(req, dlen, pm_addr);
-        sgl = (struct ulptx_sgl *)(req + 1);
-        ppod = (struct cxgbi_pagepod *)(sgl + 1);
-        sgl->addr0 = cpu_to_be64(virt_to_phys(ppod));
+        ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr);
+        idata = (struct ulptx_idata *)(req + 1);
+        ppod = (struct cxgbi_pagepod *)(idata + 1);
 
         for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
                 if (!hdr && !gl)
@@ -1302,9 +1306,9 @@ static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
 
         for (i = 0; i < npods; i += cnt, idx += cnt) {
                 cnt = npods - i;
-                if (cnt > ULPMEM_DSGL_MAX_NPPODS)
-                        cnt = ULPMEM_DSGL_MAX_NPPODS;
-                err = ddp_ppod_write_sgl(csk->cdev, csk->port_id, hdr,
+                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
+                        cnt = ULPMEM_IDATA_MAX_NPPODS;
+                err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
                                         idx, cnt, gl, 4 * i);
                 if (err < 0)
                         break;
@@ -1320,9 +1324,9 @@ static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
 
         for (i = 0; i < npods; i += cnt, idx += cnt) {
                 cnt = npods - i;
-                if (cnt > ULPMEM_DSGL_MAX_NPPODS)
-                        cnt = ULPMEM_DSGL_MAX_NPPODS;
-                err = ddp_ppod_write_sgl(chba->cdev, chba->port_id, NULL,
+                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
+                        cnt = ULPMEM_IDATA_MAX_NPPODS;
+                err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
                                         idx, cnt, NULL, 0);
                 if (err < 0)
                         break;
@@ -1334,26 +1338,22 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
 {
         struct sk_buff *skb;
         struct cpl_set_tcb_field *req;
-        u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
 
-        if (!pg_idx)
+        if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
                 return 0;
 
         skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
         if (!skb)
                 return -ENOMEM;
 
-        /* set up ulp submode and page size */
-        val = (val & 0x03) << 2;
-        val |= TCB_ULP_TYPE(ULP2_MODE_ISCSI);
-
+        /* set up ulp page size */
         req = (struct cpl_set_tcb_field *)skb->head;
         INIT_TP_WR(req, csk->tid);
         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
         req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
-        req->word_cookie = htons(TCB_WORD(W_TCB_ULP_RAW));
-        req->mask = cpu_to_be64(TCB_ULP_TYPE(TCB_ULP_TYPE_MASK));
-        req->val = cpu_to_be64(val);
+        req->word_cookie = htons(0);
+        req->mask = cpu_to_be64(0x3 << 8);
+        req->val = cpu_to_be64(pg_idx << 8);
         set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
 
         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -1368,10 +1368,9 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
 {
         struct sk_buff *skb;
         struct cpl_set_tcb_field *req;
-        u64 val = (hcrc ? ULP_CRC_HEADER : 0) | (dcrc ? ULP_CRC_DATA : 0);
 
-        val = TCB_ULP_RAW(val);
-        val |= TCB_ULP_TYPE(ULP2_MODE_ISCSI);
+        if (!hcrc && !dcrc)
+                return 0;
 
         skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
         if (!skb)
@@ -1379,14 +1378,15 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
 
         csk->hcrc_len = (hcrc ? 4 : 0);
         csk->dcrc_len = (dcrc ? 4 : 0);
-        /* set up ulp submode and page size */
+        /* set up ulp submode */
         req = (struct cpl_set_tcb_field *)skb->head;
         INIT_TP_WR(req, tid);
         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
         req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
-        req->word_cookie = htons(TCB_WORD(W_TCB_ULP_RAW));
-        req->mask = cpu_to_be64(TCB_ULP_RAW(TCB_ULP_RAW_MASK));
-        req->val = cpu_to_be64(val);
+        req->word_cookie = htons(0);
+        req->mask = cpu_to_be64(0x3 << 4);
+        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
+                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
         set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
 
         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -1477,6 +1477,10 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
         cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
         cdev->itp = &cxgb4i_iscsi_transport;
 
+        cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
+        pr_info("cdev 0x%p,%s, pfvf %u.\n",
+                cdev, lldi->ports[0]->name, cdev->pfvf);
+
         rc = cxgb4i_ddp_init(cdev);
         if (rc) {
                 pr_info("t4 0x%p ddp init failed.\n", cdev);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
index 342263b1f542..1096026ba241 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
@@ -23,6 +23,11 @@
 #define CXGB4I_TX_HEADER_LEN \
         (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
 
+struct ulptx_idata {
+        __be32 cmd_more;
+        __be32 len;
+};
+
 struct cpl_rx_data_ddp {
         union opcode_tid ot;
         __be16 urg;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 43025b71e295..c57d59db000c 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -162,16 +162,6 @@ struct cxgbi_ddp_info {
 #define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT)
 #define PPOD_VALID_FLAG PPOD_VALID(1U)
 
-#define W_TCB_ULP_TYPE 0
-#define TCB_ULP_TYPE_SHIFT 0
-#define TCB_ULP_TYPE_MASK 0xfULL
-#define TCB_ULP_TYPE(x) ((x) << TCB_ULP_TYPE_SHIFT)
-
-#define W_TCB_ULP_RAW 0
-#define TCB_ULP_RAW_SHIFT 4
-#define TCB_ULP_RAW_MASK 0xffULL
-#define TCB_ULP_RAW(x) ((x) << TCB_ULP_RAW_SHIFT)
-
 /*
  * sge_opaque_hdr -
  * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
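
Editor's note (illustrative, not from the patch): the reworked ddp_setup_conn_pgidx() and ddp_setup_conn_digest() now program the TCB fields directly as CPL_SET_TCB_FIELD mask/val pairs in TCB word 0, with the DDP page-size index at bits 9:8 (mask 0x3 << 8) and the header/data CRC enables at bits 5:4 (mask 0x3 << 4). The sketch below just computes those pairs; ULP_CRC_HEADER = 0x1 and ULP_CRC_DATA = 0x2 are assumptions consistent with the two-bit mask, since their real definitions are outside this diff.

/* Illustrative sketch only: not kernel code and not part of this patch. */
#include <inttypes.h>
#include <stdio.h>

#define ULP_CRC_HEADER  0x1     /* assumed value; must fit the 2-bit mask below */
#define ULP_CRC_DATA    0x2     /* assumed value */

int main(void)
{
        unsigned int pg_idx = 2;        /* example DDP page-size index */
        int hcrc = 1, dcrc = 0;         /* header digest on, data digest off */

        /* ddp_setup_conn_pgidx(): page-size index at bits 9:8 of TCB word 0 */
        uint64_t pg_mask = 0x3ULL << 8;
        uint64_t pg_val  = (uint64_t)pg_idx << 8;

        /* ddp_setup_conn_digest(): CRC enables at bits 5:4 of TCB word 0 */
        uint64_t crc_mask = 0x3ULL << 4;
        uint64_t crc_val  = (uint64_t)((hcrc ? ULP_CRC_HEADER : 0) |
                                       (dcrc ? ULP_CRC_DATA : 0)) << 4;

        printf("pgidx:  mask 0x%" PRIx64 " val 0x%" PRIx64 "\n", pg_mask, pg_val);
        printf("digest: mask 0x%" PRIx64 " val 0x%" PRIx64 "\n", crc_mask, crc_val);
        return 0;
}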