author	David S. Miller <davem@davemloft.net>	2014-12-12 11:22:34 -0500
committer	David S. Miller <davem@davemloft.net>	2014-12-12 11:22:34 -0500
commit	3a923f5a43d5fb2e945f6255d29938edaee1f23a (patch)
tree	a1f9de6d0543bfc7b45a9d9a9088eb6f446a28ef
parent	37e9a6904520b525b542ecd67201164d06fdb95a (diff)
parent	ed481a33ee824bfee20319fc478503926bcf5f56 (diff)
Merge branch 'cxgb4'
Karen Xie says:

====================
cxgb4/cxgbi: misc. fixes for cxgb4i

This patch set fixes cxgb4i's tx credit calculation and adds handling of
additional rx message and negative advice types. It also removes the
duplicated code in cxgb4i that sets the outgoing queues of a packet.

Karen Xie (7):
  cxgb4i: fix tx immediate data credit check
  cxgb4i: fix credit check for tx_data_wr
  cxgb4/cxgb4i: set max. outgoing pdu length in the f/w
  cxgb4i: add more types of negative advice
  cxgb4i: handle non pdu-aligned rx data
  cxgb4i: use cxgb4's set_wr_txq() for setting outgoing queues
  libcxgbi: fix the debug print accessing skb after it is freed

Sending to net as the fixes are mostly in the network area and the set
touches cxgb4's header file (t4fw_api.h).

v2 corrects the "CHECK"s flagged by checkpatch.pl --strict.
v3 splits the 3rd patch from v2 into two separate patches, adds detailed
   commit messages, and makes the subjects more concise. Patch 3/6 also
   changes the return value of is_neg_adv() from int to bool.
v4 -- please ignore.
v5 splits the 1st patch from v3 into two separate patches and reduces
   code duplication in make_tx_data_wr().
v6 removes the code style cleanup from the 2nd patch; the style update
   will be addressed in a separate patch.
v7 updates the 7th patch with a more detailed commit message.
v8 removes the duplicate subject lines from the message bodies.
v9 reformats the commit messages to at most 80 characters per line.
v10 rebases onto the net-next tree.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
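The credit fixes all revolve around the same arithmetic: tx work requests are
charged in 16-byte credits, and the fw_ofld_tx_data_wr header must be counted
exactly once, whether the payload goes out as immediate data or as a DSGL. A
stand-alone sketch of the corrected push_tx_frames() accounting follows;
sizeof(struct fw_ofld_tx_data_wr) is assumed to be 16 bytes here purely for
illustration.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX_IMM_TX_PKT_LEN	128	/* same limit as cxgb4i.c */
#define TX_DATA_WR_LEN		16	/* assumed WR header size, illustrative */

/* Sketch of the corrected credit math: the WR header is charged once,
 * on top of either the immediate payload or the DSGL flits, rather
 * than being folded into both branches unconditionally.
 */
static unsigned int credits_needed(unsigned int dlen, unsigned int flits,
				   int imm, int need_hdr)
{
	unsigned int credits;

	if (imm)
		credits = DIV_ROUND_UP(dlen, 16);	/* payload only */
	else
		credits = DIV_ROUND_UP(8 * flits, 16);	/* 8-byte flits */

	if (need_hdr)	/* SKCBF_TX_NEED_HDR: add the WR header cost */
		credits += DIV_ROUND_UP(TX_DATA_WR_LEN, 16);

	return credits;
}

int main(void)
{
	/* a 96-byte immediate PDU with a WR header: 6 + 1 = 7 credits */
	printf("%u\n", credits_needed(96, 0, 1, 1));
	return 0;
}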
 drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h |   1
 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c            | 144
 drivers/scsi/cxgbi/libcxgbi.c                 |   4
 drivers/scsi/cxgbi/libcxgbi.h                 |   4
 4 files changed, 110 insertions(+), 43 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index beaf80a6214b..89a75e31f6ae 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -560,6 +560,7 @@ enum fw_flowc_mnem {
 	FW_FLOWC_MNEM_RCVNXT,
 	FW_FLOWC_MNEM_SNDBUF,
 	FW_FLOWC_MNEM_MSS,
+	FW_FLOWC_MNEM_TXDATAPLEN_MAX,
 };
 
 struct fw_flowc_mnemval {
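
The header change is append-only on purpose: the mnemonics are part of the
firmware interface, so FW_FLOWC_MNEM_TXDATAPLEN_MAX is added after
FW_FLOWC_MNEM_MSS to keep every existing value stable. A stand-alone
illustration; the starting value 5 is assumed here, the real values come from
t4fw_api.h.

#include <stdio.h>

/* Illustrative copy of the mnemonic tail: appending the new entry
 * leaves every existing enumerator value unchanged, which matters
 * because these numbers are interpreted by the firmware.
 */
enum fw_flowc_mnem {
	FW_FLOWC_MNEM_RCVNXT = 5,	/* value assumed for illustration */
	FW_FLOWC_MNEM_SNDBUF,
	FW_FLOWC_MNEM_MSS,
	FW_FLOWC_MNEM_TXDATAPLEN_MAX,	/* new: appended, so value 8 */
};

int main(void)
{
	printf("TXDATAPLEN_MAX = %d\n", FW_FLOWC_MNEM_TXDATAPLEN_MAX);
	return 0;
}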
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 69fbfc89efb6..a83d2ceded83 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -75,6 +75,7 @@ typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);
 static void *t4_uld_add(const struct cxgb4_lld_info *);
 static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
 static int t4_uld_state_change(void *, enum cxgb4_state state);
+static inline int send_tx_flowc_wr(struct cxgbi_sock *);
 
 static const struct cxgb4_uld_info cxgb4i_uld_info = {
 	.name = DRV_MODULE_NAME,
@@ -157,12 +158,6 @@ static struct scsi_transport_template *cxgb4i_stt;
 #define RCV_BUFSIZ_MASK		0x3FFU
 #define MAX_IMM_TX_PKT_LEN	128
 
-static inline void set_queue(struct sk_buff *skb, unsigned int queue,
-			     const struct cxgbi_sock *csk)
-{
-	skb->queue_mapping = queue;
-}
-
 static int push_tx_frames(struct cxgbi_sock *, int);
 
 /*
@@ -172,10 +167,14 @@ static int push_tx_frames(struct cxgbi_sock *, int);
  * Returns true if a packet can be sent as an offload WR with immediate
  * data. We currently use the same limit as for Ethernet packets.
  */
-static inline int is_ofld_imm(const struct sk_buff *skb)
+static inline bool is_ofld_imm(const struct sk_buff *skb)
 {
-	return skb->len <= (MAX_IMM_TX_PKT_LEN -
-			    sizeof(struct fw_ofld_tx_data_wr));
+	int len = skb->len;
+
+	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
+		len += sizeof(struct fw_ofld_tx_data_wr);
+
+	return len <= MAX_IMM_TX_PKT_LEN;
 }
 
 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
@@ -388,13 +387,19 @@ static void send_abort_req(struct cxgbi_sock *csk)
 
 	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
 		return;
+
+	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+		send_tx_flowc_wr(csk);
+		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+	}
+
 	cxgbi_sock_set_state(csk, CTP_ABORTING);
 	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
 	cxgbi_sock_purge_write_queue(csk);
 
 	csk->cpl_abort_req = NULL;
 	req = (struct cpl_abort_req *)skb->head;
-	set_queue(skb, CPL_PRIORITY_DATA, csk);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
 	req->cmd = CPL_ABORT_SEND_RST;
 	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
 	INIT_TP_WR(req, csk->tid);
@@ -420,7 +425,7 @@ static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
 		csk, csk->state, csk->flags, csk->tid, rst_status);
 
 	csk->cpl_abort_rpl = NULL;
-	set_queue(skb, CPL_PRIORITY_DATA, csk);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
 	INIT_TP_WR(rpl, csk->tid);
 	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
 	rpl->cmd = rst_status;
@@ -491,20 +496,40 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 	return flits + sgl_len(cnt);
 }
 
-static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
+#define FLOWC_WR_NPARAMS_MIN	9
+static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
+{
+	int nparams, flowclen16, flowclen;
+
+	nparams = FLOWC_WR_NPARAMS_MIN;
+	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+	flowclen16 = DIV_ROUND_UP(flowclen, 16);
+	flowclen = flowclen16 * 16;
+	/*
+	 * Return the number of 16-byte credits used by the FlowC request.
+	 * Pass back the nparams and actual FlowC length if requested.
+	 */
+	if (nparamsp)
+		*nparamsp = nparams;
+	if (flowclenp)
+		*flowclenp = flowclen;
+
+	return flowclen16;
+}
+
+static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
 {
 	struct sk_buff *skb;
 	struct fw_flowc_wr *flowc;
-	int flowclen, i;
+	int nparams, flowclen16, flowclen;
 
-	flowclen = 80;
+	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
 	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
 	flowc = (struct fw_flowc_wr *)skb->head;
 	flowc->op_to_nparams =
-		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(8));
+		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
 	flowc->flowid_len16 =
-		htonl(FW_WR_LEN16_V(DIV_ROUND_UP(72, 16)) |
-		      FW_WR_FLOWID_V(csk->tid));
+		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
 	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
@@ -523,12 +548,10 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
 	flowc->mnemval[7].val = htonl(csk->advmss);
 	flowc->mnemval[8].mnemonic = 0;
 	flowc->mnemval[8].val = 0;
-	for (i = 0; i < 9; i++) {
-		flowc->mnemval[i].r4[0] = 0;
-		flowc->mnemval[i].r4[1] = 0;
-		flowc->mnemval[i].r4[2] = 0;
-	}
-	set_queue(skb, CPL_PRIORITY_DATA, csk);
+	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+	flowc->mnemval[8].val = 16384;
+
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
@@ -537,6 +560,8 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
 		  csk->advmss);
 
 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+
+	return flowclen16;
 }
 
 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
@@ -545,10 +570,11 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
 	struct fw_ofld_tx_data_wr *req;
 	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
 	unsigned int wr_ulp_mode = 0, val;
+	bool imm = is_ofld_imm(skb);
 
 	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
 
-	if (is_ofld_imm(skb)) {
+	if (imm) {
 		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
 					   FW_WR_COMPL_F |
 					   FW_WR_IMMDLEN_V(dlen));
@@ -597,16 +623,32 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
 		int dlen = skb->len;
 		int len = skb->len;
 		unsigned int credits_needed;
+		int flowclen16 = 0;
 
 		skb_reset_transport_header(skb);
 		if (is_ofld_imm(skb))
-			credits_needed = DIV_ROUND_UP(dlen +
-					sizeof(struct fw_ofld_tx_data_wr), 16);
+			credits_needed = DIV_ROUND_UP(dlen, 16);
 		else
-			credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
-					+ sizeof(struct fw_ofld_tx_data_wr),
+			credits_needed = DIV_ROUND_UP(
+						8 * calc_tx_flits_ofld(skb),
+						16);
+
+		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
+			credits_needed += DIV_ROUND_UP(
+					sizeof(struct fw_ofld_tx_data_wr),
 					16);
 
+		/*
+		 * Assumes the initial credits is large enough to support
+		 * fw_flowc_wr plus largest possible first payload
+		 */
+		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+			flowclen16 = send_tx_flowc_wr(csk);
+			csk->wr_cred -= flowclen16;
+			csk->wr_una_cred += flowclen16;
+			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+		}
+
 		if (csk->wr_cred < credits_needed) {
 			log_debug(1 << CXGBI_DBG_PDU_TX,
 				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
@@ -615,8 +657,8 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
 			break;
 		}
 		__skb_unlink(skb, &csk->write_queue);
-		set_queue(skb, CPL_PRIORITY_DATA, csk);
-		skb->csum = credits_needed;
+		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
+		skb->csum = credits_needed + flowclen16;
 		csk->wr_cred -= credits_needed;
 		csk->wr_una_cred += credits_needed;
 		cxgbi_sock_enqueue_wr(csk, skb);
@@ -627,12 +669,6 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
 			  csk->wr_cred, csk->wr_una_cred);
 
 		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
-			if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
-				send_tx_flowc_wr(csk);
-				skb->csum += 5;
-				csk->wr_cred -= 5;
-				csk->wr_una_cred += 5;
-			}
 			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
 			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
 					req_completion);
@@ -807,6 +843,13 @@ static void csk_act_open_retry_timer(unsigned long data)
 
 }
 
+static inline bool is_neg_adv(unsigned int status)
+{
+	return status == CPL_ERR_RTX_NEG_ADVICE ||
+		status == CPL_ERR_KEEPALV_NEG_ADVICE ||
+		status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
 {
 	struct cxgbi_sock *csk;
@@ -828,7 +871,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
828 "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr), 871 "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
829 atid, tid, status, csk, csk->state, csk->flags); 872 atid, tid, status, csk, csk->state, csk->flags);
830 873
831 if (status == CPL_ERR_RTX_NEG_ADVICE) 874 if (is_neg_adv(status))
832 goto rel_skb; 875 goto rel_skb;
833 876
834 module_put(THIS_MODULE); 877 module_put(THIS_MODULE);
@@ -934,8 +977,7 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
 		       (&csk->saddr), (&csk->daddr),
 		       csk, csk->state, csk->flags, csk->tid, req->status);
 
-	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
-	    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+	if (is_neg_adv(req->status))
 		goto rel_skb;
 
 	cxgbi_sock_get(csk);
@@ -989,6 +1031,27 @@ rel_skb:
 	__kfree_skb(skb);
 }
 
+static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
+	unsigned int tid = GET_TID(cpl);
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+	struct tid_info *t = lldi->tids;
+
+	csk = lookup_tid(t, tid);
+	if (!csk) {
+		pr_err("can't find connection for tid %u.\n", tid);
+	} else {
+		/* not expecting this, reset the connection. */
+		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
+		spin_lock_bh(&csk->lock);
+		send_abort_req(csk);
+		spin_unlock_bh(&csk->lock);
+	}
+	__kfree_skb(skb);
+}
+
 static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
 {
 	struct cxgbi_sock *csk;
@@ -1408,6 +1471,7 @@ cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
 	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
 	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
 	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+	[CPL_RX_DATA] = do_rx_data,
 };
 
 int cxgb4i_ofld_init(struct cxgbi_device *cdev)
@@ -1485,7 +1549,7 @@ static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
 		return -ENOMEM;
 	}
 	req = (struct ulp_mem_io *)skb->head;
-	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
 	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
 	idata = (struct ulptx_idata *)(req + 1);
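
The tx_flowc_wr_credits() helper added above replaces the hard-coded
flowclen = 80 and DIV_ROUND_UP(72, 16) pair. Its arithmetic can be checked in
isolation; the struct layouts below are stand-ins for the t4fw_api.h
definitions (two 32-bit header words, 8-byte mnemval entries), assumed here
purely for illustration.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Stand-ins for the firmware structures in t4fw_api.h: a two-word
 * header followed by 8-byte {mnemonic, 3 reserved, value} entries.
 */
struct fw_flowc_mnemval {
	uint8_t mnemonic;
	uint8_t r4[3];
	uint32_t val;
};

struct fw_flowc_wr {
	uint32_t op_to_nparams;
	uint32_t flowid_len16;
	struct fw_flowc_mnemval mnemval[];
};

int main(void)
{
	int nparams = 9;	/* FLOWC_WR_NPARAMS_MIN */
	int flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	int flowclen16 = DIV_ROUND_UP(flowclen, 16);

	/* 8 + 9 * 8 = 80 bytes -> 5 credits, 80 bytes after padding,
	 * matching the values the old code hard-coded.
	 */
	printf("flowclen=%d flowclen16=%d padded=%d\n",
	       flowclen, flowclen16, flowclen16 * 16);
	return 0;
}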
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 7da59c38a69e..eb58afcfb73b 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2294,10 +2294,12 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
 		return err;
 	}
 
-	kfree_skb(skb);
 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
 		  "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
 		  task->itt, skb, skb->len, skb->data_len, err);
+
+	kfree_skb(skb);
+
 	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
 	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
 	return err;
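
The libcxgbi change is purely an ordering fix: the debug print dereferences
skb, so it has to run before kfree_skb() releases it. A user-space caricature
of the rule, with buf and free() standing in for the skb and kfree_skb():

#include <stdio.h>
#include <stdlib.h>

struct buf {
	unsigned int len;
	unsigned int data_len;
};

/* Any diagnostic that reads the buffer must come before the free;
 * swapping the two statements below reintroduces exactly the
 * use-after-free this patch removes.
 */
static void xmit_error_path(struct buf *b, int err)
{
	fprintf(stderr, "buf %p, len %u/%u, xmit err %d\n",
		(void *)b, b->len, b->data_len, err);
	free(b);
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->len = 48;
	xmit_error_path(b, -11);	/* illustrative error code */
	return 0;
}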
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 2c7cb1c0c453..aba1af720df6 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -317,8 +317,8 @@ static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
 	__clear_bit(flag, &(cxgbi_skcb_flags(skb)));
 }
 
-static inline int cxgbi_skcb_test_flag(struct sk_buff *skb,
+static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb,
 				       enum cxgbi_skcb_flags flag)
 {
 	return test_bit(flag, &(cxgbi_skcb_flags(skb)));
 }
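
This last hunk is required by the first cxgb4i patch: is_ofld_imm() now takes
a const struct sk_buff * and calls cxgbi_skcb_test_flag() on it, so the
accessor has to accept const as well or the build warns about discarding the
qualifier. A minimal stand-alone illustration of the requirement; pkt, the
flag bit, and the sizes below are all assumed stand-ins, not the real skb.

#include <stdbool.h>

struct pkt {
	unsigned long flags;
	unsigned int len;
};

/* Read-only accessor: taking a const pointer lets const callers use it. */
static inline int pkt_test_flag(const struct pkt *p, int flag)
{
	return (p->flags >> flag) & 1;
}

/* Mirrors the shape of is_ofld_imm(): a const packet must be able to
 * reach the flag test. Bit 0 stands in for SKCBF_TX_NEED_HDR, and
 * 16/128 for the WR header size and immediate-data limit.
 */
static inline bool is_imm(const struct pkt *p)
{
	unsigned int len = p->len;

	if (pkt_test_flag(p, 0))
		len += 16;
	return len <= 128;
}

int main(void)
{
	const struct pkt p = { .flags = 1UL, .len = 96 };

	return is_imm(&p) ? 0 : 1;
}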