author    Karen Xie <kxie@chelsio.com>    2014-12-11 22:13:35 -0500
committer David S. Miller <davem@davemloft.net>    2014-12-12 11:22:29 -0500
commit    64bfead85dc3caff74964fae1d03a8ee060770a6
tree      78dbfd9ad5e53b0f84eecedb237addfad5cc6e5b
parent    7857c62a35041a21a66ccab551601c942b748330
cxgb4/cxgb4i: set the max. pdu length in firmware
Program the firmware with the maximum outgoing iSCSI PDU length per connection.

Signed-off-by: Karen Xie <kxie@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/scsi/cxgbi')
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c  69
1 file changed, 51 insertions(+), 18 deletions(-)
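For readers following the size and credit math in the hunks below: each FLOWC parameter is an 8-byte (mnemonic, value) slot behind an 8-byte work-request header. A minimal sketch of that layout, mirroring struct fw_flowc_wr from t4fw_api.h (the exact field names and reserved bytes are quoted from memory of the firmware API header, not from this patch, so treat them as an assumption):

#include <linux/types.h>

/* sketch only -- see t4fw_api.h for the authoritative definition */
struct fw_flowc_mnemval {
        __u8    mnemonic;       /* FW_FLOWC_MNEM_* parameter selector */
        __u8    r4[3];          /* reserved */
        __be32  val;            /* parameter value */
};                              /* 8 bytes per parameter */

struct fw_flowc_wr {
        __be32  op_to_nparams;  /* opcode and parameter count */
        __be32  flowid_len16;   /* flow id and WR length in 16-byte units */
        struct fw_flowc_mnemval mnemval[];
};                              /* 8-byte header + nparams * 8 bytes */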
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 197d7de189fb..93ae720e8264 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -75,6 +75,7 @@ typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);
 static void *t4_uld_add(const struct cxgb4_lld_info *);
 static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
 static int t4_uld_state_change(void *, enum cxgb4_state state);
+static inline int send_tx_flowc_wr(struct cxgbi_sock *);
 
 static const struct cxgb4_uld_info cxgb4i_uld_info = {
         .name = DRV_MODULE_NAME,
@@ -392,6 +393,12 @@ static void send_abort_req(struct cxgbi_sock *csk)
 
         if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
                 return;
+
+        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+                send_tx_flowc_wr(csk);
+                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+        }
+
         cxgbi_sock_set_state(csk, CTP_ABORTING);
         cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
         cxgbi_sock_purge_write_queue(csk);
@@ -495,20 +502,40 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
         return flits + sgl_len(cnt);
 }
 
-static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
+#define FLOWC_WR_NPARAMS_MIN 9
+static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
+{
+        int nparams, flowclen16, flowclen;
+
+        nparams = FLOWC_WR_NPARAMS_MIN;
+        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+        flowclen16 = DIV_ROUND_UP(flowclen, 16);
+        flowclen = flowclen16 * 16;
+        /*
+         * Return the number of 16-byte credits used by the FlowC request.
+         * Pass back the nparams and actual FlowC length if requested.
+         */
+        if (nparamsp)
+                *nparamsp = nparams;
+        if (flowclenp)
+                *flowclenp = flowclen;
+
+        return flowclen16;
+}
+
+static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
 {
         struct sk_buff *skb;
         struct fw_flowc_wr *flowc;
-        int flowclen, i;
+        int nparams, flowclen16, flowclen;
 
-        flowclen = 80;
+        flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
         skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
         flowc = (struct fw_flowc_wr *)skb->head;
         flowc->op_to_nparams =
-                htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(8));
+                htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
         flowc->flowid_len16 =
-                htonl(FW_WR_LEN16_V(DIV_ROUND_UP(72, 16)) |
-                                FW_WR_FLOWID_V(csk->tid));
+                htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
         flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
         flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
         flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
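Worked through for the nine parameters used here (a sanity check, assuming the 8-byte header and 8-byte parameter slots sketched above after the diffstat): offsetof(struct fw_flowc_wr, mnemval[9]) = 8 + 9 * 8 = 80 bytes, DIV_ROUND_UP(80, 16) = 5 credits, and 5 * 16 = 80 bytes padded. That matches both hard-coded values being replaced: flowclen = 80, and FW_WR_LEN16_V(DIV_ROUND_UP(72, 16)), which also rounds up to 5. A standalone check of that arithmetic (illustrative only; userspace types stand in for the kernel ones):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

struct fw_flowc_mnemval {
        uint8_t  mnemonic;
        uint8_t  r4[3];
        uint32_t val;
};

struct fw_flowc_wr {
        uint32_t op_to_nparams;
        uint32_t flowid_len16;
        struct fw_flowc_mnemval mnemval[];
};

int main(void)
{
        size_t flowclen = offsetof(struct fw_flowc_wr, mnemval[9]);
        int flowclen16 = DIV_ROUND_UP(flowclen, 16);

        /* prints: flowclen=80 flowclen16=5 padded=80 */
        printf("flowclen=%zu flowclen16=%d padded=%d\n",
               flowclen, flowclen16, flowclen16 * 16);
        return 0;
}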
@@ -527,11 +554,9 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
         flowc->mnemval[7].val = htonl(csk->advmss);
         flowc->mnemval[8].mnemonic = 0;
         flowc->mnemval[8].val = 0;
-        for (i = 0; i < 9; i++) {
-                flowc->mnemval[i].r4[0] = 0;
-                flowc->mnemval[i].r4[1] = 0;
-                flowc->mnemval[i].r4[2] = 0;
-        }
+        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+        flowc->mnemval[8].val = 16384;
+
         set_queue(skb, CPL_PRIORITY_DATA, csk);
 
         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -541,6 +566,8 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
                   csk->advmss);
 
         cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+
+        return flowclen16;
 }
 
 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
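Note the changed return type threaded through this hunk, the forward declaration in the first hunk, and the rewritten signature above: send_tx_flowc_wr() now reports its cost in 16-byte credits so the transmit path below can charge it against csk->wr_cred, while the abort path added earlier simply ignores the return value.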
@@ -602,6 +629,7 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
                 int dlen = skb->len;
                 int len = skb->len;
                 unsigned int credits_needed;
+                int flowclen16 = 0;
 
                 skb_reset_transport_header(skb);
                 if (is_ofld_imm(skb))
@@ -616,6 +644,17 @@
                                         sizeof(struct fw_ofld_tx_data_wr),
                                         16);
 
+                /*
+                 * Assumes the initial credits is large enough to support
+                 * fw_flowc_wr plus largest possible first payload
+                 */
+                if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+                        flowclen16 = send_tx_flowc_wr(csk);
+                        csk->wr_cred -= flowclen16;
+                        csk->wr_una_cred += flowclen16;
+                        cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+                }
+
                 if (csk->wr_cred < credits_needed) {
                         log_debug(1 << CXGBI_DBG_PDU_TX,
                                   "csk 0x%p, skb %u/%u, wr %d < %u.\n",
@@ -625,7 +664,7 @@
                 }
                 __skb_unlink(skb, &csk->write_queue);
                 set_queue(skb, CPL_PRIORITY_DATA, csk);
-                skb->csum = credits_needed;
+                skb->csum = credits_needed + flowclen16;
                 csk->wr_cred -= credits_needed;
                 csk->wr_una_cred += credits_needed;
                 cxgbi_sock_enqueue_wr(csk, skb);
@@ -636,12 +675,6 @@
                           csk->wr_cred, csk->wr_una_cred);
 
                 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
-                        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
-                                send_tx_flowc_wr(csk);
-                                skb->csum += 5;
-                                csk->wr_cred -= 5;
-                                csk->wr_una_cred += 5;
-                        }
                         len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
                         make_tx_data_wr(csk, skb, dlen, len, credits_needed,
                                         req_completion);
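Taken together, the last four hunks move the first-transmission FLOWC out of the per-skb SKCBF_TX_NEED_HDR branch to a point ahead of the credit check, and replace the hard-coded 5-credit charge with the computed flowclen16, which is also folded into the first data WR's skb->csum so the FLOWC credits come back on completion. A condensed, runnable model of that bookkeeping (illustrative only: plain ints stand in for the cxgbi_sock fields, and the 5-credit FLOWC cost comes from the arithmetic worked above):

#include <stdio.h>

int main(void)
{
        int wr_cred = 36, wr_una_cred = 0;      /* example initial credits */
        int credits_needed = 3;                 /* example first data WR */
        int flowclen16 = 5;                     /* FLOWC: 9 params = 80B = 5 credits */
        int skb_csum;

        /* one-time FLOWC charge, taken before the credit check */
        wr_cred -= flowclen16;
        wr_una_cred += flowclen16;

        /* first data WR; its completion also returns the FLOWC credits */
        skb_csum = credits_needed + flowclen16;
        wr_cred -= credits_needed;
        wr_una_cred += credits_needed;

        printf("wr_cred=%d wr_una_cred=%d completion returns %d\n",
               wr_cred, wr_una_cred, skb_csum);
        return 0;
}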