author     Varun Prakash <varun@chelsio.com>        2016-09-13 11:54:03 -0400
committer  David S. Miller <davem@davemloft.net>    2016-09-15 20:49:20 -0400
commit     29fb6f42e7282322672eff8b4ad85918b9dcbae3 (patch)
tree       c745228ae738982d22029a921c6bbeffe374eec9
parent     a1a234542b7817c28770ad4e80be1bf69e6a4f86 (diff)
libcxgb, iw_cxgb4, cxgbit: add cxgb_mk_close_con_req()
Add cxgb_mk_close_con_req() to remove the duplicate code that builds
the CPL_CLOSE_CON_REQ hardware command.

Signed-off-by: Varun Prakash <varun@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c                   13
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h  16
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_cm.c            13
3 files changed, 23 insertions(+), 19 deletions(-)
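For context, the duplication being removed looks like this (a minimal
before/after sketch assembled from the hunks below, not part of the commit;
skb, len, tid, and chan stand in for each driver's local variables):

	/* before: each driver open-coded the CPL_CLOSE_CON_REQ setup */
	req = (struct cpl_close_con_req *)__skb_put(skb, len);
	memset(req, 0, len);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);

	/* after: a single shared helper from libcxgb_cm.h */
	cxgb_mk_close_con_req(skb, len, tid, chan, NULL, arp_failure_discard);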
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b818bd6d1fb5..22bccd87c5d2 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -637,21 +637,16 @@ static int send_flowc(struct c4iw_ep *ep)
 
 static int send_halfclose(struct c4iw_ep *ep)
 {
-	struct cpl_close_con_req *req;
 	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
-	int wrlen = roundup(sizeof *req, 16);
+	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	if (WARN_ON(!skb))
 		return -ENOMEM;
 
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
-	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
-	memset(req, 0, wrlen);
-	INIT_TP_WR(req, ep->hwtid);
-	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
-						    ep->hwtid));
+	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
+			      NULL, arp_failure_discard);
+
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
index fbb973e9ec29..e77661d98738 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
@@ -38,6 +38,7 @@
 
 #include <cxgb4.h>
 #include <t4_msg.h>
+#include <l2t.h>
 
 void
 cxgb_get_4tuple(struct cpl_pass_accept_req *, enum chip_type,
@@ -96,4 +97,19 @@ cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
 }
+
+static inline void
+cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+		      void *handle, arp_err_handler_t handler)
+{
+	struct cpl_close_con_req *req;
+
+	req = (struct cpl_close_con_req *)__skb_put(skb, len);
+	memset(req, 0, len);
+
+	INIT_TP_WR(req, tid);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+	t4_set_arp_err_handler(skb, handle, handler);
+}
 #endif
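Design note (not part of the commit): the handle and handler arguments are
forwarded unchanged to t4_set_arp_err_handler(), so each caller keeps control
of its ARP-failure behavior. In the hunks in this patch, iw_cxgb4 passes
arp_failure_discard as the handler, while cxgbit passes NULL for both.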
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 994058f0c4e0..a8f5f360414f 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -615,21 +615,14 @@ void cxgbit_free_np(struct iscsi_np *np)
 static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
 {
 	struct sk_buff *skb;
-	struct cpl_close_con_req *req;
-	unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);
+	u32 len = roundup(sizeof(struct cpl_close_con_req), 16);
 
 	skb = alloc_skb(len, GFP_ATOMIC);
 	if (!skb)
 		return;
 
-	req = (struct cpl_close_con_req *)__skb_put(skb, len);
-	memset(req, 0, len);
-
-	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
-	INIT_TP_WR(req, csk->tid);
-	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
-						    csk->tid));
-	req->rsvd = 0;
+	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
+			      NULL, NULL);
 
 	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
 	__skb_queue_tail(&csk->txq, skb);