author     Hariprasad Shenai <hariprasad@chelsio.com>    2014-11-06 23:05:25 -0500
committer  David S. Miller <davem@davemloft.net>         2014-11-10 12:57:10 -0500
commit     e2ac9628959cc152a811931a6422757b137ac4a4 (patch)
tree       b73982788602de801c3e835d63247d7af174f740 /drivers/infiniband/hw
parent     6559a7e8296002b4379e5f2c26a2a3a339d5e60a (diff)
cxgb4: Cleanup macros so they follow the same style and look consistent, part 2
Various patches have ended up changing the style of the symbolic macros/register defines to different styles. As a result, the current kernel.org files are a mix of different macro styles. Since these macro/register defines are used by different drivers, a few patch series have ended up adding duplicate macro/register define entries with different styles. This makes the register define/macro files a complete mess, and we want to make them clean and consistent. This patch cleans up a part of it.

Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
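For context, the target convention in the cxgb4 firmware/register headers pairs each multi-bit field with _S/_M/_V/_G helpers and gives each single-bit field an extra _F shorthand, which is why callers in this diff switch from FW_WR_OP(x) to FW_WR_OP_V(x) and from FW_WR_COMPL(1) to FW_WR_COMPL_F. A minimal sketch of that pattern follows; the shift and mask values shown here are illustrative, not copied from t4fw_api.h:

/* Sketch of the unified macro style; bit positions below are illustrative. */
#define FW_WR_OP_S      24
#define FW_WR_OP_M      0xff
#define FW_WR_OP_V(x)   ((x) << FW_WR_OP_S)                     /* build a field value */
#define FW_WR_OP_G(x)   (((x) >> FW_WR_OP_S) & FW_WR_OP_M)      /* extract a field value */

#define FW_WR_COMPL_S   21
#define FW_WR_COMPL_V(x) ((x) << FW_WR_COMPL_S)
#define FW_WR_COMPL_F   FW_WR_COMPL_V(1U)                       /* single-bit flag shorthand */

With this layout a caller writes, for example, cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | FW_WR_COMPL_F) rather than spelling out the old FW_WR_COMPL(1) form, which is exactly the substitution the hunks below perform.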
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/cxgb4/cm.c	56
-rw-r--r--	drivers/infiniband/hw/cxgb4/cq.c	8
-rw-r--r--	drivers/infiniband/hw/cxgb4/mem.c	14
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	26
4 files changed, 52 insertions, 52 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index fb61f6685809..a07d8e124a80 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -472,10 +472,10 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 	skb = get_skb(skb, flowclen, GFP_KERNEL);
 	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
 
-	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
-					   FW_FLOWC_WR_NPARAMS(8));
-	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
-					  16)) | FW_WR_FLOWID(ep->hwtid));
+	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+					   FW_FLOWC_WR_NPARAMS_V(8));
+	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+					  16)) | FW_WR_FLOWID_V(ep->hwtid));
 
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
 	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
@@ -803,16 +803,16 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	req->op_to_immdlen = cpu_to_be32(
-		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
-		FW_WR_COMPL(1) |
-		FW_WR_IMMDLEN(mpalen));
+		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+		FW_WR_COMPL_F |
+		FW_WR_IMMDLEN_V(mpalen));
 	req->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(ep->hwtid) |
-		FW_WR_LEN16(wrlen >> 4));
+		FW_WR_FLOWID_V(ep->hwtid) |
+		FW_WR_LEN16_V(wrlen >> 4));
 	req->plen = cpu_to_be32(mpalen);
 	req->tunnel_to_proxy = cpu_to_be32(
-		FW_OFLD_TX_DATA_WR_FLUSH(1) |
-		FW_OFLD_TX_DATA_WR_SHOVE(1));
+		FW_OFLD_TX_DATA_WR_FLUSH_F |
+		FW_OFLD_TX_DATA_WR_SHOVE_F);
 
 	mpa = (struct mpa_message *)(req + 1);
 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
@@ -897,16 +897,16 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	req->op_to_immdlen = cpu_to_be32(
-		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
-		FW_WR_COMPL(1) |
-		FW_WR_IMMDLEN(mpalen));
+		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+		FW_WR_COMPL_F |
+		FW_WR_IMMDLEN_V(mpalen));
 	req->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(ep->hwtid) |
-		FW_WR_LEN16(wrlen >> 4));
+		FW_WR_FLOWID_V(ep->hwtid) |
+		FW_WR_LEN16_V(wrlen >> 4));
 	req->plen = cpu_to_be32(mpalen);
 	req->tunnel_to_proxy = cpu_to_be32(
-		FW_OFLD_TX_DATA_WR_FLUSH(1) |
-		FW_OFLD_TX_DATA_WR_SHOVE(1));
+		FW_OFLD_TX_DATA_WR_FLUSH_F |
+		FW_OFLD_TX_DATA_WR_SHOVE_F);
 
 	mpa = (struct mpa_message *)(req + 1);
 	memset(mpa, 0, sizeof(*mpa));
@@ -977,16 +977,16 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	req->op_to_immdlen = cpu_to_be32(
-		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
-		FW_WR_COMPL(1) |
-		FW_WR_IMMDLEN(mpalen));
+		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+		FW_WR_COMPL_F |
+		FW_WR_IMMDLEN_V(mpalen));
 	req->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(ep->hwtid) |
-		FW_WR_LEN16(wrlen >> 4));
+		FW_WR_FLOWID_V(ep->hwtid) |
+		FW_WR_LEN16_V(wrlen >> 4));
 	req->plen = cpu_to_be32(mpalen);
 	req->tunnel_to_proxy = cpu_to_be32(
-		FW_OFLD_TX_DATA_WR_FLUSH(1) |
-		FW_OFLD_TX_DATA_WR_SHOVE(1));
+		FW_OFLD_TX_DATA_WR_FLUSH_F |
+		FW_OFLD_TX_DATA_WR_SHOVE_F);
 
 	mpa = (struct mpa_message *)(req + 1);
 	memset(mpa, 0, sizeof(*mpa));
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
 	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
-	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
 				     ep->com.dev->rdev.lldi.ports[0],
 				     ep->l2t));
@@ -3537,8 +3537,8 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
 	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
-	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
-	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
 	req->le.filter = (__force __be32) filter;
 	req->le.lport = lport;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 0f773e78e080..e9fd3a029296 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -51,9 +51,9 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
 	memset(res_wr, 0, wr_len);
 	res_wr->op_nres = cpu_to_be32(
-			FW_WR_OP(FW_RI_RES_WR) |
+			FW_WR_OP_V(FW_RI_RES_WR) |
 			V_FW_RI_RES_WR_NRES(1) |
-			FW_WR_COMPL(1));
+			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
 	res_wr->cookie = (unsigned long) &wr_wait;
 	res = res_wr->res;
@@ -121,9 +121,9 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
 	memset(res_wr, 0, wr_len);
 	res_wr->op_nres = cpu_to_be32(
-			FW_WR_OP(FW_RI_RES_WR) |
+			FW_WR_OP_V(FW_RI_RES_WR) |
 			V_FW_RI_RES_WR_NRES(1) |
-			FW_WR_COMPL(1));
+			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
 	res_wr->cookie = (unsigned long) &wr_wait;
 	res = res_wr->res;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index ec7a2988a703..9335148c1ad9 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -74,10 +74,10 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
 	memset(req, 0, wr_len);
 	INIT_ULPTX_WR(req, wr_len, 0, 0);
-	req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
-				    (wait ? FW_WR_COMPL(1) : 0));
+	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
+				    (wait ? FW_WR_COMPL_F : 0));
 	req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
-	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
 	req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
 	req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
 	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
@@ -135,13 +135,13 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		INIT_ULPTX_WR(req, wr_len, 0, 0);
 
 		if (i == (num_wqe-1)) {
-			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
-						    FW_WR_COMPL(1));
+			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
+						    FW_WR_COMPL_F);
 			req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
 		} else
-			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
+			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
 		req->wr.wr_mid = cpu_to_be32(
-				FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+				FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
 
 		req->cmd = cmd;
 		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 41cd6882b648..2ed3ece2b2ee 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -271,9 +271,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
 	memset(res_wr, 0, wr_len);
 	res_wr->op_nres = cpu_to_be32(
-			FW_WR_OP(FW_RI_RES_WR) |
+			FW_WR_OP_V(FW_RI_RES_WR) |
 			V_FW_RI_RES_WR_NRES(2) |
-			FW_WR_COMPL(1));
+			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
 	res_wr->cookie = (unsigned long) &wr_wait;
 	res = res_wr->res;
@@ -1082,10 +1082,10 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
 	memset(wqe, 0, sizeof *wqe);
-	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
+	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
 	wqe->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(qhp->ep->hwtid) |
-		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+		FW_WR_FLOWID_V(qhp->ep->hwtid) |
+		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 
 	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
 	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
@@ -1204,11 +1204,11 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
 	memset(wqe, 0, sizeof *wqe);
 	wqe->op_compl = cpu_to_be32(
-		FW_WR_OP(FW_RI_INIT_WR) |
-		FW_WR_COMPL(1));
+		FW_WR_OP_V(FW_RI_INIT_WR) |
+		FW_WR_COMPL_F);
 	wqe->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(ep->hwtid) |
-		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+		FW_WR_FLOWID_V(ep->hwtid) |
+		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 	wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
 	wqe->u.fini.type = FW_RI_TYPE_FINI;
@@ -1273,11 +1273,11 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
 	memset(wqe, 0, sizeof *wqe);
 	wqe->op_compl = cpu_to_be32(
-		FW_WR_OP(FW_RI_INIT_WR) |
-		FW_WR_COMPL(1));
+		FW_WR_OP_V(FW_RI_INIT_WR) |
+		FW_WR_COMPL_F);
 	wqe->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(qhp->ep->hwtid) |
-		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+		FW_WR_FLOWID_V(qhp->ep->hwtid) |
+		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 
 	wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
 